diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 0c2679a05..b2abd11aa 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -3,7 +3,7 @@ name: Bug report about: Create a report to help us improve title: '' labels: new -assignees: jkryl +assignees: GlennBullingham --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index bbcbbe7d6..8db53dff1 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -3,7 +3,7 @@ name: Feature request about: Suggest an idea for this project title: '' labels: '' -assignees: '' +assignees: GlennBullingham --- diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml index b3639ddcd..1dbdd37c5 100644 --- a/.github/auto_assign.yml +++ b/.github/auto_assign.yml @@ -7,13 +7,13 @@ addAssignees: false # A list of reviewers to be added to pull requests (GitHub user name) reviewers: - blaisedias + - chriswldenyer - cjones1024 - - jonathan-teh - - jkryl - gila + - jonathan-teh - paulyoong - - chriswldenyer - tiagolobocastro + - mtzaurus # A list of keywords to be skipped the process that add reviewers if pull requests include it skipKeywords: diff --git a/.github/workflows/pr-commitlint.yml b/.github/workflows/pr-commitlint.yml index b691ce993..1a324894c 100644 --- a/.github/workflows/pr-commitlint.yml +++ b/.github/workflows/pr-commitlint.yml @@ -17,8 +17,3 @@ jobs: first_commit=$(curl ${{ github.event.pull_request.commits_url }} 2>/dev/null | jq '.[0].sha' | sed 's/"//g') last_commit=HEAD^2 # don't lint the merge commit npx commitlint --from $first_commit~1 --to $last_commit -V - - name: Lint Pull Request - env: - TITLE: ${{ github.event.pull_request.title }} - BODY: ${{ github.event.pull_request.body }} - run: export NL=; printenv TITLE NL BODY | npx commitlint -V diff --git a/.gitignore b/.gitignore index 95ca3f72a..366db3baa 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,9 @@ artifacts/ **/__pycache__ /chart/charts/ ansible-hosts +/test/python/csi_pb2.py +/test/python/csi_pb2_grpc.py +/test/python/mayastor_pb2.py +/test/python/mayastor_pb2_grpc.py +/test/python/venv/* +/package.json diff --git a/.gitmodules b/.gitmodules index e69de29bb..6b0370c90 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "rpc/mayastor-api"] + path = rpc/mayastor-api + url = https://github.com/openebs/mayastor-api diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f9183550f..d12261056 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,3 +37,10 @@ repos: entry: bash -c "npm install @commitlint/config-conventional @commitlint/cli; cat $1 | npx commitlint" args: [$1] stages: [commit-msg] + - id: python-check + name: python lint + entry: black + description: runs black against the python code + pass_filenames: true + types: [file, python] + language: system diff --git a/Cargo.lock b/Cargo.lock index 20c33e666..9d5e65d13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.15.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] @@ -46,9 +46,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.41" +version = "1.0.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" +checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1" [[package]] name = "assert_matches" @@ -67,100 +67,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "async-executor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "once_cell", - "slab", -] - -[[package]] -name = "async-fs" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3ca4f8ff117c37c278a2f7415ce9be55560b846b5bc4412aaa5d29c1c3dae2" -dependencies = [ - "async-lock", - "blocking", - "futures-lite", -] - -[[package]] -name = "async-io" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bbfd5cf2794b1e908ea8457e6c45f8f8f1f6ec5f74617bf4662623f47503c3b" -dependencies = [ - "concurrent-queue", - "fastrand", - "futures-lite", - "libc", - "log", - "once_cell", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "winapi", -] - -[[package]] -name = "async-lock" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-net" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b0a74e7f70af3c8cf1aa539edbd044795706659ac52b78a71dc1a205ecefdf" -dependencies = [ - "async-io", - "blocking", - "fastrand", - "futures-lite", -] - -[[package]] -name = "async-process" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f38756dd9ac84671c428afbf7c9f7495feff9ec5b0710f17100098e5b354ac" -dependencies = [ - "async-io", - "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "libc", - "once_cell", - "signal-hook 0.3.9", - "winapi", -] - -[[package]] -name = "async-rustls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f38092e8f467f47aadaff680903c7cbfeee7926b058d7f40af2dd4c878fbdee" -dependencies = [ - "futures-lite", - "rustls", - "webpki", -] - [[package]] name = "async-stream" version = "0.3.2" @@ -177,9 +83,9 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -190,13 +96,13 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -224,25 +130,19 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.60" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -255,7 +155,7 @@ version = "1.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44265cf903f576fcaa1c2f23b32ec2dadaa8ec9d6b7c6212704d72a417bfbeef" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] @@ -274,16 +174,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f8523b410d7187a43085e7e064416ea32ded16bd0a4e6fc025e21616d01258f" dependencies = [ "bitflags", - "cexpr", + "cexpr 0.4.0", "clang-sys", "clap", - "env_logger", + "env_logger 0.8.4", "lazy_static", "lazycell", "log", "peeking_take_while", - "proc-macro2", - "quote", + "proc-macro2 1.0.32", + "quote 1.0.10", + "regex", + "rustc-hash", + "shlex", + "which 3.1.1", +] + +[[package]] +name = "bindgen" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" +dependencies = [ + "bitflags", + "cexpr 0.5.0", + "clang-sys", + "clap", + "env_logger 0.8.4", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2 1.0.32", + "quote 1.0.10", "regex", "rustc-hash", "shlex", @@ -297,21 +220,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] -name = "blkid" -version = "0.2.1" -source = "git+https://github.com/openebs/blkid?branch=blkid-sys#3dc458b001fffeaa4525296b838d1aafd51a7c33" -dependencies = [ - "blkid-sys", - "err-derive", - "libc", -] - -[[package]] -name = "blkid-sys" -version = "0.1.4" -source = "git+https://github.com/openebs/blkid?branch=blkid-sys#3dc458b001fffeaa4525296b838d1aafd51a7c33" +name = "bitvec" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" dependencies = [ - "bindgen", + "funty", + "radium", + "tap", + "wyz", ] [[package]] @@ -339,22 +256,21 @@ dependencies = [ [[package]] name = "bollard" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "699194c00f3a2effd3358d47f880646818e3d483190b17ebcdf598c654fb77e9" +checksum = "a4a3f238d4b66f33d9162893ade03cd8a485320f591b244ea5b7f236d3494e98" dependencies = [ - "base64 0.13.0", + "base64", "bollard-stubs", "bytes", "chrono", - "ct-logs", "dirs-next", "futures-core", "futures-util", "hex", "http", "hyper", - "hyper-unix-connector", + "hyperlocal", "log", "pin-project", "serde", @@ -387,15 +303,18 @@ checksum = "b4ae4235e6dac0694637c763029ecea1a2ec9e4e06ec2729bd21ba4d9c863eb7" [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "byte-unit" -version = "3.1.4" +version = "4.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "415301c9de11005d4b92193c0eb7ac7adc37e5a49e0ac9bed0a42343512744b8" +checksum = "956ffc5b0ec7d7a6949e3f21fd63ba5af4cffdc2ba1e0b7bf62b481458c4ae7f" +dependencies = [ + "utf8-width", +] [[package]] name = "byteorder" @@ -405,9 +324,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cache-padded" @@ -417,9 +336,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" [[package]] name = "cexpr" @@ -427,14 +346,17 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ - "nom", + "nom 5.1.2", ] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "cexpr" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" +dependencies = [ + "nom 6.1.2", +] [[package]] name = "cfg-if" @@ -458,9 +380,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" dependencies = [ "glob", "libc", @@ -521,9 +443,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.7.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -531,15 +453,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.1.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" dependencies = [ "libc", ] @@ -555,11 +477,11 @@ dependencies = [ [[package]] name = "crossbeam" -version = "0.7.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -569,49 +491,46 @@ dependencies = [ [[package]] 
name = "crossbeam-channel" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ + "cfg-if", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-deque" -version = "0.7.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ + "cfg-if", "crossbeam-epoch", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if", "crossbeam-utils", "lazy_static", - "maybe-uninit", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.2.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "crossbeam-utils", - "maybe-uninit", ] [[package]] @@ -622,12 +541,11 @@ checksum = "3f901f6110b99fdcf0825d0f35e5e26b7ae222ed0d6f2a07c595043e7d5b9429" [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if", "lazy_static", ] @@ -637,10 +555,10 @@ version = "0.2.0" dependencies = [ "async-stream", "async-trait", - "blkid", "chrono", "clap", - "env_logger", + "devinfo", + "env_logger 0.9.0", "failure", "futures", "glob", @@ -666,23 +584,14 @@ dependencies = [ "udev", "url", "uuid", - "which 3.1.1", -] - -[[package]] -name = "ct-logs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" -dependencies = [ - "sct", + "which 4.2.2", ] [[package]] name = "curve25519-dalek" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest", @@ -719,10 +628,10 @@ checksum = "8e91455b86830a1c21799d94524df0845183fa55bafd9aa137b01c7d1065fa36" dependencies = [ "fnv", "ident_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.32", + "quote 1.0.10", "strsim 0.10.0", - "syn", + "syn 1.0.81", ] [[package]] @@ -733,10 +642,10 @@ checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" dependencies = [ "fnv", "ident_case", - "proc-macro2", - "quote", + "proc-macro2 1.0.32", + "quote 1.0.10", "strsim 0.10.0", - "syn", + "syn 1.0.81", ] [[package]] @@ -746,8 +655,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"29b5acf0dea37a7f66f7b25d2c5e93fd46f8f6968b1a5d7a3e02e97768afc95a" dependencies = [ "darling_core 0.12.4", - "quote", - "syn", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -757,8 +666,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" dependencies = [ "darling_core 0.13.0", - "quote", - "syn", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -783,9 +692,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66e616858f6187ed828df7c64a6d71720d83767a7f19740b2d1b6fe6327b36e5" dependencies = [ "darling 0.12.4", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -795,13 +704,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58a94ace95092c5acb1e97a7e846b310cfbd499652f72297da7493f618a98d73" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.81", ] [[package]] name = "devinfo" version = "0.1.0" dependencies = [ + "bindgen 0.59.1", "snafu", "udev", "url", @@ -823,7 +733,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -840,11 +750,11 @@ dependencies = [ [[package]] name = "dns-lookup" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8138b2add98729577ebb861755b154635cbb4dc30cb777cd9944bfdc70a3a075" +checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "socket2", "winapi", @@ -878,9 +788,9 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -891,9 +801,9 @@ checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" [[package]] name = "ed25519" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" +checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ "signature", ] @@ -906,8 +816,6 @@ checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", - "rand 0.7.3", - "serde", "sha2", "zeroize", ] @@ -920,13 +828,13 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "enum-primitive-derive" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f52288f9a7ebb08959188872b58e7eaa12af9cb47da8e94158e16da7e143340" +checksum = "c375b9c5eadb68d0a6efee2999fef292f45854c3444c86f09d8ab086ba942b0e" dependencies = [ "num-traits", - "quote", - "syn", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -942,6 +850,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "err-derive" version = 
"0.2.4" @@ -949,18 +870,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22deed3a8124cff5fa835713fa105621e43bbdc46690c3a6b68328a012d350d4" dependencies = [ "proc-macro-error", - "proc-macro2", - "quote", + "proc-macro2 1.0.32", + "quote 1.0.10", "rustversion", - "syn", + "syn 1.0.81", "synstructure", ] [[package]] name = "errno" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ "errno-dragonfly", "libc", @@ -969,19 +890,19 @@ dependencies = [ [[package]] name = "errno-dragonfly" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ - "gcc", + "cc", "libc", ] [[package]] name = "etcd-client" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06da8620f9398a2f5d24a38d77baee793a270d6bbd1c41418e3e59775b6700bd" +checksum = "76b9f5b0b4f53cf836bef05b22cd5239479700bc8d44a04c3c77f1ba6c2c73e9" dependencies = [ "http", "prost", @@ -989,6 +910,7 @@ dependencies = [ "tokio-stream", "tonic", "tonic-build", + "tower-service", ] [[package]] @@ -1013,17 +935,17 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", "synstructure", ] [[package]] name = "fastrand" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b705829d1e87f762c2df6da140b26af5839e1033aa84aa5f56bb688e4e1bdb" +checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e" dependencies = [ "instant", ] @@ -1056,15 +978,41 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a50045aa8931ae01afbc5d72439e8f57f326becb8c70d07dfc816778eff3d167" dependencies = [ - "rand 0.8.3", + "rand 0.8.4", "users", ] +[[package]] +name = "function_name" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b2afa9b514dc3a75af6cf24d1914e1c7eb6f1b86de849147563548d5c0a0cd" +dependencies = [ + "function_name-proc-macro", +] + +[[package]] +name = "function_name-proc-macro" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6790a8d356d2f65d7972181e866b92a50a87c27d6a48cbe9dbb8be13ca784c7d" +dependencies = [ + "proc-macro-crate", + "quote 0.6.13", + "syn 0.15.44", +] + +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "futures" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" dependencies = [ "futures-channel", "futures-core", @@ -1077,9 +1025,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.15" 
+version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" +checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" dependencies = [ "futures-core", "futures-sink", @@ -1087,15 +1035,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" [[package]] name = "futures-executor" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" +checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" dependencies = [ "futures-core", "futures-task", @@ -1104,9 +1052,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" [[package]] name = "futures-lite" @@ -1125,34 +1073,34 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" dependencies = [ "autocfg", "proc-macro-hack", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ "autocfg", "futures-channel", @@ -1169,12 +1117,6 @@ dependencies = [ "slab", ] -[[package]] -name = "gcc" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - [[package]] name = "generic-array" version = "0.14.4" @@ -1191,7 +1133,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -1202,22 +1144,22 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - 
"cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] name = "gimli" -version = "0.24.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "git-version" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94918e83f1e01dedc2e361d00ce9487b14c58c7f40bab148026fa39d42cb41e2" +checksum = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899" dependencies = [ "git-version-macro", "proc-macro-hack", @@ -1225,14 +1167,14 @@ dependencies = [ [[package]] name = "git-version-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34a97a52fdee1870a34fa6e4b77570cba531b27d1838874fef4429a791a3d657" +checksum = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f" dependencies = [ "proc-macro-hack", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -1244,7 +1186,7 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" version = "0.3.3" -source = "git+https://github.com/openebs/h2?branch=v0.3.3#64033595cbf9211e670481c519a9c0ac7691891e" +source = "git+https://github.com/openebs/h2?rev=0.3.3#64033595cbf9211e670481c519a9c0ac7691891e" dependencies = [ "bytes", "fnv", @@ -1261,9 +1203,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "heck" @@ -1276,9 +1218,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -1291,9 +1233,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes", "fnv", @@ -1302,9 +1244,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", @@ -1313,9 +1255,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" @@ -1331,9 +1273,9 @@ checksum = 
"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.9" +version = "0.14.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" +checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" dependencies = [ "bytes", "futures-channel", @@ -1354,12 +1296,24 @@ dependencies = [ ] [[package]] -name = "hyper-unix-connector" -version = "0.2.2" +name = "hyper-timeout" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ef1fd95d34b4ff007d3f0590727b5cf33572cace09b42032fc817dc8b16557" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "anyhow", + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyperlocal" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c" +dependencies = [ + "futures-util", "hex", "hyper", "pin-project", @@ -1385,9 +1339,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", "hashbrown", @@ -1395,18 +1349,18 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "io-uring" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7589adca0ddd74f56ed83a5098b45e3abf264dc27e150a8bec3397fcc34338" +checksum = "8d75829ed9377bab6c90039fe47b9d84caceb4b5063266142e21bcce6550cda8" dependencies = [ "bitflags", "libc", @@ -1420,33 +1374,33 @@ checksum = "1745979ddec01f66bfed491fee60958959015239cb9c8878960a18cb73906be7" [[package]] name = "ipnetwork" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" +checksum = "4088d739b183546b239688ddbc79891831df421773df95e236daf7867866d355" dependencies = [ "serde", ] [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "js-sys" -version = "0.3.51" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" 
dependencies = [ "wasm-bindgen", ] @@ -1484,17 +1438,17 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.97" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" [[package]] name = "libloading" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +checksum = "c0cf036d15402bea3c5d4de17b3fce76b3e4a56ebc1f577be0e7a72f7c607cf0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -1516,9 +1470,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ "scopeguard", ] @@ -1529,15 +1483,16 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "loopdev" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac9e35cfb6646d67059f2ca8913a90e6c60633053c103df423975297f33d6fcc" +checksum = "94c0ef06b33e606cc9d0cb38d8f4cda8c313a3d182c80a830d39aab1762d8b9a" dependencies = [ + "bindgen 0.58.1", "errno", "libc", ] @@ -1553,9 +1508,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "mayastor" @@ -1563,6 +1518,7 @@ version = "0.9.0" dependencies = [ "ansi_term 0.12.1", "assert_matches", + "async-channel", "async-task", "async-trait", "atty", @@ -1577,8 +1533,9 @@ dependencies = [ "crossbeam", "crossbeam-sync", "dns-lookup", - "env_logger", + "env_logger 0.9.0", "etcd-client", + "function_name", "futures", "git-version", "http", @@ -1591,7 +1548,6 @@ dependencies = [ "mbus_api", "md5", "merge", - "nats", "nix", "nvmeadm", "once_cell", @@ -1601,14 +1557,13 @@ dependencies = [ "prost", "prost-derive", "prost-types", - "rand 0.7.3", + "rand 0.8.4", "rpc", "run_script", "serde", "serde_json", "serde_yaml", - "signal-hook 0.1.17", - "smol", + "signal-hook", "snafu", "spdk-sys", "structopt", @@ -1626,12 +1581,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "mbus_api" version = "0.1.0" @@ -1646,7 +1595,6 @@ dependencies = [ "rpc", "serde", "serde_json", - "smol", "snafu", "strum", "strum_macros", @@ -1663,15 +1611,15 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ "autocfg", ] @@ -1693,9 +1641,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209d075476da2e63b4b29e72a2ef627b840589588e71400a25e3565c4f849d07" dependencies = [ "proc-macro-error", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -1710,9 +1658,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.11" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", "log", @@ -1738,47 +1686,51 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "nats" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b716f15b711daea70d5da9195f5c10063d2a14d74b8dba256f8eb6d45d8b29" +version = "0.15.2" +source = "git+https://github.com/openebs/nats.rs?rev=main_fixes#6fc96e7923b9489cccfa38ebe8d44c1ccf46014d" dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "async-net", - "async-rustls", - "base64 0.13.0", + "base64", "base64-url", + "blocking", + "chrono", + "crossbeam-channel", "fastrand", - "futures-lite", "itoa", "json", + "libc", "log", + "memchr", "nkeys", "nuid", "once_cell", + "parking_lot", "regex", + "rustls", "rustls-native-certs", + "serde", + "serde_json", + "webpki", + "winapi", ] [[package]] name = "nix" -version = "0.20.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a" +checksum = "d3bb9a13fa32bc5aeb64150cd3f32d6cf4c748f8f8a417cce5d2eb976a8370ba" dependencies = [ "bitflags", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", + "memoffset", ] [[package]] name = "nkeys" -version = "0.0.11" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0aa1a33567887c95af653f9f88e482e34df8eaabb98df92cf5c81dfd882b0a" +checksum = "c1a98f0a974ff737974b57ba1c71d2e0fe7ec18e5a828d4b8e02683171349dfa" dependencies = [ "byteorder", "data-encoding", @@ -1798,6 +1750,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "nom" +version = "6.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" +dependencies = [ + "bitvec", + "funty", + "memchr", + "version_check", +] + [[package]] name = "ntapi" version = "0.3.6" @@ -1809,12 +1773,12 @@ dependencies = [ [[package]] name = "nuid" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8061bec52f76dc109f1a392ee03afcf2fae4c7950953de6388bc2f5a57b61979" +checksum = "7000c9392b545c4ba43e8abc086bf7d01cd2948690934c16980170b0549a2bd3" dependencies = [ "lazy_static", - "rand 0.7.3", + "rand 0.8.4", ] [[package]] @@ -1865,9 +1829,9 @@ dependencies = [ [[package]] name = "object" 
-version = "0.25.3" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "memchr", ] @@ -1898,9 +1862,9 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", @@ -1909,11 +1873,11 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -1924,7 +1888,8 @@ dependencies = [ [[package]] name = "partition-identity" version = "0.2.8" -source = "git+https://github.com/openebs/partition-identity.git#0111c030aa82d4dbb1f234a9939ba86535775d36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec13ba9a0eec5c10a89f6ec1b6e9e2ef7d29b810d771355abbd1c43cae003ed6" dependencies = [ "err-derive", ] @@ -1953,29 +1918,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -1985,28 +1950,24 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" [[package]] -name = "polling" -version = "2.0.3" +name = "ppv-lite86" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc12d774e799ee9ebae13f4076ca003b40d18a11ac0f3641e6f899618580b7b" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "log", - "wepoll-sys", - "winapi", -] +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] -name = "ppv-lite86" -version = "0.2.10" +name = "proc-macro-crate" +version = "0.1.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] [[package]] name = "proc-macro-error" @@ -2015,9 +1976,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", "version_check", ] @@ -2027,8 +1988,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2", - "quote", + "proc-macro2 1.0.32", + "quote 1.0.10", "version_check", ] @@ -2046,11 +2007,20 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" dependencies = [ - "unicode-xid", + "unicode-xid 0.1.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +dependencies = [ + "unicode-xid 0.2.2", ] [[package]] @@ -2065,9 +2035,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ "bytes", "prost-derive", @@ -2075,9 +2045,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ "bytes", "heck", @@ -2088,27 +2058,27 @@ dependencies = [ "prost", "prost-types", "tempfile", - "which 4.1.0", + "which 4.2.2", ] [[package]] name = "prost-derive" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", "itertools", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "prost-types" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ "bytes", "prost", @@ -2116,13 +2086,28 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = 
"6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" dependencies = [ - "proc-macro2", + "proc-macro2 0.4.30", ] +[[package]] +name = "quote" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +dependencies = [ + "proc-macro2 1.0.32", +] + +[[package]] +name = "radium" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" + [[package]] name = "rand" version = "0.7.3" @@ -2138,14 +2123,14 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -2165,7 +2150,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -2179,9 +2164,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom 0.2.3", ] @@ -2197,18 +2182,18 @@ dependencies = [ [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] name = "redox_syscall" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] @@ -2299,9 +2284,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc-hash" @@ -2311,11 +2296,11 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -2324,9 +2309,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" +checksum = 
"5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", "rustls", @@ -2374,9 +2359,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "1.0.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ "bitflags", "core-foundation", @@ -2387,9 +2372,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "1.0.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" +checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" dependencies = [ "core-foundation-sys", "libc", @@ -2397,29 +2382,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" dependencies = [ "itoa", "ryu", @@ -2440,9 +2425,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "1.9.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e3132bd01cfb74aac8b1b10083ad1f38dbf756df3176d5e63dd91e3f62a87f5" +checksum = "ad6056b4cb69b6e43e3a0f055def223380baecc99da683884f205bf347f7c4b3" dependencies = [ "rustversion", "serde", @@ -2451,36 +2436,36 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "1.4.2" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1569374bd54623ec8bd592cf22ba6e03c0f177ff55fbc8c29a49e296e7adecf" +checksum = "12e47be9471c72889ebafb5e14d5ff930d89ae7a67bbdb5f8abb564f845a927e" dependencies = [ "darling 0.13.0", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "serde_yaml" -version = "0.8.17" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" +checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" dependencies = [ "dtoa", - "linked-hash-map", + "indexmap", "serde", "yaml-rust", ] [[package]] name = "sha2" -version = "0.9.5" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" +checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer", - "cfg-if 
1.0.0", + "cfg-if", "cpufeatures", "digest", "opaque-debug", @@ -2488,34 +2473,24 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d" - -[[package]] -name = "signal-hook" -version = "0.1.17" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" -dependencies = [ - "libc", - "signal-hook-registry", -] +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "470c5a6397076fae0094aaf06a08e6ba6f37acb77d3b1b91ea92b4d6c8650c39" +checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" dependencies = [ "libc", "signal-hook-registry", @@ -2544,39 +2519,21 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" [[package]] name = "slab" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "smol" -version = "1.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cf3b5351f3e783c1d79ab5fc604eeed8b8ae9abd36b166e8b87a089efd85e4" -dependencies = [ - "async-channel", - "async-executor", - "async-fs", - "async-io", - "async-lock", - "async-net", - "async-process", - "blocking", - "futures-lite", - "once_cell", -] +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "snafu" @@ -2594,16 +2551,16 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "socket2" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ "libc", "winapi", @@ -2613,7 +2570,7 @@ dependencies = [ name = "spdk-sys" version = "0.1.0" dependencies = [ - "bindgen", + "bindgen 0.59.1", "cc", ] @@ -2637,9 +2594,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structopt" -version = "0.3.21" 
+version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" dependencies = [ "clap", "lazy_static", @@ -2648,40 +2605,40 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.14" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck", "proc-macro-error", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "strum" -version = "0.19.5" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" +checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" [[package]] name = "strum_macros" -version = "0.19.4" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" +checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" dependencies = [ "heck", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "subtle-encoding" @@ -2694,32 +2651,43 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.73" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] + +[[package]] +name = "syn" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", + "proc-macro2 1.0.32", + "quote 1.0.10", + "unicode-xid 0.2.2", ] [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", + "unicode-xid 0.2.2", ] [[package]] name = "sys-mount" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f5703caf67c45ad3450104001b4620a605e9def0cef13dde3c9add23f73cee" +checksum = "777948089ea2ab5673e2062ff9818dd8ea9db04941f0ea9ab408b855858cc715" dependencies = [ "bitflags", "libc", @@ -2730,15 +2698,21 @@ dependencies = [ name = "sysfs" version = "0.1.0" +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.3", + "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -2764,22 +2738,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.25" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.25" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] @@ -2793,19 +2767,20 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" dependencies = [ "tinyvec_macros", ] @@ -2818,9 +2793,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.6.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a38d31d7831c6ed7aad00aa4c12d9375fd225a6dd77da1d25b707346319a975" +checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" dependencies = [ "autocfg", "bytes", @@ -2836,22 +2811,32 @@ dependencies = [ "winapi", ] +[[package]] +name = "tokio-io-timeout" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" -version = "1.2.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "tokio-stream" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2860,9 +2845,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2872,15 +2857,24 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + [[package]] name = "tonic" -version = "0.4.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac42cd97ac6bd2339af5bcabf105540e21e45636ec6fa6aae5e85d44db31be0" +checksum = "796c5e1cd49905e65dd8e700d4cb1dffcbfdb4fc9d017de08c1a537afd83627c" dependencies = [ "async-stream", "async-trait", - "base64 0.13.0", + "base64", "bytes", "futures-core", "futures-util", @@ -2888,6 +2882,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-timeout", "percent-encoding", "pin-project", "prost", @@ -2896,6 +2891,7 @@ dependencies = [ "tokio-stream", "tokio-util", "tower", + "tower-layer", "tower-service", "tracing", "tracing-futures", @@ -2903,27 +2899,28 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c695de27302f4697191dda1c7178131a8cb805463dda02864acb80fe1322fdcf" +checksum = "12b52d07035516c2b74337d2ac7746075e7dcae7643816c1b12c5ff8a7484c08" dependencies = [ - "proc-macro2", + "proc-macro2 1.0.32", "prost-build", - "quote", - "syn", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "tower" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60422bc7fefa2f3ec70359b8ff1caff59d785877eb70595904605bcc412470f" +checksum = "c00e500fff5fa1131c866b246041a6bf96da9c965f8fe4128cb1421f23e93c00" dependencies = [ "futures-core", "futures-util", "indexmap", "pin-project", - "rand 0.8.3", + "pin-project-lite", + "rand 0.8.4", "slab", "tokio", "tokio-stream", @@ -2947,11 +2944,11 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -2960,20 +2957,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", ] [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -3011,9 +3008,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.18" +version = "0.2.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -3039,9 +3036,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" [[package]] name = "udev" @@ -3056,12 +3053,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -3074,15 +3068,21 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" @@ -3118,6 +3118,12 @@ dependencies = [ "log", ] +[[package]] +name = "utf8-width" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b" + [[package]] name = "uuid" version = "0.8.2" @@ -3163,69 +3169,69 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", "wasm-bindgen-shared", ] [[package]] name 
= "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ - "quote", + "quote 1.0.10", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3241,32 +3247,23 @@ dependencies = [ "untrusted", ] -[[package]] -name = "wepoll-sys" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" dependencies = [ - "failure", "libc", ] [[package]] name = "which" -version = "4.1.0" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55551e42cbdf2ce2bedd2203d0cc08dba002c27510f86dab6d0ce304cba3dfe" +checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" dependencies = [ "either", + "lazy_static", "libc", ] @@ -3301,6 +3298,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "yaml-rust" version = "0.4.5" @@ -3312,21 +3315,21 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.3.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" dependencies = [ - "proc-macro2", - "quote", - "syn", + "proc-macro2 1.0.32", + "quote 1.0.10", + "syn 1.0.81", "synstructure", ] 
diff --git a/Cargo.toml b/Cargo.toml index 92051901e..d5c30a444 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [patch.crates-io] -partition-identity = { git = "https://github.com/openebs/partition-identity.git" } -h2 = { git = "https://github.com/openebs/h2", branch = "v0.3.3"} +h2 = { git = "https://github.com/openebs/h2", rev = "0.3.3"} +# Nats with the tcp stream timeout fix: CAS-1192 +nats = { git = "https://github.com/openebs/nats.rs", rev="main_fixes" } [profile.dev] panic = "abort" diff --git a/Dockerfile b/Dockerfile index b9dfdcaf8..9687e8c2a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,10 +12,9 @@ RUN nix-env -i bash git nano sudo procps # Copy all nix files from the repo so that we can use them to install # mayastor dependencies -COPY shell.nix $NIX_EXPR_DIR/ +COPY ci.nix $NIX_EXPR_DIR/ COPY nix $NIX_EXPR_DIR/nix -COPY csi/moac/*.nix $NIX_EXPR_DIR/csi/moac/ RUN cd $NIX_EXPR_DIR && \ - nix-shell --argstr channel nightly --command "echo Debug dependencies done" && \ - nix-shell --argstr channel stable --command "echo Release dependencies done" + nix-shell --argstr channel nightly --command "echo Debug dependencies done" ci.nix && \ + nix-shell --argstr channel stable --command "echo Release dependencies done" ci.nix diff --git a/Jenkinsfile b/Jenkinsfile index bff038d59..7c8aa5b7a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -98,13 +98,13 @@ def lokiInstall(tag, loki_run_id) { sh 'kubectl apply -f ./mayastor-e2e/loki/promtail_rbac_e2e.yaml' sh 'kubectl apply -f ./mayastor-e2e/loki/promtail_configmap_e2e.yaml' def cmd = "run=\"${loki_run_id}\" version=\"${tag}\" envsubst -no-unset < ./mayastor-e2e/loki/promtail_daemonset_e2e.template.yaml | kubectl apply -f -" - sh "nix-shell --run '${cmd}'" + sh "nix-shell --run '${cmd}' ci.nix" } // Unnstall Loki def lokiUninstall(tag, loki_run_id) { def cmd = "run=\"${loki_run_id}\" version=\"${tag}\" envsubst -no-unset < ./mayastor-e2e/loki/promtail_daemonset_e2e.template.yaml | kubectl delete -f -" - sh "nix-shell --run '${cmd}'" + sh "nix-shell --run '${cmd}' ci.nix" sh 'kubectl delete -f ./mayastor-e2e/loki/promtail_configmap_e2e.yaml' sh 'kubectl delete -f ./mayastor-e2e/loki/promtail_rbac_e2e.yaml' sh 'kubectl delete -f ./mayastor-e2e/loki/promtail_namespace_e2e.yaml' @@ -139,7 +139,7 @@ if (params.e2e_continuous == true) { run_linter = false rust_test = false grpc_test = false - moac_test = false + pytest_test = false e2e_test_profile = "continuous" // use images from dockerhub tagged with e2e_continuous_image_tag instead of building from current source e2e_build_images = false @@ -151,7 +151,7 @@ if (params.e2e_continuous == true) { run_linter = true rust_test = true grpc_test = true - moac_test = true + pytest_test = true // Some long e2e tests are not suitable to be run for each PR e2e_test_profile = (env.BRANCH_NAME != 'staging' && env.BRANCH_NAME != 'trying') ? 
"nightly" : "ondemand" e2e_build_images = true @@ -214,9 +214,9 @@ pipeline { steps { cleanWs() unstash 'source' - sh 'nix-shell --run "cargo fmt --all -- --check"' - sh 'nix-shell --run "cargo clippy --all-targets -- -D warnings"' - sh 'nix-shell --run "./scripts/js-check.sh"' + sh 'nix-shell --run "cargo fmt --all -- --check" ci.nix' + sh 'nix-shell --run "cargo clippy --all-targets -- -D warnings" ci.nix' + sh 'nix-shell --run "./scripts/js-check.sh" ci.nix' } } stage('test') { @@ -243,7 +243,7 @@ pipeline { cleanWs() unstash 'source' sh 'printenv' - sh 'nix-shell --run "./scripts/cargo-test.sh"' + sh 'nix-shell --run "./scripts/cargo-test.sh" ci.nix' } post { always { @@ -266,7 +266,7 @@ pipeline { cleanWs() unstash 'source' sh 'printenv' - sh 'nix-shell --run "./scripts/grpc-test.sh"' + sh 'nix-shell --run "./scripts/grpc-test.sh" ci.nix' } post { always { @@ -275,21 +275,40 @@ pipeline { } } } - stage('moac unit tests') { + stage('pytest tests') { when { beforeAgent true - expression { moac_test == true } + expression { pytest_test == true } } - agent { label 'nixos-mayastor' } - steps { - cleanWs() - unstash 'source' - sh 'printenv' - sh 'nix-shell --run "./scripts/moac-test.sh"' - } - post { - always { - junit 'moac-xunit-report.xml' + agent { label 'virtual-nixos-mayastor' } + stages { + stage('checkout') { + steps { + cleanWs() + checkout([ + $class: 'GitSCM', + branches: scm.branches, + extensions: scm.extensions.findAll{!(it instanceof jenkins.plugins.git.GitSCMSourceDefaults)} + [[$class: 'CloneOption', noTags: false, reference: '', shallow: false]], + userRemoteConfigs: scm.userRemoteConfigs + ]) + } + } + stage('build') { + steps { + sh 'printenv' + sh 'nix-shell --run "cargo build --bins" ci.nix' + } + } + stage('python setup') { + steps { + sh 'nix-shell --run "./test/python/setup.sh" ci.nix' + } + } + stage('run tests') { + steps { + sh 'printenv' + sh 'nix-shell --run "./scripts/pytest-tests.sh" ci.nix' + } } } } @@ -384,7 +403,7 @@ pipeline { usernamePassword(credentialsId: 'GRAFANA_API', usernameVariable: 'grafana_api_user', passwordVariable: 'grafana_api_pw') ]) { lokiInstall(tag, loki_run_id) - sh "nix-shell --run 'cd mayastor-e2e && ${cmd}'" + sh "nix-shell --run 'cd mayastor-e2e && ${cmd}' ci.nix" lokiUninstall(tag, loki_run_id) // so that, if we keep the cluster, the next Loki instance can use different parameters } } @@ -395,7 +414,7 @@ pipeline { withCredentials([string(credentialsId: 'HCLOUD_TOKEN', variable: 'HCLOUD_TOKEN')]) { e2e_nodes=sh( script: """ - nix-shell -p hcloud --run 'hcloud server list' | grep -e '-${k8s_job.getNumber()} ' | awk '{ print \$2" "\$4 }' + nix-shell -p hcloud --run 'hcloud server list' ci.nix | grep -e '-${k8s_job.getNumber()} ' | awk '{ print \$2" "\$4 }' """, returnStdout: true ).trim() diff --git a/README.md b/README.md index 79fa8582d..734e9a107 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,24 @@ -# MayaStor +# Mayastor [![Releases](https://img.shields.io/github/release/openebs/Mayastor/all.svg?style=flat-square)](https://github.com/openebs/Mayastor/releases) -[![CI-basic](https://mayastor-ci.mayadata.io/buildStatus/icon?job=Mayastor%2Fmaster)](https://mayastor-ci.mayadata.io/blue/organizations/jenkins/Mayastor/activity/) +[![CI-basic](https://mayastor-ci.mayadata.io/buildStatus/icon?job=Mayastor%2Fdevelop)](https://mayastor-ci.mayadata.io/blue/organizations/jenkins/Mayastor/activity/) [![Slack](https://img.shields.io/badge/JOIN-SLACK-blue)](https://kubernetes.slack.com/messages/openebs) -[![FOSSA 
Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor?ref=badge_shield) [![built with nix](https://builtwithnix.org/badge.svg)](https://builtwithnix.org) -OpenEBS Logo +Table of contents: +================== +- [Quickly deploy it on K8s and get started](https://mayastor.gitbook.io) + - [Deploying on microk8s](/doc/microk8s.md) +- [High-level overview](#overview) + - [The Nexus CAS module](#Nexus) + - [Local storage](#local-storage) + - [Exporting a Nexus](#exporting-the-nexus) +- [Building from source](/doc/build.md) +- [Examples of the Nexus module](/doc/mcli.md) +- [Frequently asked questions](/doc/FAQ.md)

-MayaStor is a cloud-native declarative data plane written in Rust. +Mayastor is a cloud-native declarative data plane written in Rust. Our goal is to abstract storage resources and their differences through the data plane such that users only need to supply the what and do not have to worry about the how so that individual teams stay in control. @@ -31,38 +40,21 @@ Some targeted use cases are: The official user documentation for the Mayastor Project is published here in GitBook format: [mayastor.gitbook.io](https://mayastor.gitbook.io/) -# Project Status - -Mayastor is currently beta software. From Wikipedia: "it (beta software) will generally have many more bugs in it than completed software and speed or performance issues, and may still cause crashes or data loss." - -The project's maintainers operate a live issue tracking dashboard for defects which they have under active triage and investigation. It can be accessed [here](https://mayadata.atlassian.net/secure/Dashboard.jspa?selectPageId=10015). You are strongly encouraged to familisarise yourself with the issues identified there before deploying Mayastor and/or raising issue reports. - -Table of contents: -================== -- [Quickly deploy it on K8s and get started](/deploy/README.md) - - [Deploying on microk8s](/doc/microk8s.md) -- [High-level overview](#overview) - - [The Nexus CAS module](#Nexus) - - [Local storage](#local-storage) - - [Exporting a Nexus](#exporting-the-nexus) -- [Building from source](/doc/build.md) -- [Examples of the Nexus module](/doc/mcli.md) -- [Frequently asked questions](/doc/FAQ.md) - ## Overview -At a high-level, MayaStor consists of two major components. +At a high-level, Mayastor consists of two major components. ### **Control plane:** - * A single instance K8s controller which implements the [CSI](https://github.com/container-storage-interface/spec/blob/master/spec.md) - controller spec but also private interfaces that otherwise would be implemented by your storage system. This is called Mother Of All Containers native storage or *MOAC* for short; it runs as a k8s deployment. + * A microservices patterned control plane, centered around a core agent which publically exposes a RESTful API. This is extended by a dedicated operator responsible + for managing the life cycle of "Mayastor Pools" (an abstraction for devices supplying the cluster with persistent backing storage) and a CSI compliant external provisioner (controller). + Source code for the control plane components is located in its [own repository](https://github.com/openebs/mayastor-control-plane) * A _per_ node instance *mayastor-csi* plugin which implements the identity and node grpc services from CSI protocol. ### **Data plane:** -* Each node you wish to use for storage or storage services will have to run a MayaStor daemon set. MayaStor itself has three major components: the Nexus, a local storage component, and the mayastor-csi plugin. +* Each node you wish to use for storage or storage services will have to run a Mayastor daemon set. Mayastor itself has three major components: the Nexus, a local storage component, and the mayastor-csi plugin. 
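A quick, hedged illustration of how these two planes typically surface in a running cluster (this assumes the default `mayastor` namespace used by the deployment manifests; exact resource names can vary between releases):

```bash
# data-plane pods are created by the Mayastor daemon set
kubectl -n mayastor get daemonsets,pods

# pools declared for the cluster appear as MayastorPool custom resources (short name: msp)
kubectl -n mayastor get msp
```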
## Nexus @@ -96,8 +88,8 @@ use mayastor::bdev::nexus::nexus_bdev::nexus_create; let children = vec![ "aio:////disk1.img?blk_size=512".to_string(), // it is assumed these hosts are reachable over the network - "iscsi://foobar/iqn.2019-05.io.openebs:disk0".into(), - "nvmf://fooo/nqn.2019-05.io-openebs:disk0".into() + "nvmf://fooo/nqn.2019-05.io-openebs:disk0".into(), + "nvmf://barr/nqn.2019-05.io-openebs:disk0".into() ]; // if no UUID given, one will be generated for you @@ -133,9 +125,8 @@ buf.as_slice().into_iter().map(|b| assert_eq!(b, 0xff)).for_each(drop); We think this can help a lot of database projects as well, where they typically have all the smarts in their database engine and they want the most simple (but fast) storage device. For a more elaborate example see some of the tests in mayastor/tests. -To communicate with the children, the Nexus uses industry standard protocols. Currently, the Nexus has support for -direct access to local storage and remote storage using NVMF or iSCSI. The other advantage is that if you were to remove -the Nexus out of the data path, you would still be able to access your data as if Mayastor was not there. +To communicate with the children, the Nexus uses industry standard protocols. The Nexus supports direct access to local storage and remote storage using NVMe-oF TCP. Another advantage of the implementation is that if you were to remove +the Nexus from the data path, you would still be able to access your data as if Mayastor was not there. The Nexus itself does not store any data and in its most simplistic form the Nexus is a proxy towards real storage devices where the transport may vary. It can however, as mentioned, "transform" the data, which makes it possible to @@ -165,13 +156,8 @@ additional functionality that otherwise would require you setup kernel specific ## Exporting the Nexus

-Our current main focus of development is on NVMe and vhost-user. Vhost-user allows developers to expose virtio devices -implemented as a user space process that the hyper-visor can use to submit IO to. This means that our Nexus can be exposed as a -vhost-user device such that a micro-vm (which typically does not have a feature rich kernel with drivers) can submit IO -to the Nexus. - -In turn, the Nexus can then use nvmf to replicate (if needed) the data to multiple devices and or nodes. Our -vhost-user code can be seen in the link section (still in C). +The primary focus of development is using NVMe as a transport protocol. The Nexus uses +NVMe-oF to replicate a volume's data to multiple devices on multiple nodes (if required).
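As a rough sketch of what consuming an exported Nexus looks like from an initiator, a plain nvme-cli connect against the NVMe-oF TCP target is enough; the address, port and NQN below are placeholder values, not ones emitted by Mayastor:

```bash
# attach to a Nexus exposed over NVMe-oF TCP (placeholder address, port and NQN)
nvme connect -t tcp -a 10.1.0.5 -s 8420 -n nqn.2019-05.io.openebs:nexus-example

# the volume then shows up as an ordinary NVMe namespace on the initiator
nvme list
```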

@@ -223,9 +209,6 @@ other open source projects and are distributed under their respective licenses. ```http://www.apache.org/licenses/LICENSE-2.0``` - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor?ref=badge_large) - ### Contributions Unless you explicitly state otherwise, any contribution intentionally submitted for diff --git a/chart/crds/mayastorpoolcrd.yaml b/chart/crds/mayastorpoolcrd.yaml deleted file mode 120000 index 6f163fc5e..000000000 --- a/chart/crds/mayastorpoolcrd.yaml +++ /dev/null @@ -1 +0,0 @@ -../../csi/moac/crds/mayastorpool.yaml \ No newline at end of file diff --git a/chart/develop/values.yaml b/chart/develop/values.yaml index 6bc92b219..ec574566a 100644 --- a/chart/develop/values.yaml +++ b/chart/develop/values.yaml @@ -12,7 +12,5 @@ etcd: rbac: enabled: false replicaCount: 1 - persistence: - enabled: false mayastorLogLevel: debug diff --git a/chart/release/values.yaml b/chart/release/values.yaml index 9066c8ad5..783ca8813 100644 --- a/chart/release/values.yaml +++ b/chart/release/values.yaml @@ -13,7 +13,5 @@ etcd: rbac: enabled: false replicaCount: 3 - persistence: - enabled: false mayastorLogLevel: info diff --git a/chart/templates/csi-daemonset.yaml b/chart/templates/csi-daemonset.yaml index 941c571cd..5b833ed13 100644 --- a/chart/templates/csi-daemonset.yaml +++ b/chart/templates/csi-daemonset.yaml @@ -28,7 +28,7 @@ spec: # the same. containers: - name: mayastor-csi - image: {{ include "mayastorImagesPrefix" . }}mayadata/mayastor-csi:{{ .Values.mayastorImagesTag }} + image: {{ include "mayastorImagesPrefix" . }}mayadata/mayastor:{{ .Values.mayastorImagesTag }} imagePullPolicy: {{ .Values.mayastorImagePullPolicy }} # we need privileged because we mount filesystems and use mknod securityContext: @@ -43,13 +43,17 @@ spec: fieldRef: fieldPath: status.podIP - name: RUST_BACKTRACE - value: "1" + value: "1"{{ if .Values.moac }} + - name: MOAC + value: "true"{{ end }} args: - "--csi-socket=/csi/csi.sock" - "--node-name=$(MY_NODE_NAME)" - "--grpc-endpoint=$(MY_POD_IP):10199"{{ if .Values.csi.nvme.io_timeout_enabled }} - "--nvme-core-io-timeout={{ .Values.csi.nvme.io_timeout }}"{{ end }} - "-v" + command: + - mayastor-csi volumeMounts: - name: device mountPath: /dev diff --git a/chart/templates/etcd/storage/localpv.yaml b/chart/templates/etcd/storage/localpv.yaml new file mode 100644 index 000000000..b19517703 --- /dev/null +++ b/chart/templates/etcd/storage/localpv.yaml @@ -0,0 +1,23 @@ +--- +{{ if and .Values.etcd.persistence.enabled (eq .Values.etcd.persistence.storageClass "manual") }} +{{- range $index, $end := until (.Values.etcd.replicaCount | int) }} +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: mayastor + name: etcd-volume-{{ $index }} + labels: + statefulset.kubernetes.io/pod-name: mayastor-etcd-{{ $index }} +spec: + storageClassName: manual + # You must also delete the hostpath on the node + persistentVolumeReclaimPolicy: Retain + capacity: + storage: {{ $.Values.etcd.persistence.size | quote }} + accessModes: + - ReadWriteOnce + hostPath: + path: "/var/local/mayastor/etcd/pod-{{ $index }}" +--- +{{- end }} +{{- end }} diff --git a/chart/templates/mayastor-daemonset.yaml b/chart/templates/mayastor-daemonset.yaml index 360e7d325..3aa5a558c 100644 --- a/chart/templates/mayastor-daemonset.yaml +++ b/chart/templates/mayastor-daemonset.yaml @@ -35,7 +35,7 @@ spec: imagePullPolicy: {{ .Values.mayastorImagePullPolicy }} env: 
- name: RUST_LOG - value: mayastor={{ .Values.mayastorLogLevel }} + value: info,mayastor={{ .Values.mayastorLogLevel }} - name: MY_NODE_NAME valueFrom: fieldRef: @@ -44,8 +44,6 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP - - name: IMPORT_NEXUSES - value: "false" args: # The -l argument accepts cpu-list. Indexing starts at zero. # For example -l 1,2,10-20 means use core 1, 2, 10 to 20. @@ -58,6 +56,8 @@ spec: - "-P/var/local/mayastor/pools.yaml" - "-l{{ include "mayastorCpuSpec" . }}" - "-pmayastor-etcd" + command: + - mayastor securityContext: privileged: true volumeMounts: diff --git a/chart/templates/moac-deployment.yaml b/chart/templates/moac-deployment.yaml deleted file mode 100644 index 54a708ea6..000000000 --- a/chart/templates/moac-deployment.yaml +++ /dev/null @@ -1,77 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: moac - namespace: {{ .Release.Namespace }} -spec: - replicas: 1 - selector: - matchLabels: - app: moac - template: - metadata: - labels: - app: moac - spec: - serviceAccount: moac - containers: - - name: csi-provisioner - image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.1 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology=false" - - "--default-fstype=ext4" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: csi-attacher - image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: moac - image: {{ include "mayastorImagesPrefix" . 
}}mayadata/moac:{{ .Values.mayastorImagesTag }} - imagePullPolicy: {{ .Values.mayastorImagePullPolicy }} - args: - - "--csi-address=$(CSI_ENDPOINT)" - - "--port=3000" - - "--watcher-idle-timeout=600000" - - "--etcd-endpoint=mayastor-etcd" - - "--message-bus=nats"{{ if .Values.moacDebug }} - - "-vv"{{ end }} - env: - - name: CSI_ENDPOINT - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - livenessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 20 - timeoutSeconds: 10 - readinessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 20 - timeoutSeconds: 10 - volumes: - - name: socket-dir - emptyDir: diff --git a/chart/templates/moac-rbac.yaml b/chart/templates/moac-rbac.yaml deleted file mode 100644 index ae271ca2d..000000000 --- a/chart/templates/moac-rbac.yaml +++ /dev/null @@ -1,93 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: moac - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: moac -rules: - # must create mayastor crd if it doesn't exist -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create"] - # must read csi plugin info -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - # must read/write mayastor node resources -- apiGroups: ["openebs.io"] - resources: ["mayastornodes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - # must update mayastor node status -- apiGroups: ["openebs.io"] - resources: ["mayastornodes/status"] - verbs: ["update"] - # must read mayastor pools info -- apiGroups: ["openebs.io"] - resources: ["mayastorpools"] - verbs: ["get", "list", "watch", "update", "replace"] - # must update mayastor pools status -- apiGroups: ["openebs.io"] - resources: ["mayastorpools/status"] - verbs: ["update"] - # must read/write mayastor volume resources -- apiGroups: ["openebs.io"] - resources: ["mayastorvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - # must update mayastor volumes status -- apiGroups: ["openebs.io"] - resources: ["mayastorvolumes/status"] - verbs: ["update"] - - # external provisioner & attacher -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - # external provisioner -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["get", "list"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - # external attacher -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: moac -subjects: -- kind: ServiceAccount - name: moac - namespace: {{ .Release.Namespace }} -roleRef: - kind: 
ClusterRole - name: moac - apiGroup: rbac.authorization.k8s.io diff --git a/chart/values.yaml b/chart/values.yaml index b7968688f..b68f0e152 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -10,6 +10,7 @@ mayastorPools: # moac that does not update status of msp resource in some cases. Feel free to # remove when no longer needed. moacDebug: false +moac: false csi: nvme: @@ -27,6 +28,9 @@ nats: trace: false etcd: + ## Number of replicas + ## + replicaCount: 1 ## Kubernetes Cluster Domain ## clusterDomain: cluster.local @@ -46,7 +50,25 @@ etcd: persistence: ## If true, use a Persistent Volume Claim. If false, use emptyDir. ## - enabled: false + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "manual" + ## Persistent Volume size + ## + size: 2Gi + + ## Init containers parameters: + ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. + ## + volumePermissions: + enabled: true + ## Set to true if you would like to see extra information on logs ## debug: false diff --git a/ci.nix b/ci.nix new file mode 100644 index 000000000..ab7931251 --- /dev/null +++ b/ci.nix @@ -0,0 +1,77 @@ +{ nospdk ? false, norust ? false }: +let + sources = import ./nix/sources.nix; + pkgs = import sources.nixpkgs { + overlays = + [ (_: _: { inherit sources; }) (import ./nix/mayastor-overlay.nix) ]; + }; +in +with pkgs; +let + nospdk_moth = + "You have requested environment without SPDK, you should provide it!"; + norust_moth = + "You have requested environment without RUST, you should provide it!"; + channel = import ./nix/lib/rust.nix { inherit sources; }; + # python environment for test/python + pytest_inputs = python3.withPackages + (ps: with ps; [ virtualenv grpcio grpcio-tools asyncssh black ]); +in +mkShell { + name = "mayastor-dev-shell"; + # fortify does not work with -O0 which is used by spdk when --enable-debug + hardeningDisable = [ "fortify" ]; + buildInputs = [ + clang_11 + cowsay + docker + docker-compose + e2fsprogs + etcd + fio + gdb + git + kubernetes-helm + libaio + libiscsi + libudev + liburing + llvmPackages_11.libclang + meson + nats-server + ninja + nodejs-16_x + nvme-cli + numactl + openssl + pkg-config + pre-commit + procps + pytest_inputs + python3 + utillinux + xfsprogs + ] ++ (if (nospdk) then [ libspdk-dev.buildInputs ] else [ libspdk-dev ]) + ++ pkgs.lib.optional (!norust) channel.nightly; + + LIBCLANG_PATH = mayastor.LIBCLANG_PATH; + PROTOC = mayastor.PROTOC; + PROTOC_INCLUDE = mayastor.PROTOC_INCLUDE; + SPDK_PATH = if nospdk then null else "${libspdk-dev}"; + + shellHook = '' + ${pkgs.lib.optionalString (nospdk) "cowsay ${nospdk_moth}"} + ${pkgs.lib.optionalString (nospdk) "export CFLAGS=-msse4"} + ${pkgs.lib.optionalString (nospdk) + ''export RUSTFLAGS="-C link-args=-Wl,-rpath,$(pwd)/spdk-sys/spdk"''} + ${pkgs.lib.optionalString (nospdk) "echo"} + ${pkgs.lib.optionalString (norust) "cowsay ${norust_moth}"} + ${pkgs.lib.optionalString (norust) "echo 'Hint: use rustup tool.'"} + ${pkgs.lib.optionalString (norust) "echo"} + + # SRCDIR is needed by docker-compose files as it requires absolute paths + export SRCDIR=`pwd` + pre-commit install + pre-commit install --hook commit-msg + ''; +} 
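A few sample invocations of the new `ci.nix` shell, mirroring the way the Jenkinsfile stages call it; the `--arg` flags simply exercise the `nospdk`/`norust` parameters declared at the top of the file:

```bash
# enter the CI development shell with the pinned SPDK and Rust toolchain
nix-shell ci.nix

# run a single command inside the shell, as the CI stages do
nix-shell --run "cargo build --bins" ci.nix

# bring your own SPDK and Rust instead of the bundled ones
nix-shell --arg nospdk true --arg norust true ci.nix
```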
diff --git a/composer/Cargo.toml b/composer/Cargo.toml index 4bb300c06..dd353222f 100644 --- a/composer/Cargo.toml +++ b/composer/Cargo.toml @@ -7,12 +7,12 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { version = "1", features = ["full"] } -futures = "0.3.8" -tonic = "0.4" +tokio = { version = "1.10.0", features = ["full"] } +futures = "0.3.16" +tonic = "0.5.2" rpc = { path = "../rpc" } -ipnetwork = "0.17.0" -bollard = "0.10.0" -tracing = "0.1.22" -tracing-subscriber = "0.2.15" +ipnetwork = "0.18.0" +bollard = "0.11.0" +tracing = "0.1.26" +tracing-subscriber = "0.2.20" mbus_api = { path = "../mbus-api" } diff --git a/composer/src/lib.rs b/composer/src/lib.rs index a0699f304..c6f4012d9 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -30,6 +30,7 @@ use bollard::{ PortMap, }, Docker, + API_DEFAULT_VERSION, }; use futures::{StreamExt, TryStreamExt}; use ipnetwork::Ipv4Network; @@ -112,6 +113,7 @@ pub struct Binary { arguments: Vec, nats_arg: Option, env: HashMap, + binds: HashMap, } impl Binary { @@ -160,6 +162,11 @@ impl Binary { } self } + /// Add volume bind between host path and container path + pub fn with_bind(mut self, host: &str, container: &str) -> Self { + self.binds.insert(container.to_string(), host.to_string()); + self + } /// pick up the nats argument name for a particular binary from nats_arg /// and fill up the nats server endpoint using the network name fn setup_nats(&mut self, network: &str) { @@ -286,6 +293,11 @@ impl ContainerSpec { self.binds.iter().for_each(|(container, host)| { vec.push(format!("{}:{}", host, container)); }); + if let Some(binary) = &self.binary { + binary.binds.iter().for_each(|(container, host)| { + vec.push(format!("{}:{}", host, container)); + }); + } vec } @@ -558,7 +570,16 @@ impl Builder { let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); let srcdir = path.parent().unwrap().to_string_lossy().into(); - let docker = Docker::connect_with_unix_defaults()?; + // FIXME: We can just use `connect_with_unix_defaults` once this is + // resolved and released: + // + // https://github.com/fussybeaver/bollard/issues/166 + let docker = match std::env::var("DOCKER_HOST") { + Ok(host) => { + Docker::connect_with_unix(&host, 120, API_DEFAULT_VERSION) + } + Err(_) => Docker::connect_with_unix_defaults(), + }?; let mut cfg = HashMap::new(); cfg.insert( @@ -698,7 +719,7 @@ impl Drop for ComposeTest { .output() .unwrap(); std::process::Command::new("docker") - .args(&["rm", c]) + .args(&["rm", "-v", c]) .output() .unwrap(); }); diff --git a/csi/Cargo.toml b/csi/Cargo.toml index 9a12a7999..62224b5a8 100644 --- a/csi/Cargo.toml +++ b/csi/Cargo.toml @@ -9,41 +9,37 @@ name = "mayastor-csi" path = "src/server.rs" [build-dependencies] -tonic-build = "0.4" -prost-build = "0.7" - +tonic-build = "0.5.2" +prost-build = "0.8.0" [dependencies] -async-trait = "0.1.36" -async-stream = "0.3.0" -chrono = "0.4.9" -clap = "2.33.0" -env_logger = "0.8" -failure = "0.1" -futures = { version = "0.3", default-features = false } -glob = "*" +async-trait = "0.1.51" +async-stream = "0.3.2" +chrono = "0.4.19" +clap = "2.33.3" +env_logger = "0.9.0" +failure = "0.1.8" +futures = { version = "0.3.16", default-features = false } +glob = "0.3.0" lazy_static = "1.4.0" nvmeadm = { path = "../nvmeadm", version = "0.1.0" } -proc-mounts = "0.2" -prost = "0.7" -prost-derive = "0.7" -prost-types = "0.7" -regex = "1.3.6" -serde_json = "1.0.40" -snafu = "0.6" -sys-mount = "1.2" 
+proc-mounts = "0.2.4" +prost = "0.8.0" +prost-derive = "0.8.0" +prost-types = "0.8.0" +regex = "1.5.4" +serde_json = "1.0.66" +snafu = "0.6.10" +sys-mount = "1.3.0" sysfs = { path = "../sysfs", version = "0.1.0" } -tokio = { version = "1", features = ["full"] } -tokio-stream = { version = "0.1.3", features = ["net"] } -tonic = "0.4" -tower = "0.4.5" -tracing = "0.1" -tracing-futures = "0.2.4" -udev = "0.6" -url = "2.1.1" -uuid = { version = "0.8", features = ["v4"] } -which = "3.1.1" - -[dependencies.blkid] -branch = "blkid-sys" -git = "https://github.com/openebs/blkid" +tokio = { version = "1.10.0", features = ["full"] } +tokio-stream = { version = "0.1.7", features = ["net"] } +tonic = "0.5.2" +tower = "0.4.8" +tracing = "0.1.26" +tracing-futures = "0.2.5" +udev = "0.6.2" +url = "2.2.2" +uuid = { version = "0.8.2", features = ["v4"] } +which = "4.2.2" +devinfo = { path = "../devinfo"} diff --git a/csi/README.md b/csi/README.md index c9271ca91..5dd96a3ab 100644 --- a/csi/README.md +++ b/csi/README.md @@ -1,9 +1,8 @@ ## About -This crate contains CSI protocol implementation: the node part (mayastor-csi) -and the control plane part (moac) written in node-js. The rest of the doc -is about the mayastor-csi. It is an asynchronous server implementation making -use of tokio.rs. +This crate contains CSI protocol implementation, the part that implements +`identity` and `node` grpc CSI services. It is an asynchronous server +implementation making use of tokio.rs. The mayastor-csi plugin implements two gRPC services: diff --git a/csi/moac/.gitignore b/csi/moac/.gitignore deleted file mode 100644 index de4ab3fa1..000000000 --- a/csi/moac/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/node_modules/ -/proto/ -/result -/dist/ diff --git a/csi/moac/.mocharc.json b/csi/moac/.mocharc.json deleted file mode 100644 index 3139515f4..000000000 --- a/csi/moac/.mocharc.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "extension": ["ts"], - "require": "ts-node/register" -} diff --git a/csi/moac/README.md b/csi/moac/README.md deleted file mode 100644 index 55ef3637b..000000000 --- a/csi/moac/README.md +++ /dev/null @@ -1,329 +0,0 @@ -# MOAC - -MOAC is a control plane of MayaStor. It is a NodeJS application written in -javascript/typescript and makes use of kubernetes-client library to interact -with K8s API server. In a nutshell it has following responsibilities: - -- _node operator_: keeps track of nodes with running MayaStor instances. -- [pool operator](/doc/pool-operator.md): creates/updates/deletes storage pools on storage nodes as requested by admin by means of msp custom resources. -- [volume operator](/doc/volume-operator.md): informs user about existing volumes on storage nodes by means of msv custom resources and allows simple modifications to them. -- _CSI controller_: provisions volumes on storage nodes based on requests from k8s through CSI interface. - -## Requirements - -- required K8s version is 1.14 or newer -- NodeJS v12 (instructions below) -- Nix when building a docker image - -### NodeJS on Nix(OS) - -Enter a nix shell with NodeJS and python packages: - -```bash -nix-shell -p nodejs-16_x python -``` - -### NodeJS on Ubuntu - -NodeJS v12 may not be available in default package repository on Ubuntu -depending on Ubuntu release. 
If that's the case, new package source has to be -added for NodeJS: - -```bash -curl -sL https://deb.nodesource.com/setup_12.x -o nodesource_setup.sh -sudo bash nodesource_setup.sh -sudo apt install nodejs -``` - -## Build it - -Following command downloads and installs npm dependencies of moac and compiles -ts files using typescript compiler: - -```bash -npm install -npm run compile -``` - -## Run it - -### Inside k8s cluster - -It is the most straightforward way to run moac. However also the least -convenient for debugging issues. Use -[k8s yaml file](/deploy/moac-deployment.yaml) for deploying MOAC to K8s cluster -in usual way. This assumes that you are either fine with using the official -docker image of MOAC or that you run your own private registry and you modified -the deployment yaml file to use the private image instead. - -### Outside k8s cluster - -You can run MOAC without any K8s cluster with all components that are K8s -specific disabled: - -```bash -./index.js --skip-k8s -``` - -## Contributing - -1. Check your code style: `npm run check` -2. Fix style errors that can be fixed: `npm run fix` -3. Check that TS files compile: `npm run compile` -4. All unit tests must pass: `npm run test` -5. Clean generated JS files (optional): `npm run clean` - -## Updating the dependencies - -Updating npm dependencies in `package-lock.json` is not enough. In order to -update dependencies in built docker images as well, the nix files need to be -updated too; - -1. Update npm dependencies: - - ```bash - npm update - ``` - - NOTE: If you want to update all packages to the very latest major versions - that will likely include breaking API changes, then install - `npm-check-updates` npm package and run `npm-check-updates -u` before the - first step. - -2. If not already installed, install a `node2nix` tool which automates nix - package creation for npm packages. On NixOS that can be done by following - command: - - ```bash - nix-env -f '' -iA nodePackages.node2nix - ``` - -3. Generate nix package build files. The flag `development` is needed for - typescript to get included in the package, because it is in `devDependencies` - (common case in JS world). The flag does not influence how things are built - (debug info, code optimisations, etc.). - ```bash - rm -rf node_modules - node2nix -l package-lock.json --development --nodejs-16 -c node-composition.nix - ``` - -4. Patch generated nix files by following patch in order to reduce the size - of the package. This is a temporary workaround for - https://github.com/svanderburg/node2nix/issues/187. - ```diff - diff --git a/csi/moac/node-composition.nix b/csi/moac/node-composition.nix - index ac8de82..6441534 100644 - --- a/csi/moac/node-composition.nix - +++ b/csi/moac/node-composition.nix - @@ -2,11 +2,12 @@ - - {pkgs ? import { - inherit system; - - }, system ? builtins.currentSystem, nodejs ? pkgs."nodejs-16_x"}: - + }, system ? builtins.currentSystem, nodejs-slim ? pkgs.nodejs-slim-16_x, nodejs ? 
pkgs."nodejs-16_x"}: - let - nodeEnv = import ./node-env.nix { - inherit (pkgs) stdenv python2 utillinux runCommand writeTextFile; - inherit nodejs; - + inherit nodejs-slim; - libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null; - }; - in - diff --git a/csi/moac/node-env.nix b/csi/moac/node-env.nix - index e1abf53..4d35a5e 100644 - --- a/csi/moac/node-env.nix - +++ b/csi/moac/node-env.nix - @@ -1,6 +1,6 @@ - # This file originates from node2nix - - -{lib, stdenv, nodejs, python2, utillinux, libtool, runCommand, writeTextFile}: - +{lib, stdenv, nodejs-slim, nodejs, python2, utillinux, libtool, runCommand, writeTextFile}: - - let - python = if nodejs ? python then nodejs.python else python2; - @@ -395,7 +395,7 @@ let - in - stdenv.mkDerivation ({ - name = "node_${name}-${version}"; - - buildInputs = [ tarWrapper python nodejs ] - + buildInputs = [ tarWrapper python nodejs-slim nodejs ] - ++ lib.optional (stdenv.isLinux) utillinux - ++ lib.optional (stdenv.isDarwin) libtool - ++ buildInputs; - ``` - -## Building a Nix Package - -All Nix files generated by the `node2nix` tool are part of the repository so if -you just want to build the package without changing or updating dependencies -it is rather simple: - -1. Build the Nix MOAC package: - - ```bash - nix-build default.nix -A package - ``` - -2. Run MOAC from the package: - ```bash - ./result/... - ``` - -## Architecture - -Unfortunately ASCII art is not good with colours. Left side of the picture -is reserved for k8s cluster components. Up and right are MOAC components. -Lines denote relations between all components. - -```text - moac - +------------------------------------------------------+ - +------------+ | +-----------+ +---------+ | - | K8S CSI +------|--> CSI +---> | | - +------------+ | | controller| | | +--------+ | - | +-----------+ | volumes +--+ volume | | - +------------+ | +-----------+ | | +--------+ | - | | | | volume +---> | | - | +------|--> operator | +---+-----+ +--------+ | - | K8S | | +-----------+ | | REST | | - | api-server | | +-----------+ +---+-----+--+ API | | - | +------|--> pool | | | +--------+ | - | | | | operator +---+ | | - | | | +-----------+ |registry | +------+ +-------+ | - | | | +-----------+ | +--+ +---+ nexus | | - | +------|--> node +---+ | | node | +-------+ | - | | | | operator | | | | | +------+ | - +-----+------+ | +-----------+ +---+-----+ +--+---+---+ pool | | - | | | | +---+--+ | - | | +---+-----+ | | | - | | | message | | +---+-----+ | - | | +---+ bus | | | replica | | - | | | +---------+ | +---------+ | - | +--------------|-------------------|-------------------+ - | | | - | | | - | | | - App | Node Node | | Storage Node -+------+-----+---+ +-----+---+ +----------------------+ -| | | | NATS | | mayastor | -| kubelet | | | server +------------+ | -| | | +---------+ +-+--------------------+ -+------+-----+ | | -| | | | -+------+-----+ | | -| mayastor | | volume mount | -| CSI plugin +-+-|-------------------------------------+ -+------------+ | | -| | | -+------------+ | | -| App +-+ | -+------------+ | -| | -+----------------+ -``` - -## Volume states - -Volume life cycle can be described by a finite state automaton (FSA). It is -crucial for understanding what and when can happen with the volume. 
Imperfect -approximation of FSA diagram for the volume follows: - -```text - new volume - + - | - +----v-----+ - | | - | pending | - | | - +----+-----+ - | - | set up the volume - | - +----v-----+ - | | - | healthy | - | | - +----+-----+ - | - | - yes v no - +--+is volume published?+--+ - | | - | | +----------+ - v v no | | - yes is any replica any replica online? +-------> faulted | -reshare <---+ unreachable from + | | -replica nexus? | +----------+ - + | yes - | no | - yes v v +----------+ -recreate <---+ is nexus missing? insufficient # of sound yes | | create new -nexus + replicas? +-----> degraded +----> replica - | no | | - no v +-----^----+ -share <---+ is nexus exposed? | -nexus + | - | yes | - v | - insufficient # of yes | - sound replicas? +--------------------------------------------+ - + - | no - v +------------+ - volume under yes | | - rebuild? +-----> degraded | - + | | - | no +------------+ - v - +---+-----+ - | | - | healthy | - | | - +---+-----+ - | - v yes - any replica faulty? +--> remove it - + - | no - v - more online replicas yes - than needed? +---> remove the least - + preferred replica - | no - v - should move - volume to yes - different +---> create new replica - node(s)? -``` - -## Troubleshooting - -Running moac with trace log level enabled (`-vv`) prints all details about -incoming/outgoing CSI messages, watcher events, etc. - -## History - -The acronym MOAC comes from "Mother Of All Cases" (CAS means Container Attached -Storage). - -## VSCode - -VSCode is a perfect choice for developing JS/TS projects. Remote docker plugin -can be used to setup a dev environment for moac in a moment. Example of -`.devcontainer.json` file: - -```json -{ - "image": "node:16", - "workspaceMount": "source=/path/to/repo/on/the/host/Mayastor,target=/workspace,type=bind,consistency=cached", - "workspaceFolder": "/workspace", - "extensions": [ - "chenxsan.vscode-standardjs" - ] -} -``` - -Note that this env is suitable for writing the code and running the tests but -for building nix docker image you need nix package manager to be installed. diff --git a/csi/moac/bundle_protos.sh b/csi/moac/bundle_protos.sh deleted file mode 100755 index 5bfd33d47..000000000 --- a/csi/moac/bundle_protos.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -# Copy rpc proto files from other locations in repository to a proto subdir -# of the moac. This is impotant when creating a nix and npm package of the -# moac. Before changing anything here make sure you understand the code in -# default.nix. - -[ -n "$csiProto" ] || csiProto=../proto/csi.proto -[ -n "$mayastorProto" ] || mayastorProto=../../rpc/proto/mayastor.proto - -echo "Adding RPC proto files to the bundle ..." -mkdir -p proto || exit 1 -cp -f "$csiProto" proto/csi.proto || exit 1 -cp -f "$mayastorProto" proto/mayastor.proto || exit 1 -echo "Done" diff --git a/csi/moac/crds/mayastornode.yaml b/csi/moac/crds/mayastornode.yaml deleted file mode 100644 index c3265638e..000000000 --- a/csi/moac/crds/mayastornode.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: mayastornodes.openebs.io -spec: - group: openebs.io - versions: - - name: v1alpha1 - served: true - storage: true - subresources: - # Both status and spec parts are updated by the controller. - status: {} - schema: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor node. 
- type: object - required: - - grpcEndpoint - properties: - grpcEndpoint: - description: Address of gRPC server that mayastor listens on - type: string - status: - description: State of the node as seen by the control plane - type: string - additionalPrinterColumns: - - name: State - type: string - description: State of the storage pool - jsonPath: .status - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - scope: Namespaced - names: - kind: MayastorNode - listKind: MayastorNodeList - plural: mayastornodes - singular: mayastornode - shortNames: ['msn'] \ No newline at end of file diff --git a/csi/moac/crds/mayastorpool.yaml b/csi/moac/crds/mayastorpool.yaml deleted file mode 100644 index 79ccbbf29..000000000 --- a/csi/moac/crds/mayastorpool.yaml +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: mayastorpools.openebs.io -spec: - group: openebs.io - versions: - - name: v1alpha1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor pool. - type: object - required: - - node - - disks - properties: - node: - description: Name of the k8s node where the storage pool is located. - type: string - disks: - description: Disk devices (paths or URIs) that should be used for the pool. - type: array - items: - type: string - status: - description: Status part updated by the pool controller. - type: object - properties: - spec: - type: object - properties: - node: - description: Name of the k8s node where the storage pool is located. (set on creation) - type: string - disks: - description: Disk devices (paths or URIs) that should be used for the pool. (set on creation) - type: array - items: - type: string - state: - description: Pool state. - type: string - reason: - description: Reason for the pool state value if applicable. - type: string - disks: - description: Disk device URIs that are actually used for the pool. - type: array - items: - type: string - capacity: - description: Capacity of the pool in bytes. - type: integer - format: int64 - minimum: 0 - used: - description: How many bytes are used in the pool. - type: integer - format: int64 - minimum: 0 - additionalPrinterColumns: - - name: Node - type: string - description: Node where the storage pool is located - jsonPath: .spec.node - - name: State - type: string - description: State of the storage pool - jsonPath: .status.state - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - scope: Namespaced - names: - kind: MayastorPool - listKind: MayastorPoolList - plural: mayastorpools - singular: mayastorpool - shortNames: ["msp"] diff --git a/csi/moac/crds/mayastorvolume.yaml b/csi/moac/crds/mayastorvolume.yaml deleted file mode 100644 index 243298e51..000000000 --- a/csi/moac/crds/mayastorvolume.yaml +++ /dev/null @@ -1,148 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: mayastorvolumes.openebs.io -spec: - group: openebs.io - versions: - - name: v1alpha1 - served: true - storage: true - subresources: - # The status part is updated by the controller and spec part by the user - # usually. Well, not in this case. The mayastor's control plane updates both - # parts and user is allowed to update some of the properties in the spec - # too. Though status part remains read-only for the user. 
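    # For example (illustrative command; the volume UUID is a placeholder), the
    # replica count can be changed by patching just the spec:
    #   kubectl -n mayastor patch msv <volume-uuid> --type merge -p '{"spec": {"replicaCount": 3}}'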
- status: {} - schema: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor volume. - type: object - required: - - replicaCount - - requiredBytes - properties: - replicaCount: - description: The number of replicas used for the volume. - type: integer - minimum: 1 - local: - description: The app should run on the same node as the nexus. - type: boolean - preferredNodes: - description: A list of preferred cluster nodes for the volume. - type: array - items: - type: string - requiredNodes: - description: Only cluster nodes from this list should be used for the volume. - type: array - items: - type: string - requiredBytes: - description: The minimum size of the volume. - type: integer - minimum: 1 - limitBytes: - description: The maximum size of the volume (if zero then same as the requiredBytes). - type: integer - minimum: 0 - protocol: - description: Share protocol of the nexus - type: string - status: - description: Properties related to current state of the volume. - type: object - properties: - size: - description: The size of the volume if it has been created - type: integer - format: int64 - state: - description: Overall state of the volume. - type: string - reason: - description: Further explanation of the state if applicable. - type: string - targetNodes: - description: k8s node(s) with storage targets for the volume. - type: array - items: - type: string - nexus: - description: Frontend of the volume. - type: object - properties: - node: - description: Name of the k8s node with the nexus. - type: string - deviceUri: - description: URI of a block device for IO. - type: string - state: - description: State of the nexus. - type: string - children: - description: Child devices of the nexus (replicas). - type: array - items: - description: child device of the nexus (replica). - type: object - properties: - uri: - description: URI used by nexus to access the child. - type: string - state: - description: State of the child as seen by the nexus. - type: string - replicas: - description: List of replicas - type: array - items: - type: object - properties: - node: - description: Name of the k8s node with the replica. - type: string - pool: - description: Name of the pool that replica was created on. - type: string - uri: - description: URI of the replica used by the nexus. - type: string - offline: - description: Is replica reachable by control plane. - type: boolean - additionalPrinterColumns: - - name: Targets - type: string - description: k8s node(s) with storage targets for the volume. - jsonPath: .status.targetNodes - - name: Size - type: integer - format: int64 - description: Size of the volume - jsonPath: .status.size - - name: State - type: string - description: State of the storage pool - jsonPath: .status.state - - name: Age - type: date - jsonPath: .metadata.creationTimestamp - scope: Namespaced - names: - kind: MayastorVolume - listKind: MayastorVolumeList - plural: mayastorvolumes - singular: mayastorvolume - shortNames: ['msv'] diff --git a/csi/moac/default.nix b/csi/moac/default.nix deleted file mode 100644 index 6c4731cc2..000000000 --- a/csi/moac/default.nix +++ /dev/null @@ -1,28 +0,0 @@ -# This file relies on files generated by node2nix tool: -# -# node2nix -l package-lock.json --nodejs-12 -c node-composition.nix -# -# It is used to bundle rpc proto files from mayastor repo to moac nix package. - -{ pkgs ? import { inherit system; } -, system ? 
builtins.currentSystem -}: -let - result = import ./node-composition.nix { inherit pkgs system; }; -in -result // rec { - package = result.package.override { - csiProto = ../proto/csi.proto; - mayastorProto = ../../rpc/proto/mayastor.proto; - # Prepare script is executed only if npm install is run without any - # arguments. node2path runs it with a number of args so we must run - # in manually in postInstall hook :-/ - postInstall = '' - npm run compile - npm run prepare - ''; - # add nodejs and busybox to this variable to set it later to ammend the - # path variable. This makes it easer to exec into the container - env = pkgs.lib.makeBinPath [ pkgs.busybox pkgs.nodejs-slim-16_x ]; - }; -} diff --git a/csi/moac/mbus.js b/csi/moac/mbus.js deleted file mode 100755 index 7b05446b9..000000000 --- a/csi/moac/mbus.js +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env node - -// Message bus client -// -// A simple program for publishing messages to the NATS server for purpose -// of debugging mayastor. - -'use strict'; - -const yargs = require('yargs'); -const nats = require('nats'); - -const opts = yargs - .options({ - s: { - alias: 'server', - describe: 'NATS server address in host[:port] form', - default: '127.0.0.1:4222', - string: true - } - }) - .command('register ', 'Send registration request', (yargs) => { - yargs.positional('node', { - describe: 'Node name', - type: 'string' - }); - yargs.positional('grpc', { - describe: 'gRPC endpoint', - type: 'string' - }); - }) - .command('deregister ', 'Send deregistration request', (yargs) => { - yargs.positional('node', { - describe: 'Node name', - type: 'string' - }); - }) - .command('raw ', 'Publish raw NATS message', (yargs) => { - yargs.positional('name', { - describe: 'Name of the message', - type: 'string' - }); - yargs.positional('payload', { - describe: 'Raw payload sent as a string', - type: 'string' - }); - }) - .help('help') - .strict().argv; - -const nc = nats.connect(opts.s); -nc.on('connect', () => { - if (opts._[0] === 'register') { - nc.publish('v0/registry', JSON.stringify({ - id: 'v0/register', - sender: 'moac', - data: { - id: opts.node, - grpcEndpoint: opts.grpc - } - })); - } else if (opts._[0] === 'deregister') { - nc.publish('v0/registry', JSON.stringify({ - id: 'v0/deregister', - sender: 'moac', - data: { - id: opts.node - } - })); - } else if (opts._[0] === 'raw') { - nc.publish(opts.name, opts.payload); - } - nc.flush(); - nc.close(); - process.exit(0); -}); -nc.on('error', (err) => { - console.error(err.toString()); - nc.close(); - process.exit(1); -}); diff --git a/csi/moac/moac b/csi/moac/moac deleted file mode 100755 index 8abac2f07..000000000 --- a/csi/moac/moac +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env node - -'use strict'; - -const fs = require('fs'); -const path = require('path'); - -const MAIN_FILE = './dist/index.js'; - -try { - fs.statSync(path.join(__dirname, MAIN_FILE)); -} catch (err) { - console.error(`Missing ${MAIN_FILE}. You need to compile the code: "npm run compile"`); - process.exit(1); -} - -const { main } = require(MAIN_FILE); -main(); diff --git a/csi/moac/node-composition.nix b/csi/moac/node-composition.nix deleted file mode 100644 index 664ddd089..000000000 --- a/csi/moac/node-composition.nix +++ /dev/null @@ -1,21 +0,0 @@ -# This file has been generated by node2nix 1.9.0. Do not edit! - -{ pkgs ? import { - inherit system; - } -, system ? builtins.currentSystem -, nodejs-slim ? pkgs."nodejs-slim-16_x" -, nodejs ? 
pkgs."nodejs-16_x" -}: - -let - nodeEnv = import ./node-env.nix { - inherit (pkgs) stdenv lib python2 runCommand writeTextFile; - inherit pkgs nodejs nodejs-slim; - libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null; - }; -in -import ./node-packages.nix { - inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit; - inherit nodeEnv; -} diff --git a/csi/moac/node-env.nix b/csi/moac/node-env.nix deleted file mode 100644 index 2a7427586..000000000 --- a/csi/moac/node-env.nix +++ /dev/null @@ -1,572 +0,0 @@ -# This file originates from node2nix - -{ lib, stdenv, nodejs-slim, nodejs, python2, pkgs, libtool, runCommand, writeTextFile }: - -let - # Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master - utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux; - - python = if nodejs ? python then nodejs.python else python2; - - # Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise - tarWrapper = runCommand "tarWrapper" { } '' - mkdir -p $out/bin - - cat > $out/bin/tar <> $out/nix-support/hydra-build-products - ''; - }; - - includeDependencies = { dependencies }: - lib.optionalString (dependencies != [ ]) - (lib.concatMapStrings - (dependency: - '' - # Bundle the dependencies of the package - mkdir -p node_modules - cd node_modules - - # Only include dependencies if they don't exist. They may also be bundled in the package. - if [ ! -e "${dependency.name}" ] - then - ${composePackage dependency} - fi - - cd .. - '' - ) - dependencies); - - # Recursively composes the dependencies of a package - composePackage = { name, packageName, src, dependencies ? [ ], ... }@args: - builtins.addErrorContext "while evaluating node package '${packageName}'" '' - DIR=$(pwd) - cd $TMPDIR - - unpackFile ${src} - - # Make the base dir in which the target dependency resides first - mkdir -p "$(dirname "$DIR/${packageName}")" - - if [ -f "${src}" ] - then - # Figure out what directory has been unpacked - packageDir="$(find . -maxdepth 1 -type d | tail -1)" - - # Restore write permissions to make building work - find "$packageDir" -type d -exec chmod u+x {} \; - chmod -R u+w "$packageDir" - - # Move the extracted tarball into the output folder - mv "$packageDir" "$DIR/${packageName}" - elif [ -d "${src}" ] - then - # Get a stripped name (without hash) of the source directory. - # On old nixpkgs it's already set internally. - if [ -z "$strippedName" ] - then - strippedName="$(stripHash ${src})" - fi - - # Restore write permissions to make building work - chmod -R u+w "$strippedName" - - # Move the extracted directory into the output folder - mv "$strippedName" "$DIR/${packageName}" - fi - - # Unset the stripped name to not confuse the next unpack step - unset strippedName - - # Include the dependencies of the package - cd "$DIR/${packageName}" - ${includeDependencies { inherit dependencies; }} - cd .. 
- ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} - ''; - - pinpointDependencies = { dependencies, production }: - let - pinpointDependenciesFromPackageJSON = writeTextFile { - name = "pinpointDependencies.js"; - text = '' - var fs = require('fs'); - var path = require('path'); - - function resolveDependencyVersion(location, name) { - if(location == process.env['NIX_STORE']) { - return null; - } else { - var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json"); - - if(fs.existsSync(dependencyPackageJSON)) { - var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON)); - - if(dependencyPackageObj.name == name) { - return dependencyPackageObj.version; - } - } else { - return resolveDependencyVersion(path.resolve(location, ".."), name); - } - } - } - - function replaceDependencies(dependencies) { - if(typeof dependencies == "object" && dependencies !== null) { - for(var dependency in dependencies) { - var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency); - - if(resolvedVersion === null) { - process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n"); - } else { - dependencies[dependency] = resolvedVersion; - } - } - } - } - - /* Read the package.json configuration */ - var packageObj = JSON.parse(fs.readFileSync('./package.json')); - - /* Pinpoint all dependencies */ - replaceDependencies(packageObj.dependencies); - if(process.argv[2] == "development") { - replaceDependencies(packageObj.devDependencies); - } - replaceDependencies(packageObj.optionalDependencies); - - /* Write the fixed package.json file */ - fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2)); - ''; - }; - in - '' - node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"} - - ${lib.optionalString (dependencies != []) - '' - if [ -d node_modules ] - then - cd node_modules - ${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies} - cd .. - fi - ''} - ''; - - # Recursively traverses all dependencies of a package and pinpoints all - # dependencies in the package.json file to the versions that are actually - # being used. - - pinpointDependenciesOfPackage = { packageName, dependencies ? [ ], production ? true, ... }@args: - '' - if [ -d "${packageName}" ] - then - cd "${packageName}" - ${pinpointDependencies { inherit dependencies production; }} - cd .. - ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} - fi - ''; - - # Extract the Node.js source code which is used to compile packages with - # native bindings - nodeSources = runCommand "node-sources" { } '' - tar --no-same-owner --no-same-permissions -xf ${nodejs.src} - mv node-* $out - ''; - - # Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty) - addIntegrityFieldsScript = writeTextFile { - name = "addintegrityfields.js"; - text = '' - var fs = require('fs'); - var path = require('path'); - - function augmentDependencies(baseDir, dependencies) { - for(var dependencyName in dependencies) { - var dependency = dependencies[dependencyName]; - - // Open package.json and augment metadata fields - var packageJSONDir = path.join(baseDir, "node_modules", dependencyName); - var packageJSONPath = path.join(packageJSONDir, "package.json"); - - if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. 
Sometimes we may have production installs in which development dependencies can be ignored - console.log("Adding metadata fields to: "+packageJSONPath); - var packageObj = JSON.parse(fs.readFileSync(packageJSONPath)); - - if(dependency.integrity) { - packageObj["_integrity"] = dependency.integrity; - } else { - packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads. - } - - if(dependency.resolved) { - packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided - } else { - packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories. - } - - if(dependency.from !== undefined) { // Adopt from property if one has been provided - packageObj["_from"] = dependency.from; - } - - fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2)); - } - - // Augment transitive dependencies - if(dependency.dependencies !== undefined) { - augmentDependencies(packageJSONDir, dependency.dependencies); - } - } - } - - if(fs.existsSync("./package-lock.json")) { - var packageLock = JSON.parse(fs.readFileSync("./package-lock.json")); - - if(![1, 2].includes(packageLock.lockfileVersion)) { - process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n"); - process.exit(1); - } - - if(packageLock.dependencies !== undefined) { - augmentDependencies(".", packageLock.dependencies); - } - } - ''; - }; - - # Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes - reconstructPackageLock = writeTextFile { - name = "addintegrityfields.js"; - text = '' - var fs = require('fs'); - var path = require('path'); - - var packageObj = JSON.parse(fs.readFileSync("package.json")); - - var lockObj = { - name: packageObj.name, - version: packageObj.version, - lockfileVersion: 1, - requires: true, - dependencies: {} - }; - - function augmentPackageJSON(filePath, dependencies) { - var packageJSON = path.join(filePath, "package.json"); - if(fs.existsSync(packageJSON)) { - var packageObj = JSON.parse(fs.readFileSync(packageJSON)); - dependencies[packageObj.name] = { - version: packageObj.version, - integrity: "sha1-000000000000000000000000000=", - dependencies: {} - }; - processDependencies(path.join(filePath, "node_modules"), dependencies[packageObj.name].dependencies); - } - } - - function processDependencies(dir, dependencies) { - if(fs.existsSync(dir)) { - var files = fs.readdirSync(dir); - - files.forEach(function(entry) { - var filePath = path.join(dir, entry); - var stats = fs.statSync(filePath); - - if(stats.isDirectory()) { - if(entry.substr(0, 1) == "@") { - // When we encounter a namespace folder, augment all packages belonging to the scope - var pkgFiles = fs.readdirSync(filePath); - - pkgFiles.forEach(function(entry) { - if(stats.isDirectory()) { - var pkgFilePath = path.join(filePath, entry); - augmentPackageJSON(pkgFilePath, dependencies); - } - }); - } else { - augmentPackageJSON(filePath, dependencies); - } - } - }); - } - } - - processDependencies("node_modules", lockObj.dependencies); - - fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2)); - ''; - }; - - prepareAndInvokeNPM = { packageName, bypassCache, reconstructLock, npmFlags, production }: - let - forceOfflineFlag = if bypassCache then "--offline" else "--registry 
http://www.example.com"; - in - '' - # Pinpoint the versions of all dependencies to the ones that are actually being used - echo "pinpointing versions of dependencies..." - source $pinpointDependenciesScriptPath - - # Patch the shebangs of the bundled modules to prevent them from - # calling executables outside the Nix store as much as possible - patchShebangs . - - # Deploy the Node.js package by running npm install. Since the - # dependencies have been provided already by ourselves, it should not - # attempt to install them again, which is good, because we want to make - # it Nix's responsibility. If it needs to install any dependencies - # anyway (e.g. because the dependency parameters are - # incomplete/incorrect), it fails. - # - # The other responsibilities of NPM are kept -- version checks, build - # steps, postprocessing etc. - - export HOME=$TMPDIR - cd "${packageName}" - runHook preRebuild - - ${lib.optionalString bypassCache '' - ${lib.optionalString reconstructLock '' - if [ -f package-lock.json ] - then - echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!" - echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!" - rm package-lock.json - else - echo "No package-lock.json file found, reconstructing..." - fi - - node ${reconstructPackageLock} - ''} - - node ${addIntegrityFieldsScript} - ''} - - npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild - - if [ "''${dontNpmInstall-}" != "1" ] - then - # NPM tries to download packages even when they already exist if npm-shrinkwrap is used. - rm -f npm-shrinkwrap.json - - npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install - fi - ''; - - # Builds and composes an NPM package including all its dependencies - buildNodePackage = - { name - , packageName - , version - , dependencies ? [ ] - , buildInputs ? [ ] - , production ? true - , npmFlags ? "" - , dontNpmInstall ? false - , bypassCache ? false - , reconstructLock ? false - , preRebuild ? "" - , dontStrip ? true - , unpackPhase ? "true" - , buildPhase ? "true" - , ... 
- }@args: - - let - extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" ]; - in - stdenv.mkDerivation ({ - name = "node_${name}-${version}"; - buildInputs = [ tarWrapper python nodejs-slim nodejs ] - ++ lib.optional (stdenv.isLinux) utillinux - ++ lib.optional (stdenv.isDarwin) libtool - ++ buildInputs; - - inherit nodejs; - - inherit dontStrip; # Stripping may fail a build for some package deployments - inherit dontNpmInstall preRebuild unpackPhase buildPhase; - - compositionScript = composePackage args; - pinpointDependenciesScript = pinpointDependenciesOfPackage args; - - passAsFile = [ "compositionScript" "pinpointDependenciesScript" ]; - - installPhase = '' - # Create and enter a root node_modules/ folder - mkdir -p $out/lib/node_modules - cd $out/lib/node_modules - - # Compose the package and all its dependencies - source $compositionScriptPath - - ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }} - - # Create symlink to the deployed executable folder, if applicable - if [ -d "$out/lib/node_modules/.bin" ] - then - ln -s $out/lib/node_modules/.bin $out/bin - fi - - # Create symlinks to the deployed manual page folders, if applicable - if [ -d "$out/lib/node_modules/${packageName}/man" ] - then - mkdir -p $out/share - for dir in "$out/lib/node_modules/${packageName}/man/"* - do - mkdir -p $out/share/man/$(basename "$dir") - for page in "$dir"/* - do - ln -s $page $out/share/man/$(basename "$dir") - done - done - fi - - # Run post install hook, if provided - runHook postInstall - ''; - } // extraArgs); - - # Builds a node environment (a node_modules folder and a set of binaries) - buildNodeDependencies = - { name - , packageName - , version - , src - , dependencies ? [ ] - , buildInputs ? [ ] - , production ? true - , npmFlags ? "" - , dontNpmInstall ? false - , bypassCache ? false - , reconstructLock ? false - , dontStrip ? true - , unpackPhase ? "true" - , buildPhase ? "true" - , ... - }@args: - - let - extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ]; - in - stdenv.mkDerivation ({ - name = "node-dependencies-${name}-${version}"; - - buildInputs = [ tarWrapper python nodejs-slim nodejs ] - ++ lib.optional (stdenv.isLinux) utillinux - ++ lib.optional (stdenv.isDarwin) libtool - ++ buildInputs; - - inherit dontStrip; # Stripping may fail a build for some package deployments - inherit dontNpmInstall unpackPhase buildPhase; - - includeScript = includeDependencies { inherit dependencies; }; - pinpointDependenciesScript = pinpointDependenciesOfPackage args; - - passAsFile = [ "includeScript" "pinpointDependenciesScript" ]; - - installPhase = '' - mkdir -p $out/${packageName} - cd $out/${packageName} - - source $includeScriptPath - - # Create fake package.json to make the npm commands work properly - cp ${src}/package.json . - chmod 644 package.json - ${lib.optionalString bypassCache '' - if [ -f ${src}/package-lock.json ] - then - cp ${src}/package-lock.json . - fi - ''} - - # Go to the parent folder to make sure that all packages are pinpointed - cd .. - ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} - - ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }} - - # Expose the executables that were installed - cd .. 
- ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} - - mv ${packageName} lib - ln -s $out/lib/node_modules/.bin $out/bin - ''; - } // extraArgs); - - # Builds a development shell - buildNodeShell = - { name - , packageName - , version - , src - , dependencies ? [ ] - , buildInputs ? [ ] - , production ? true - , npmFlags ? "" - , dontNpmInstall ? false - , bypassCache ? false - , reconstructLock ? false - , dontStrip ? true - , unpackPhase ? "true" - , buildPhase ? "true" - , ... - }@args: - - let - nodeDependencies = buildNodeDependencies args; - in - stdenv.mkDerivation { - name = "node-shell-${name}-${version}"; - - buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs; - buildCommand = '' - mkdir -p $out/bin - cat > $out/bin/shell <=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@dabh/diagnostics": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.2.tgz", - "integrity": "sha512-+A1YivoVDNNVCdfozHSR8v/jyuuLTMXwjWuxPFlFlUapXoGc+Gj9mDlTDDfrwl7rXCl2tNZ0kE8sIBO6YOn96Q==", - "dependencies": { - "colorspace": "1.1.x", - "enabled": "2.0.x", - "kuler": "^2.0.0" - } - }, - "node_modules/@eslint/eslintrc": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.2.2.tgz", - "integrity": "sha512-EfB5OHNYp1F4px/LI/FEnGylop7nOqkQ1LRzCM0KccA2U8tvV8w01KBv37LbO7nW4H+YhKyo2LcJhRwjjV17QQ==", - "dev": true, - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.1.1", - "espree": "^7.3.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - 
"import-fresh": "^3.2.1", - "js-yaml": "^3.13.1", - "lodash": "^4.17.19", - "minimatch": "^3.0.4", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/@eslint/eslintrc/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@eslint/eslintrc/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/@eslint/eslintrc/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@grpc/grpc-js": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.2.tgz", - "integrity": "sha512-UXepkOKCATJrhHGsxt+CGfpZy9zUn1q9mop5kfcXq1fBkTePxVNPOdnISlCbJFlCtld+pSLGyZCzr9/zVprFKA==", - "dependencies": { - "@types/node": ">=12.12.47" - }, - "engines": { - "node": "^8.13.0 || >=10.10.0" - } - }, - "node_modules/@grpc/proto-loader": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.2.tgz", - "integrity": "sha512-q2Qle60Ht2OQBCp9S5hv1JbI4uBBq6/mqSevFNK3ZEgRDBCAkWqZPUhD/K9gXOHrHKluliHiVq2L9sw1mVyAIg==", - "dependencies": { - "@types/long": "^4.0.1", - "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.1.1" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@grpc/proto-loader/node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@kubernetes/client-node": { - "version": "0.14.3", - "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.14.3.tgz", - "integrity": "sha512-9hHGDNm2JEFQcRTpDxVoAVr0fowU+JH/l5atCXY9VXwvFM18pW5wr2LzLP+Q2Rh+uQv7Moz4gEjEKSCgVKykEQ==", - "dependencies": { - "@types/js-yaml": "^3.12.1", - "@types/node": "^10.12.0", - "@types/request": "^2.47.1", - "@types/stream-buffers": "^3.0.3", - "@types/tar": "^4.0.3", - "@types/underscore": "^1.8.9", - "@types/ws": "^6.0.1", - "byline": "^5.0.0", - "execa": "1.0.0", - "isomorphic-ws": "^4.0.1", - "js-yaml": "^3.13.1", - "jsonpath-plus": "^0.19.0", - "openid-client": "^4.1.1", - "request": "^2.88.0", - "rfc4648": "^1.3.0", - "shelljs": "^0.8.2", - "stream-buffers": "^3.0.2", - "tar": "^6.0.2", - "tmp-promise": "^3.0.2", - "tslib": "^1.9.3", - "underscore": "^1.9.1", - "ws": "^7.3.1" - } - }, - 
"node_modules/@kubernetes/client-node/node_modules/@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, - "node_modules/@kubernetes/client-node/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/@panva/asn1.js": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@panva/asn1.js/-/asn1.js-1.0.0.tgz", - "integrity": "sha512-UdkG3mLEqXgnlKsWanWcgb6dOjUzJ+XC5f+aWw30qrtjxeNUSfKX1cd5FBzOaXQumoe9nIqeZUvrRJS03HCCtw==", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" - }, - "node_modules/@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" - }, - "node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" - }, - "node_modules/@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" - }, - "node_modules/@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "node_modules/@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" - }, - "node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" - }, - "node_modules/@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" - }, - "node_modules/@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" - }, - "node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" - }, - "node_modules/@sindresorhus/is": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.0.1.tgz", - "integrity": "sha512-Qm9hBEBu18wt1PO2flE7LPb30BHMQt1eQgbV76YntdNk73XZGpn3izvGTYxbGgzXKgbCjiia0uxTd3aTNQrY/g==", - "engines": { - "node": ">=10" - } - }, - "node_modules/@sinonjs/commons": { - "version": "1.8.3", - 
"resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz", - "integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==", - "dependencies": { - "type-detect": "4.0.8" - } - }, - "node_modules/@sinonjs/fake-timers": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz", - "integrity": "sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA==", - "dependencies": { - "@sinonjs/commons": "^1.7.0" - } - }, - "node_modules/@sinonjs/samsam": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-5.3.1.tgz", - "integrity": "sha512-1Hc0b1TtyfBu8ixF/tpfSHTVWKwCBLY4QJbkgnE7HcwyvT2xArDxb4K7dMgqRm3szI+LJbzmW/s4xxEhv6hwDg==", - "dependencies": { - "@sinonjs/commons": "^1.6.0", - "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" - } - }, - "node_modules/@sinonjs/text-encoding": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.1.tgz", - "integrity": "sha512-+iTbntw2IZPb/anVDbypzfQa+ay64MW0Zo8aJ8gZPWMMK6/OubMVb6lUPMagqjOPnmtauXnFCACVl3O7ogjeqQ==" - }, - "node_modules/@szmarczak/http-timer": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.5.tgz", - "integrity": "sha512-PyRA9sm1Yayuj5OIoJ1hGt2YISX45w9WcFbh6ddT0Z/0yaFxOtGLInr4jUfU1EAFVs0Yfyfev4RNwBlUaHdlDQ==", - "dependencies": { - "defer-to-connect": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@types/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ==", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/body-parser/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/cacheable-request": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.1.tgz", - "integrity": "sha512-ykFq2zmBGOCbpIXtoVbz4SKY5QriWPh3AjyU4G74RYbtt5yOc5OfaY75ftjg7mikMOla1CTGpX3lLbuJh8DTrQ==", - "dependencies": { - "@types/http-cache-semantics": "*", - "@types/keyv": "*", - "@types/node": "*", - "@types/responselike": "*" - } - }, - "node_modules/@types/cacheable-request/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/caseless": { - "version": "0.12.2", - "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", - "integrity": "sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" - }, - "node_modules/@types/chai": { - "version": "4.2.18", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.18.tgz", - "integrity": "sha512-rS27+EkB/RE1Iz3u0XtVL5q36MGDWbgYe7zWiodyKNUnthxY0rukK5V36eiUCtCisB7NN8zKYH6DO2M37qxFEQ==" - }, - "node_modules/@types/connect": { - "version": "3.4.34", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.34.tgz", - 
"integrity": "sha512-ePPA/JuI+X0vb+gSWlPKOY0NdNAie/rPUqX2GUPpbZwiKTkSPhjXWuee47E4MtE54QVzGCQMQkAL6JhV2E1+cQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/express": { - "version": "4.17.11", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.11.tgz", - "integrity": "sha512-no+R6rW60JEc59977wIxreQVsIEOAYwgCqldrA/vkpCnbD7MqTefO97lmoBe4WE0F156bC4uLSP1XHDOySnChg==", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.18", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.19", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.19.tgz", - "integrity": "sha512-DJOSHzX7pCiSElWaGR8kCprwibCB/3yW6vcT8VG3P0SJjnv19gnWG/AZMfM60Xj/YJIp/YCaDHyvzsFVeniARA==", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "node_modules/@types/express-serve-static-core/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/http-cache-semantics": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.0.tgz", - "integrity": "sha512-c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A==" - }, - "node_modules/@types/js-yaml": { - "version": "3.12.6", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.6.tgz", - "integrity": "sha512-cK4XqrLvP17X6c0C8n4iTbT59EixqyXL3Fk8/Rsk4dF3oX4dg70gYUXrXVUUHpnsGMPNlTQMqf+TVmNPX6FmSQ==" - }, - "node_modules/@types/json5": { - "version": "0.0.29", - "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", - "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", - "dev": true - }, - "node_modules/@types/keyv": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.1.tgz", - "integrity": "sha512-MPtoySlAZQ37VoLaPcTHCu1RWJ4llDkULYZIzOYxlhxBqYPB0RsRlmMU0R6tahtFe27mIdkHV+551ZWV4PLmVw==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/keyv/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/lodash": { - "version": "4.14.170", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.170.tgz", - "integrity": "sha512-bpcvu/MKHHeYX+qeEN8GE7DIravODWdACVA1ctevD8CN24RhPZIKMn9ntfAsrvLfSX3cR5RrBKAbYm9bGs0A+Q==" - }, - "node_modules/@types/long": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", - "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" - }, - "node_modules/@types/mime": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": 
"sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" - }, - "node_modules/@types/minipass": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@types/minipass/-/minipass-2.2.0.tgz", - "integrity": "sha512-wuzZksN4w4kyfoOv/dlpov4NOunwutLA/q7uc00xU02ZyUY+aoM5PWIXEKBMnm0NHd4a+N71BMjq+x7+2Af1fg==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/minipass/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/mocha": { - "version": "5.2.7", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-5.2.7.tgz", - "integrity": "sha512-NYrtPht0wGzhwe9+/idPaBB+TqkY9AhTvOLMkThm0IoEfLaiVQZwBwyJ5puCkO3AUCWrmcoePjp2mbFocKy4SQ==", - "dev": true - }, - "node_modules/@types/node": { - "version": "12.20.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.20.13.tgz", - "integrity": "sha512-1x8W5OpxPq+T85OUsHRP6BqXeosKmeXRtjoF39STcdf/UWLqUsoehstZKOi0CunhVqHG17AyZgpj20eRVooK6A==" - }, - "node_modules/@types/qs": { - "version": "6.9.6", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.6.tgz", - "integrity": "sha512-0/HnwIfW4ki2D8L8c9GVcG5I72s9jP5GSLVF0VIXDW00kmIpA6O33G7a8n59Tmh7Nz0WUC3rSb7PTY/sdW2JzA==" - }, - "node_modules/@types/range-parser": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.3.tgz", - "integrity": "sha512-ewFXqrQHlFsgc09MK5jP5iR7vumV/BYayNC6PgJO2LPe8vrnNFyjQjSppfEngITi0qvfKtzFvgKymGheFM9UOA==" - }, - "node_modules/@types/request": { - "version": "2.48.5", - "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.5.tgz", - "integrity": "sha512-/LO7xRVnL3DxJ1WkPGDQrp4VTV1reX9RkC85mJ+Qzykj2Bdw+mG15aAfDahc76HtknjzE16SX/Yddn6MxVbmGQ==", - "dependencies": { - "@types/caseless": "*", - "@types/node": "*", - "@types/tough-cookie": "*", - "form-data": "^2.5.0" - } - }, - "node_modules/@types/request/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/responselike": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", - "integrity": "sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/responselike/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/serve-static": { - "version": "1.13.9", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.9.tgz", - "integrity": "sha512-ZFqF6qa48XsPdjXV5Gsz0Zqmux2PerNd3a/ktL45mHpa19cuMi/cL8tcxdAx497yRh+QtYPuofjT9oWw9P7nkA==", - "dependencies": { - "@types/mime": "^1", - "@types/node": "*" - } - }, - "node_modules/@types/serve-static/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": 
"sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/sinon": { - "version": "9.0.11", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-9.0.11.tgz", - "integrity": "sha512-PwP4UY33SeeVKodNE37ZlOsR9cReypbMJOhZ7BVE0lB+Hix3efCOxiJWiE5Ia+yL9Cn2Ch72EjFTRze8RZsNtg==", - "dependencies": { - "@types/sinonjs__fake-timers": "*" - } - }, - "node_modules/@types/sinon-chai": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/@types/sinon-chai/-/sinon-chai-3.2.5.tgz", - "integrity": "sha512-bKQqIpew7mmIGNRlxW6Zli/QVyc3zikpGzCa797B/tRnD9OtHvZ/ts8sYXV+Ilj9u3QRaUEM8xrjgd1gwm1BpQ==", - "dependencies": { - "@types/chai": "*", - "@types/sinon": "*" - } - }, - "node_modules/@types/sinonjs__fake-timers": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.2.tgz", - "integrity": "sha512-dIPoZ3g5gcx9zZEszaxLSVTvMReD3xxyyDnQUjA6IYDG9Ba2AV0otMPs+77sG9ojB4Qr2N2Vk5RnKeuA0X/0bg==" - }, - "node_modules/@types/stream-buffers": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.3.tgz", - "integrity": "sha512-NeFeX7YfFZDYsCfbuaOmFQ0OjSmHreKBpp7MQ4alWQBHeh2USLsj7qyMyn9t82kjqIX516CR/5SRHnARduRtbQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/stream-buffers/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/tar": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.4.tgz", - "integrity": "sha512-0Xv+xcmkTsOZdIF4yCnd7RkOOyfyqPaqJ7RZFKnwdxfDbkN3eAAE9sHl8zJFqBz4VhxolW9EErbjR1oyH7jK2A==", - "dependencies": { - "@types/minipass": "*", - "@types/node": "*" - } - }, - "node_modules/@types/tar/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@types/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A==" - }, - "node_modules/@types/underscore": { - "version": "1.11.2", - "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.11.2.tgz", - "integrity": "sha512-Ls2ylbo7++ITrWk2Yc3G/jijwSq5V3GT0tlgVXEl2kKYXY3ImrtmTCoE2uyTWFRI5owMBriloZFWbE1SXOsE7w==" - }, - "node_modules/@types/ws": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-6.0.4.tgz", - "integrity": "sha512-PpPrX7SZW9re6+Ha8ojZG4Se8AZXgf0GK6zmfqEuCsY49LFDNXO3SByp44X3dFEqtB73lkCDAdUazhAjVPiNwg==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/ws/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/@ungap/promise-all-settled": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", - "integrity": 
"sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", - "dev": true - }, - "node_modules/accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dependencies": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", - "dev": true, - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", - "dev": true - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "node_modules/ansi-colors": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", - "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "dev": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", - "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": 
"sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "node_modules/array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "get-intrinsic": "^1.1.1", - "is-string": "^1.0.5" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/array.prototype.flat": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.4.tgz", - "integrity": "sha512-4470Xi3GAPAjZqFcljX2xzckv1qeKPizoNkiS0+O4IoPR2ZNpcjE0pkhdihlDouK+x6QOast26B4Q/O9DJnwSg==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/array.prototype.flatmap": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.4.tgz", - "integrity": "sha512-r9Z0zYoxqHz60vvQbWEdXIEtCwHF0yxaWfno9qzXeNHvfyl3BZqygmGzb84dsubyaXLH4husF+NFgMSdpZhk2Q==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/astral-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/async": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", - "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "engines": { - "node": "*" - } - }, - 
"node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/bignumber.js": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", - "integrity": "sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA==", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "dependencies": { - "bytes": "3.1.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browser-stdout": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", - "dev": true - }, - "node_modules/buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true - }, - "node_modules/byline": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", - "integrity": 
"sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cacheable-lookup": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", - "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==", - "engines": { - "node": ">=10.6.0" - } - }, - "node_modules/cacheable-request": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.1.tgz", - "integrity": "sha512-lt0mJ6YAnsrBErpTMWeu5kl/tg9xMAWjavYTN6VQXM1A/teBITuNcccXsCxF0tDQQJf9DfAaX5O4e0zp0KlfZw==", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^4.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "node_modules/chai": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.4.tgz", - "integrity": "sha512-yS5H68VYOCtN1cjfwumDSuzn/9c+yza4f3reKXlE5rUg7SFcCEy90gJvydNgOYtblyf4Zi6jIWRnXOgErta0KA==", - "dev": true, - "dependencies": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^3.0.1", - "get-func-name": "^2.0.0", - "pathval": "^1.1.1", - "type-detect": "^4.0.5" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/chalk": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", - "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/chokidar": { - "version": "3.5.1", - "resolved": 
"https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", - "dev": true, - "dependencies": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.1" - } - }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "engines": { - "node": ">=10" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dependencies": { - "mimic-response": "^1.0.0" - } - }, - "node_modules/cockatiel": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/cockatiel/-/cockatiel-1.1.1.tgz", - "integrity": "sha512-YO02ZhTcK2HOZodksWfg4tS1GYYt1j7R6U1unAkwcQf1uoIYKSBqPrfqXlLNbsyMvkDXMwa2nuwZDHbUkB1VbQ==", - "engines": { - "node": ">=10 <11 || >=12" - } - }, - "node_modules/color": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", - "dependencies": { - "color-convert": "^1.9.1", - "color-string": "^1.5.2" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, - "node_modules/color/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - 
"node_modules/color/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/colorspace": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.2.tgz", - "integrity": "sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ==", - "dependencies": { - "color": "3.0.x", - "text-hex": "1.0.x" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "node_modules/contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", - "dependencies": { - "safe-buffer": "5.1.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-disposition/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "node_modules/create-require": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", - "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true - }, - "node_modules/cross-spawn": { - "version": "6.0.5", - 
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dependencies": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "engines": { - "node": ">=4.8" - } - }, - "node_modules/cross-spawn/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/decompress-response/node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "engines": { - "node": ">=10" - } - }, - "node_modules/deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", - "dev": true, - "dependencies": { - "type-detect": "^4.0.0" - }, - "engines": { - "node": ">=0.12" - } - }, - "node_modules/deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true - }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, - "dependencies": { - "object-keys": "^1.0.12" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", 
- "engines": { - "node": ">= 0.6" - } - }, - "node_modules/destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "node_modules/diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/dirty-chai": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/dirty-chai/-/dirty-chai-2.0.1.tgz", - "integrity": "sha512-ys79pWKvDMowIDEPC6Fig8d5THiC0DJ2gmTeGzVAoEH18J8OzLud0Jh7I9IWg3NSk8x2UocznUuFmfHCXYZx9w==", - "dev": true - }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/enabled": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", - "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==" - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enquirer": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", - "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", - "dev": true, - "dependencies": { - "ansi-colors": "^4.1.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/error-ex/node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - }, - "node_modules/es-abstract": { - "version": "1.18.0", - "resolved": 
"https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "get-intrinsic": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", - "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.2", - "string.prototype.trimend": "^1.0.4", - "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "node_modules/eslint": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.12.1.tgz", - "integrity": "sha512-HlMTEdr/LicJfN08LB3nM1rRYliDXOmfoO4vj39xN6BLpFzF00hbwBoqHk8UcJ2M/3nlARZWy/mslvGEuZFvsg==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.0.0", - "@eslint/eslintrc": "^0.2.1", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.0.1", - "doctrine": "^3.0.0", - "enquirer": "^2.3.5", - "eslint-scope": "^5.1.1", - "eslint-utils": "^2.1.0", - "eslint-visitor-keys": "^2.0.0", - "espree": "^7.3.0", - "esquery": "^1.2.0", - "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", - "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "js-yaml": "^3.13.1", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash": "^4.17.19", - "minimatch": "^3.0.4", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "progress": "^2.0.0", - "regexpp": "^3.1.0", - "semver": "^7.2.1", - "strip-ansi": "^6.0.0", - "strip-json-comments": "^3.1.0", - "table": "^5.2.3", - "text-table": "^0.2.0", - "v8-compile-cache": "^2.0.3" - }, - "bin": { - "eslint": "bin/eslint.js" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/eslint-config-semistandard": { - "version": "15.0.1", - "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.1.tgz", - "integrity": "sha512-sfV+qNBWKOmF0kZJll1VH5XqOAdTmLlhbOl9WKI11d2eMEe+Kicxnpm24PQWHOqAfk5pAWU2An0LjNCXKa4Usg==", - "dev": true - }, - "node_modules/eslint-config-standard": { - "version": "16.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.0.tgz", - "integrity": 
"sha512-kMCehB9yXIG+LNsu9uXfm06o6Pt63TFAOzn9tUOzw4r/hFIxHhNR1Xomxy+B5zMrXhqyfHVEcmanzttEjGei9w==", - "dev": true - }, - "node_modules/eslint-config-standard-jsx": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", - "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", - "dev": true - }, - "node_modules/eslint-import-resolver-node": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", - "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", - "dev": true, - "dependencies": { - "debug": "^2.6.9", - "resolve": "^1.13.1" - } - }, - "node_modules/eslint-module-utils": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.1.tgz", - "integrity": "sha512-ZXI9B8cxAJIH4nfkhTwcRTEAnrVfobYqwjWy/QMCZ8rHkZHFjf9yO4BzpiF9kCSfNlMG54eKigISHpX0+AaT4A==", - "dev": true, - "dependencies": { - "debug": "^3.2.7", - "pkg-dir": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/eslint-module-utils/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/eslint-module-utils/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/eslint-plugin-es": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", - "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", - "dev": true, - "dependencies": { - "eslint-utils": "^2.0.0", - "regexpp": "^3.0.0" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/eslint-plugin-import": { - "version": "2.22.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.22.1.tgz", - "integrity": "sha512-8K7JjINHOpH64ozkAhpT3sd+FswIZTfMZTjdx052pnWrgRCVfp8op9tbjpAk3DdUeI/Ba4C8OjdC0r90erHEOw==", - "dev": true, - "dependencies": { - "array-includes": "^3.1.1", - "array.prototype.flat": "^1.2.3", - "contains-path": "^0.1.0", - "debug": "^2.6.9", - "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.4", - "eslint-module-utils": "^2.6.0", - "has": "^1.0.3", - "minimatch": "^3.0.4", - "object.values": "^1.1.1", - "read-pkg-up": "^2.0.0", - "resolve": "^1.17.0", - "tsconfig-paths": "^3.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/eslint-plugin-import/node_modules/doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", - "dev": true, - "dependencies": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint-plugin-import/node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "node_modules/eslint-plugin-node": { - "version": 
"11.1.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", - "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", - "dev": true, - "dependencies": { - "eslint-plugin-es": "^3.0.0", - "eslint-utils": "^2.0.0", - "ignore": "^5.1.1", - "minimatch": "^3.0.4", - "resolve": "^1.10.1", - "semver": "^6.1.0" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/eslint-plugin-node/node_modules/ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/eslint-plugin-node/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/eslint-plugin-react": { - "version": "7.21.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.21.5.tgz", - "integrity": "sha512-8MaEggC2et0wSF6bUeywF7qQ46ER81irOdWS4QWxnnlAEsnzeBevk1sWh7fhpCghPpXb+8Ks7hvaft6L/xsR6g==", - "dev": true, - "dependencies": { - "array-includes": "^3.1.1", - "array.prototype.flatmap": "^1.2.3", - "doctrine": "^2.1.0", - "has": "^1.0.3", - "jsx-ast-utils": "^2.4.1 || ^3.0.0", - "object.entries": "^1.1.2", - "object.fromentries": "^2.0.2", - "object.values": "^1.1.1", - "prop-types": "^15.7.2", - "resolve": "^1.18.1", - "string.prototype.matchall": "^4.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/eslint-plugin-react/node_modules/doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint-plugin-standard": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", - "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==", - "dev": true - }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/eslint-utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", - "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", - "dev": true, - "dependencies": { - "eslint-visitor-keys": "^1.1.0" - }, - "engines": { - "node": ">=6" - } - 
}, - "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/eslint/node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/eslint/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/eslint/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/eslint/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/eslint/node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - 
"node_modules/espree": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", - "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", - "dev": true, - "dependencies": { - "acorn": "^7.4.0", - "acorn-jsx": "^5.3.1", - "eslint-visitor-keys": "^1.3.0" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/espree/node_modules/eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esquery": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", - "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", - "dev": true, - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/esquery/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/etcd3": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/etcd3/-/etcd3-1.1.0.tgz", - "integrity": "sha512-9SnJvaPyW5IYdJHJWX91CYo1QZCAev2B7PxrQWIe2tGbutZOmsXHfjNDKwEltFWoG5h++K3/JfWPjJdOGX90hg==", - "dependencies": { - "@grpc/grpc-js": "^1.1.7", - 
"@grpc/proto-loader": "^0.5.5", - "bignumber.js": "^9.0.0", - "cockatiel": "^1.1.1" - } - }, - "node_modules/etcd3/node_modules/@grpc/proto-loader": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.6.tgz", - "integrity": "sha512-DT14xgw3PSzPxwS13auTEwxhMMOoz33DPUKNtmYK/QYbBSpLXJy78FGGs5yVoxVobEqPm4iW9MOIoz0A3bLTRQ==", - "dependencies": { - "lodash.camelcase": "^4.3.0", - "protobufjs": "^6.8.6" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "dependencies": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", - "dependencies": { - "accepts": "~1.3.7", - "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", - "content-type": "~1.0.4", - "cookie": "0.4.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/express/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": 
"sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true - }, - "node_modules/fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" - }, - "node_modules/fecha": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.1.tgz", - "integrity": "sha512-MMMQ0ludy/nBs1/o0zVOiKTpG7qMbonKUzjJgQFEuvq6INZ1OraKPRAWkBq5vlKLOUMpmNYG1JoN3oDPUQ9m3Q==" - }, - "node_modules/file-entry-cache": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", - "dev": true, - "dependencies": { - "flat-cache": "^2.0.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "dev": true, - "bin": { - "flat": "cli.js" - } - }, - "node_modules/flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", - "dev": true, - "dependencies": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/flat-cache/node_modules/rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/flatted": { - "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true - }, - "node_modules/fn.name": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", - "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==" - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", - "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "node_modules/functional-red-black-tree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", - "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", - "dev": true - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-func-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", - "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - } - }, - "node_modules/get-stdin": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", - "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "dependencies": { - "type-fest": "^0.8.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/got": { - "version": "11.8.2", - "resolved": "https://registry.npmjs.org/got/-/got-11.8.2.tgz", - "integrity": "sha512-D0QywKgIe30ODs+fm8wMZiAcZjypcCodPNuMz5H9Mny7RJ+IjJ10BdmGW7OM7fHXP+O7r6ZwapQ/YQmMSvB0UQ==", - "dependencies": { - "@sindresorhus/is": "^4.0.0", - "@szmarczak/http-timer": "^4.0.5", - "@types/cacheable-request": "^6.0.1", - "@types/responselike": "^1.0.0", - "cacheable-lookup": "^5.0.3", - "cacheable-request": "^7.0.1", - "decompress-response": "^6.0.0", - "http2-wrapper": "^1.0.0-beta.5.2", - "lowercase-keys": "^2.0.0", - "p-cancelable": "^2.0.0", - "responselike": "^2.0.0" - }, - "engines": { - "node": ">=10.19.0" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==", - "dev": true - }, - "node_modules/growl": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", - "dev": true, - "engines": { - "node": ">=4.x" - } - }, - "node_modules/har-schema": { - "version": 
"2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", - "dev": true - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "dev": true, - "bin": { - "he": "bin/he" - } - }, - "node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "node_modules/http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-errors/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - 
"node_modules/http2-wrapper": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", - "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", - "dependencies": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.0.0" - }, - "engines": { - "node": ">=10.19.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dev": true, - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true, - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/internal-slot": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", - "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", - "dev": true, - "dependencies": { - "get-intrinsic": "^1.1.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - 
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "node_modules/is-bigint": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.2.tgz", - "integrity": "sha512-0JV5+SOCQkIdzjBK9buARcV804Ddu7A0Qet6sHi3FimE9ne6m4BGQZfRn+NZiXbBk4F4XmHfDZIipLj9pX8dSA==", - "dev": true - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-boolean-object": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.1.tgz", - "integrity": "sha512-bXdQWkECBUIAcCkeH1unwJLIpZYaa5VvuygSyS/c2lf719mTKZDU5UdDRlpd01UjADgmW8RfqaP+mRaVPdr/Ng==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-core-module": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz", - "integrity": "sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A==", - "dependencies": { - "has": "^1.0.3" - } - }, - "node_modules/is-date-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.4.tgz", - "integrity": "sha512-/b4ZVsG7Z5XVtIxs/h9W8nvfLgSAyKYdtGWQLbqy6jA1icmgjf8WCoTKgeS4wy5tYaPePouzFMANbnj94c2Z+A==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dev": true, - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "engines": { - 
"node": ">=0.12.0" - } - }, - "node_modules/is-number-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.5.tgz", - "integrity": "sha512-RU0lI/n95pMoUKu9v1BZP5MBcZuNSVJkMkAG2dJqC4z2GlkGUNeH68SuHuBKBD/XFe+LHZ+f9BKkLET60Niedw==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-regex": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.3.tgz", - "integrity": "sha512-qSVXFz28HM7y+IWX6vLCsexdlvzT1PJNFSBuaQLQ5o0IEw8UDYW6/2+eCMVyIsbM8CNLX2a/QWmSpyxYEHY7CQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "has-symbols": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-string": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.6.tgz", - "integrity": "sha512-2gdzbKUuqtQ3lYNrUTQYoClPhm7oQu4UdpSZMp1/DGgkHBT8E2Z1l0yMdb6D4zNAxwDiMv8MdulKROJGNl0Q0w==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", - "dev": true, - "dependencies": { - "has-symbols": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "node_modules/isomorphic-ws": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", - "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==" - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "node_modules/jose": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/jose/-/jose-2.0.5.tgz", - "integrity": "sha512-BAiDNeDKTMgk4tvD0BbxJ8xHEHBZgpeRZ1zGPPsitSyMgjoMWiLGYAE7H7NpP5h0lPppQajQs871E8NHUrzVPA==", - "dependencies": { - "@panva/asn1.js": "^1.0.0" - }, - "engines": { - "node": ">=10.13.0 < 13 || >=13.7.0" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "node_modules/js-yaml": { - "version": "4.1.0", - 
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/js-yaml/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" - }, - "node_modules/json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", - "dev": true - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dev": true, - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/jsonpath-plus": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", - "integrity": "sha512-GSVwsrzW9LsA5lzsqe4CkuZ9wp+kxBb2GwNniaWzI2YFn5Ig42rSW8ZxVpWXaAfakXNrx5pgY5AbQq7kzX29kg==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "node_modules/jsx-ast-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.0.tgz", - "integrity": "sha512-EIsmt3O3ljsU6sot/J4E1zDRxfBNrhjyf/OKjlydwgEimQuznlM4Wv7U+ueONJMyEn1WRE0K8dhi3dVAXYT24Q==", - "dev": true, - "dependencies": { - "array-includes": "^3.1.2", - "object.assign": 
"^4.1.2" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/just-extend": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-4.2.1.tgz", - "integrity": "sha512-g3UB796vUFIY90VIv/WX3L2c8CS2MdWUww3CNrYmqza1Fg0DURc2K/O4YrnklBdQarSJ/y8JnJYDGc+1iumQjg==" - }, - "node_modules/keyv": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.0.3.tgz", - "integrity": "sha512-zdGa2TOpSZPq5mU6iowDARnMBZgtCqJ11dJROFi6tg6kTn4nuUdU09lFyLFSaHrWqpIJ+EBq4E8/Dc0Vx5vLdA==", - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/kuler": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", - "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==" - }, - "node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" - }, - "node_modules/lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=" - }, - "node_modules/log-symbols": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", - "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", - "dev": true, - "dependencies": { - "chalk": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/logform": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/logform/-/logform-2.2.0.tgz", - "integrity": "sha512-N0qPlqfypFx7UHNn4B3lzS/b0uLqt2hmuoa+PpuXNYgozdJYAyauF5Ky0BWVjrxDlMWiT3qN4zPq3vVAfZy7Yg==", - "dependencies": { - "colors": "^1.2.1", - "fast-safe-stringify": "^2.0.4", - "fecha": "^4.2.0", - "ms": "^2.1.1", - "triple-beam": "^1.3.0" - } - }, - "node_modules/logform/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/long": { - 
"version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "dev": true, - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } - }, - "node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/make-error": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", - "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", - "dependencies": { - "mime-db": "1.47.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": 
"sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "node_modules/minipass": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", - "integrity": "sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mocha": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.4.0.tgz", - "integrity": "sha512-hJaO0mwDXmZS4ghXsvPVriOhsxQ7ofcpQdm8dE+jISUOKopitvnXFQmpRR7jd2K6VBG6E26gU3IAbXXGIbu4sQ==", - "dev": true, - "dependencies": { - "@ungap/promise-all-settled": "1.1.2", - "ansi-colors": "4.1.1", - "browser-stdout": "1.3.1", - "chokidar": "3.5.1", - "debug": "4.3.1", - "diff": "5.0.0", - "escape-string-regexp": "4.0.0", - "find-up": "5.0.0", - "glob": "7.1.6", - "growl": "1.10.5", - "he": "1.2.0", - "js-yaml": "4.0.0", - "log-symbols": "4.0.0", - "minimatch": "3.0.4", - "ms": "2.1.3", - "nanoid": "3.1.20", - "serialize-javascript": "5.0.1", - "strip-json-comments": "3.1.1", - "supports-color": "8.1.1", - "which": "2.0.2", - "wide-align": "1.1.3", - "workerpool": "6.1.0", - "yargs": "16.2.0", - "yargs-parser": "20.2.4", - "yargs-unparser": "2.0.0" - }, - "bin": { - "_mocha": "bin/_mocha", - "mocha": "bin/mocha" - }, - "engines": { - "node": ">= 10.12.0" - } - }, - "node_modules/mocha/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/mocha/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/mocha/node_modules/debug/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/mocha/node_modules/diff": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", - "integrity": 
"sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", - "dev": true, - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/mocha/node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/mocha/node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/mocha/node_modules/js-yaml": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", - "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/mocha/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/mocha/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mocha/node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/mocha/node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mocha/node_modules/yargs-parser": { - "version": "20.2.4", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", - "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/nanoid": { - "version": "3.1.20", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz", - "integrity": 
"sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==", - "dev": true, - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/nats": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/nats/-/nats-2.0.4.tgz", - "integrity": "sha512-cICTjoL09YZnh6O4vg7PnKUH9P/w6xPs4iZns/VA6h8iPe1ZhOY6tHEdjZ/wJ1eAFZKX+gw1+CxId0RK5NUbqA==", - "dependencies": { - "nkeys.js": "^1.0.0-9" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", - "dev": true - }, - "node_modules/negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node_modules/nise": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/nise/-/nise-4.1.0.tgz", - "integrity": "sha512-eQMEmGN/8arp0xsvGoQ+B1qvSkR73B1nWSCh7nOt5neMCtwcQVYQGdzQMhcNscktTsWB54xnlSQFzOAPJD8nXA==", - "dependencies": { - "@sinonjs/commons": "^1.7.0", - "@sinonjs/fake-timers": "^6.0.0", - "@sinonjs/text-encoding": "^0.7.1", - "just-extend": "^4.0.2", - "path-to-regexp": "^1.7.0" - } - }, - "node_modules/nise/node_modules/path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "dependencies": { - "isarray": "0.0.1" - } - }, - "node_modules/nkeys.js": { - "version": "1.0.0-9", - "resolved": "https://registry.npmjs.org/nkeys.js/-/nkeys.js-1.0.0-9.tgz", - "integrity": "sha512-m9O0NQT+3rUe1om6MWpxV77EuHql/LdorDH+FYQkoeARcM2V0sQ89kM36fArWaHWq/25EmNmQUW0MhLTcbqW1A==", - "dependencies": { - "@types/node": "^14.0.26", - "tweetnacl": "^1.0.3" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/nkeys.js/node_modules/@types/node": { - "version": "14.17.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.17.1.tgz", - "integrity": "sha512-/tpUyFD7meeooTRwl3sYlihx2BrJE7q9XF71EguPFIySj9B7qgnRtHsHTho+0AUm4m1SvWGm6uSncrR94q6Vtw==" - }, - "node_modules/nkeys.js/node_modules/tweetnacl": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", - "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" - }, - "node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "node_modules/normalize-package-data/node_modules/semver": { - "version": "5.7.1", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "dependencies": { - "path-key": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-hash": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz", - "integrity": "sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/object-inspect": { - "version": "1.10.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.3.tgz", - "integrity": "sha512-e5mCJlSH7poANfC8z8S9s9S2IN5/4Zb3aZ33f5s8YqoazCFzNLloLU8r5VCG+G7WoqLvAAZoVMcy3tp/3X0Plw==", - "dev": true - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.entries": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.3.tgz", - "integrity": "sha512-ym7h7OZebNS96hn5IJeyUmaWhaSM4SVtAPPfNLQEI2MYWCO2egsITb9nab2+i/Pwibx+R0mtn+ltKJXRSeTMGg==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.fromentries": { - "version": "2.0.4", - "resolved": 
"https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.4.tgz", - "integrity": "sha512-EsFBshs5RUUpQEY1D4q/m59kMfz4YJvxuNCJcv/jWwOJr34EaVnG11ZrZa0UHB3wnzV1wx8m58T4hQL8IuNXlQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/oidc-token-hash": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-5.0.1.tgz", - "integrity": "sha512-EvoOtz6FIEBzE+9q253HsLCVRiK/0doEJ2HCvvqMQb3dHZrP3WlJKYtJ55CRTw4jmYomzH4wkPuCj/I3ZvpKxQ==", - "engines": { - "node": "^10.13.0 || >=12.0.0" - } - }, - "node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/one-time": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", - "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", - "dependencies": { - "fn.name": "1.x.x" - } - }, - "node_modules/openid-client": { - "version": "4.7.3", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.7.3.tgz", - "integrity": "sha512-YLwZQLSjo3gdSVxw/G25ddoRp9oCpXkREZXssmenlejZQPsnTq+yQtFUcBmC7u3VVkx+gwqXZF7X0CtAAJrRRg==", - "dependencies": { - "aggregate-error": "^3.1.0", - "got": "^11.8.0", - "jose": "^2.0.5", - "lru-cache": "^6.0.0", - "make-error": "^1.3.6", - "object-hash": "^2.0.1", - "oidc-token-hash": "^5.0.1" - }, - "engines": { - "node": "^10.19.0 || >=12.0.0 < 13 || >=13.7.0 < 14 || >= 14.2.0" - } - }, - "node_modules/optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/p-cancelable": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", - "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "dependencies": { - "error-ex": "^1.2.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "engines": { - "node": ">=4" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" - }, - "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "node_modules/path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dev": true, - "dependencies": { - "pify": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pathval": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", - "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", - "dev": true, - "engines": { - "node": "*" - } - 
}, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "node_modules/picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", - "dev": true, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pkg-conf": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-3.1.0.tgz", - "integrity": "sha512-m0OTbR/5VPNPqO1ph6Fqbj7Hv6QU7gR/tQW40ZqrL1rjgCU85W6C1bJn0BItuJqnR98PWzw7Z8hHeChD1WrgdQ==", - "dev": true, - "dependencies": { - "find-up": "^3.0.0", - "load-json-file": "^5.2.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/load-json-file": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", - "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.15", - "parse-json": "^4.0.0", - "pify": "^4.0.1", - "strip-bom": "^3.0.0", - "type-fest": "^0.3.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": 
"sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-conf/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-conf/node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-conf/node_modules/type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-dir": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", - "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", - "dev": true, - "dependencies": { - "find-up": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-dir/node_modules/find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "dependencies": { - "locate-path": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-dir/node_modules/locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "dependencies": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-dir/node_modules/p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "dependencies": { - "p-try": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-dir/node_modules/p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "dependencies": { - "p-limit": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pkg-dir/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "node_modules/progress": { - "version": "2.0.3", - "resolved": 
"https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/prop-types": { - "version": "15.7.2", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz", - "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==", - "dev": true, - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.8.1" - } - }, - "node_modules/protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "hasInstallScript": true, - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - }, - "bin": { - "pbjs": "bin/pbjs", - "pbts": "bin/pbts" - } - }, - "node_modules/protobufjs/node_modules/@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - }, - "node_modules/proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "dependencies": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" - }, - "node_modules/quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": 
"sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "engines": { - "node": ">=10" - } - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "dependencies": { - "bytes": "3.1.0", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "dev": true - }, - "node_modules/read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dev": true, - "dependencies": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dev": true, - "dependencies": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "dependencies": { - "locate-path": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "dependencies": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "dependencies": { - "p-try": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "dependencies": { - "p-limit": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - 
"integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", - "dev": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", - "dependencies": { - "resolve": "^1.1.6" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/request/node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" - }, - "node_modules/resolve": 
{ - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "dependencies": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, - "node_modules/resolve-alpn": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.1.2.tgz", - "integrity": "sha512-8OyfzhAtA32LVUsJSke3auIyINcwdh5l3cvYKdKO0nvsYSKuiLfTM5i78PJswFPT8y6cPW+L1v6/hE95chcpDA==" - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/responselike": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.0.tgz", - "integrity": "sha512-xH48u3FTB9VsZw7R+vvgaKeLKzT6jOogbQhEe/jewwnZgzPcnyWui2Av6JpoYZF/91uueC+lqhWqeURw5/qhCw==", - "dependencies": { - "lowercase-keys": "^2.0.0" - } - }, - "node_modules/rfc4648": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.4.0.tgz", - "integrity": "sha512-3qIzGhHlMHA6PoT6+cdPKZ+ZqtxkIvg8DZGKA5z6PQ33/uuhoJ+Ws/D/J9rXW6gXodgH8QYlz2UCl+sdUDmNIg==" - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/semistandard": { - "version": "16.0.0", - "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-16.0.0.tgz", - "integrity": "sha512-pLETGjFyl0ETMDAEZxkC1OJBmNmPIMpMkayStGTgHMMh/5FM7Rbk5NWc1t7yfQ4PrRURQH8MUg3ZxvojJJifcw==", - "dev": true, - "dependencies": { - "eslint": "~7.12.1", - "eslint-config-semistandard": "15.0.1", - "eslint-config-standard": "16.0.0", - "eslint-config-standard-jsx": "10.0.0", - "eslint-plugin-import": "~2.22.1", - "eslint-plugin-node": "~11.1.0", - "eslint-plugin-promise": "~4.2.1", - "eslint-plugin-react": "~7.21.5", - "eslint-plugin-standard": "~4.0.2", - "standard-engine": "^14.0.0" - }, - "bin": { - "semistandard": "bin/cmd.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": 
"sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", - "dependencies": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.7.2", - "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" - }, - "node_modules/serialize-javascript": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz", - "integrity": "sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA==", - "dev": true, - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", - "dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" - }, - "node_modules/shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dependencies": { - "shebang-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shelljs": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", - "integrity": "sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", - "dependencies": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - }, - "bin": { - "shjs": "bin/shjs" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - 
"node_modules/sinon": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-10.0.0.tgz", - "integrity": "sha512-XAn5DxtGVJBlBWYrcYKEhWCz7FLwZGdyvANRyK06419hyEpdT0dMc5A8Vcxg5SCGHc40CsqoKsc1bt1CbJPfNw==", - "dev": true, - "dependencies": { - "@sinonjs/commons": "^1.8.1", - "@sinonjs/fake-timers": "^6.0.1", - "@sinonjs/samsam": "^5.3.1", - "diff": "^4.0.2", - "nise": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "node_modules/sleep-promise": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/sleep-promise/-/sleep-promise-9.1.0.tgz", - "integrity": "sha512-UHYzVpz9Xn8b+jikYSD6bqvf754xL2uBUzDFwiU6NcdZeifPr6UfgU43xpkPu67VMS88+TI2PSI7Eohgqf2fKA==" - }, - "node_modules/slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/slice-ansi/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/slice-ansi/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/slice-ansi/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "dev": true, - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - 
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-license-ids": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.9.tgz", - "integrity": "sha512-Ki212dKK4ogX+xDo4CtOZBVIwhsKBEfsEEcwmJfLQzirgc2jIWdzg40Unxz/HzEUqM1WFzVlQSMF9kZZ2HboLQ==", - "dev": true - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "node_modules/sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stack-trace": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=", - "engines": { - "node": "*" - } - }, - "node_modules/standard-engine": { - "version": "14.0.1", - "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", - "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", - "dev": true, - "dependencies": { - "get-stdin": "^8.0.0", - "minimist": "^1.2.5", - "pkg-conf": "^3.1.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8.10" - } - }, - "node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/stream-buffers": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", - "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string.prototype.matchall": { - "version": "4.0.4", - "resolved": 
"https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.4.tgz", - "integrity": "sha512-pknFIWVachNcyqRfaQSeu/FUfpvJTe4uskUSZ9Wc1RijsPuzbZ8TyYT8WCNnntCjUEqQ3vUHMAfVj2+wLAisPQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has-symbols": "^1.0.1", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.3.1", - "side-channel": "^1.0.4" - } - }, - "node_modules/string.prototype.trimend": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", - "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/string.prototype.trimstart": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", - "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", - "dev": true, - "dependencies": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/table/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/table/node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "node_modules/table/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/table/node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/table/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/tar": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.0.tgz", - "integrity": "sha512-DUCttfhsnLCjwoDoFcI+B2iJgYa93vBnDUATYEeRx6sntCTdN01VnqsIuTlALXla/LWooNg0yEGeB+Y8WdFxGA==", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/text-hex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", - "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", - "dev": true - }, - "node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dependencies": { - "rimraf": "^3.0.0" - }, - "engines": { - "node": ">=8.17.0" - } - }, - "node_modules/tmp-promise": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.2.tgz", - "integrity": "sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA==", - "dependencies": { - "tmp": "^0.2.0" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": 
"https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/triple-beam": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", - "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" - }, - "node_modules/ts-node": { - "version": "9.1.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz", - "integrity": "sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg==", - "dev": true, - "dependencies": { - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "source-map-support": "^0.5.17", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/ts-sinon": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ts-sinon/-/ts-sinon-2.0.1.tgz", - "integrity": "sha512-uI5huDCY6Gw6Yczmyd/Jcu8gZZYtWO0HakPShvDmlrgcywLyFZ7lgWt1y+gd/x79ReHh+rhMAJkhQkGRnPNikw==", - "dependencies": { - "@types/node": "^14.6.1", - "@types/sinon": "^9.0.5", - "@types/sinon-chai": "^3.2.4", - "sinon": "^9.0.3" - } - }, - "node_modules/ts-sinon/node_modules/@types/node": { - "version": "14.17.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.17.1.tgz", - "integrity": "sha512-/tpUyFD7meeooTRwl3sYlihx2BrJE7q9XF71EguPFIySj9B7qgnRtHsHTho+0AUm4m1SvWGm6uSncrR94q6Vtw==" - }, - "node_modules/ts-sinon/node_modules/sinon": { - "version": "9.2.4", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-9.2.4.tgz", - "integrity": "sha512-zljcULZQsJxVra28qIAL6ow1Z9tpattkCTEJR4RBP3TGc00FcttsP5pK284Nas5WjMZU5Yzy3kAIp3B3KRf5Yg==", - "dependencies": { - "@sinonjs/commons": "^1.8.1", - "@sinonjs/fake-timers": "^6.0.1", - "@sinonjs/samsam": "^5.3.1", - "diff": "^4.0.2", - "nise": "^4.0.4", - "supports-color": "^7.1.0" - } - }, - "node_modules/tsconfig-paths": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.9.0.tgz", - "integrity": "sha512-dRcuzokWhajtZWkQsDVKbWyY+jgcLC5sqJhg2PSgf4ZkH2aHPvaOY8YWGhmjb68b5qqTfasSsDO9k7RUiEmZAw==", - "dev": true, - "dependencies": { - "@types/json5": "^0.0.29", - "json5": "^1.0.1", - "minimist": "^1.2.0", - "strip-bom": "^3.0.0" - } - }, - "node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": 
"sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "engines": { - "node": ">=4" - } - }, - "node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typescript": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.2.4.tgz", - "integrity": "sha512-V+evlYHZnQkaz8TRBuxTA92yZBPotr5H+WhQ7bD3hZUndx5tGOa1fuCgeSjxAzM1RiN5IzvadIXTVefuuwZCRg==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=4.2.0" - } - }, - "node_modules/unbox-primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", - "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1", - "has-bigints": "^1.0.1", - "has-symbols": "^1.0.2", - "which-boxed-primitive": "^1.0.2" - } - }, - "node_modules/underscore": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.1.tgz", - "integrity": "sha512-hzSoAVtJF+3ZtiFX0VgfFPHEDRm7Y/QPjGyNo4TVdnDTdft3tr8hEkD25a1jC+TjTuE7tkHGKkhwCgs9dgBB2g==" - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", - "dev": true - }, - "node_modules/validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dev": true, - "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "node_modules/wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", - "dev": true, - "dependencies": { - "string-width": "^1.0.2 || 2" - } - }, - "node_modules/wide-align/node_modules/ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/wide-align/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/wide-align/node_modules/string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "dependencies": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - 
"node_modules/wide-align/node_modules/strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "dependencies": { - "ansi-regex": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/winston": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/winston/-/winston-3.3.3.tgz", - "integrity": "sha512-oEXTISQnC8VlSAKf1KYSSd7J6IWuRPQqDdo8eoRNaYKLvwSb5+79Z3Yi1lrl6KDpU6/VWaxpakDAtb1oQ4n9aw==", - "dependencies": { - "@dabh/diagnostics": "^2.0.2", - "async": "^3.1.0", - "is-stream": "^2.0.0", - "logform": "^2.2.0", - "one-time": "^1.0.0", - "readable-stream": "^3.4.0", - "stack-trace": "0.0.x", - "triple-beam": "^1.3.0", - "winston-transport": "^4.4.0" - }, - "engines": { - "node": ">= 6.4.0" - } - }, - "node_modules/winston-transport": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.4.0.tgz", - "integrity": "sha512-Lc7/p3GtqtqPBYYtS6KCN3c77/2QCev51DvcJKbkFPQNoj1sinkGwLGFDxkXY9J6p9+EPnYs+D90uwbnaiURTw==", - "dependencies": { - "readable-stream": "^2.3.7", - "triple-beam": "^1.2.0" - }, - "engines": { - "node": ">= 6.4.0" - } - }, - "node_modules/winston-transport/node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "node_modules/winston-transport/node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/winston-transport/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/winston-transport/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/winston/node_modules/is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/workerpool": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.0.tgz", - "integrity": "sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg==", - "dev": true - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "node_modules/write": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "dev": true, - "dependencies": { - "mkdirp": "^0.5.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/write/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/ws": { - "version": "7.4.5", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.5.tgz", - "integrity": "sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g==", - "engines": { - "node": ">=8.3.0" - } - }, - "node_modules/wtfnode": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.4.tgz", - "integrity": "sha512-64GEKtMt/MUBuAm+8kHqP74ojjafzu00aT0JKsmkIwYmjRQ/odO0yhbzKLm+Z9v1gMla+8dwITRKzTAlHsB+Og==", - "dev": true, - "bin": { - "wtfnode": "proxy.js" - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "engines": { - "node": ">=10" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - }, - "node_modules/yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "20.2.7", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", - "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-unparser": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", - "integrity": 
"sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", - "dev": true, - "dependencies": { - "camelcase": "^6.0.0", - "decamelize": "^4.0.0", - "flat": "^5.0.2", - "is-plain-obj": "^2.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-unparser/node_modules/camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-unparser/node_modules/decamelize": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", - "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/yn": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", - "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "engines": { - "node": ">=10" - } - } - }, - "dependencies": { - "@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", - "dev": true, - "requires": { - "@babel/highlight": "^7.12.13" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz", - "integrity": "sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A==", - "dev": true - }, - "@babel/highlight": { - "version": "7.14.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.0.tgz", - "integrity": "sha512-YSCOwxvTYEIMSGaBQb5kDDsCopDdiUGsqpatp3fOlI4+2HQSkTmEVWnVuySdAC5EWCqSWWTv0ib63RjR7dTBdg==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.14.0", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - 
"version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "@dabh/diagnostics": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.2.tgz", - "integrity": "sha512-+A1YivoVDNNVCdfozHSR8v/jyuuLTMXwjWuxPFlFlUapXoGc+Gj9mDlTDDfrwl7rXCl2tNZ0kE8sIBO6YOn96Q==", - "requires": { - "colorspace": "1.1.x", - "enabled": "2.0.x", - "kuler": "^2.0.0" - } - }, - "@eslint/eslintrc": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.2.2.tgz", - "integrity": "sha512-EfB5OHNYp1F4px/LI/FEnGylop7nOqkQ1LRzCM0KccA2U8tvV8w01KBv37LbO7nW4H+YhKyo2LcJhRwjjV17QQ==", - "dev": true, - "requires": { - "ajv": "^6.12.4", - "debug": "^4.1.1", - "espree": "^7.3.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - "import-fresh": "^3.2.1", - "js-yaml": "^3.13.1", - "lodash": "^4.17.19", - "minimatch": "^3.0.4", - "strip-json-comments": "^3.1.1" - }, - "dependencies": { - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - } - } - }, - "@grpc/grpc-js": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.2.tgz", - "integrity": "sha512-UXepkOKCATJrhHGsxt+CGfpZy9zUn1q9mop5kfcXq1fBkTePxVNPOdnISlCbJFlCtld+pSLGyZCzr9/zVprFKA==", - "requires": { - "@types/node": ">=12.12.47" - } - }, - "@grpc/proto-loader": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.2.tgz", - "integrity": "sha512-q2Qle60Ht2OQBCp9S5hv1JbI4uBBq6/mqSevFNK3ZEgRDBCAkWqZPUhD/K9gXOHrHKluliHiVq2L9sw1mVyAIg==", - "requires": { - "@types/long": "^4.0.1", - "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.1.1" - }, - "dependencies": { - "yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "requires": { - "cliui": "^7.0.2", - 
"escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - } - } - }, - "@kubernetes/client-node": { - "version": "0.14.3", - "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.14.3.tgz", - "integrity": "sha512-9hHGDNm2JEFQcRTpDxVoAVr0fowU+JH/l5atCXY9VXwvFM18pW5wr2LzLP+Q2Rh+uQv7Moz4gEjEKSCgVKykEQ==", - "requires": { - "@types/js-yaml": "^3.12.1", - "@types/node": "^10.12.0", - "@types/request": "^2.47.1", - "@types/stream-buffers": "^3.0.3", - "@types/tar": "^4.0.3", - "@types/underscore": "^1.8.9", - "@types/ws": "^6.0.1", - "byline": "^5.0.0", - "execa": "1.0.0", - "isomorphic-ws": "^4.0.1", - "js-yaml": "^3.13.1", - "jsonpath-plus": "^0.19.0", - "openid-client": "^4.1.1", - "request": "^2.88.0", - "rfc4648": "^1.3.0", - "shelljs": "^0.8.2", - "stream-buffers": "^3.0.2", - "tar": "^6.0.2", - "tmp-promise": "^3.0.2", - "tslib": "^1.9.3", - "underscore": "^1.9.1", - "ws": "^7.3.1" - }, - "dependencies": { - "@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - } - } - }, - "@panva/asn1.js": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@panva/asn1.js/-/asn1.js-1.0.0.tgz", - "integrity": "sha512-UdkG3mLEqXgnlKsWanWcgb6dOjUzJ+XC5f+aWw30qrtjxeNUSfKX1cd5FBzOaXQumoe9nIqeZUvrRJS03HCCtw==" - }, - "@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" - }, - "@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" - }, - "@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" - }, - "@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" - }, - "@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", - "requires": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" - }, - "@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" - }, - "@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": 
"sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" - }, - "@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" - }, - "@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" - }, - "@sindresorhus/is": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.0.1.tgz", - "integrity": "sha512-Qm9hBEBu18wt1PO2flE7LPb30BHMQt1eQgbV76YntdNk73XZGpn3izvGTYxbGgzXKgbCjiia0uxTd3aTNQrY/g==" - }, - "@sinonjs/commons": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz", - "integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==", - "requires": { - "type-detect": "4.0.8" - } - }, - "@sinonjs/fake-timers": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz", - "integrity": "sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA==", - "requires": { - "@sinonjs/commons": "^1.7.0" - } - }, - "@sinonjs/samsam": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-5.3.1.tgz", - "integrity": "sha512-1Hc0b1TtyfBu8ixF/tpfSHTVWKwCBLY4QJbkgnE7HcwyvT2xArDxb4K7dMgqRm3szI+LJbzmW/s4xxEhv6hwDg==", - "requires": { - "@sinonjs/commons": "^1.6.0", - "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" - } - }, - "@sinonjs/text-encoding": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.1.tgz", - "integrity": "sha512-+iTbntw2IZPb/anVDbypzfQa+ay64MW0Zo8aJ8gZPWMMK6/OubMVb6lUPMagqjOPnmtauXnFCACVl3O7ogjeqQ==" - }, - "@szmarczak/http-timer": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.5.tgz", - "integrity": "sha512-PyRA9sm1Yayuj5OIoJ1hGt2YISX45w9WcFbh6ddT0Z/0yaFxOtGLInr4jUfU1EAFVs0Yfyfev4RNwBlUaHdlDQ==", - "requires": { - "defer-to-connect": "^2.0.0" - } - }, - "@types/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ==", - "requires": { - "@types/connect": "*", - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/cacheable-request": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.1.tgz", - "integrity": "sha512-ykFq2zmBGOCbpIXtoVbz4SKY5QriWPh3AjyU4G74RYbtt5yOc5OfaY75ftjg7mikMOla1CTGpX3lLbuJh8DTrQ==", - "requires": { - "@types/http-cache-semantics": "*", - "@types/keyv": "*", - "@types/node": "*", - "@types/responselike": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/caseless": { - "version": "0.12.2", - "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", - "integrity": 
"sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" - }, - "@types/chai": { - "version": "4.2.18", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.18.tgz", - "integrity": "sha512-rS27+EkB/RE1Iz3u0XtVL5q36MGDWbgYe7zWiodyKNUnthxY0rukK5V36eiUCtCisB7NN8zKYH6DO2M37qxFEQ==" - }, - "@types/connect": { - "version": "3.4.34", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.34.tgz", - "integrity": "sha512-ePPA/JuI+X0vb+gSWlPKOY0NdNAie/rPUqX2GUPpbZwiKTkSPhjXWuee47E4MtE54QVzGCQMQkAL6JhV2E1+cQ==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/express": { - "version": "4.17.11", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.11.tgz", - "integrity": "sha512-no+R6rW60JEc59977wIxreQVsIEOAYwgCqldrA/vkpCnbD7MqTefO97lmoBe4WE0F156bC4uLSP1XHDOySnChg==", - "requires": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.18", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "@types/express-serve-static-core": { - "version": "4.17.19", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.19.tgz", - "integrity": "sha512-DJOSHzX7pCiSElWaGR8kCprwibCB/3yW6vcT8VG3P0SJjnv19gnWG/AZMfM60Xj/YJIp/YCaDHyvzsFVeniARA==", - "requires": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/http-cache-semantics": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.0.tgz", - "integrity": "sha512-c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A==" - }, - "@types/js-yaml": { - "version": "3.12.6", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.6.tgz", - "integrity": "sha512-cK4XqrLvP17X6c0C8n4iTbT59EixqyXL3Fk8/Rsk4dF3oX4dg70gYUXrXVUUHpnsGMPNlTQMqf+TVmNPX6FmSQ==" - }, - "@types/json5": { - "version": "0.0.29", - "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", - "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", - "dev": true - }, - "@types/keyv": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.1.tgz", - "integrity": "sha512-MPtoySlAZQ37VoLaPcTHCu1RWJ4llDkULYZIzOYxlhxBqYPB0RsRlmMU0R6tahtFe27mIdkHV+551ZWV4PLmVw==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/lodash": { - "version": "4.14.170", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.170.tgz", - "integrity": "sha512-bpcvu/MKHHeYX+qeEN8GE7DIravODWdACVA1ctevD8CN24RhPZIKMn9ntfAsrvLfSX3cR5RrBKAbYm9bGs0A+Q==" - }, - "@types/long": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", - "integrity": 
"sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" - }, - "@types/mime": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" - }, - "@types/minipass": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@types/minipass/-/minipass-2.2.0.tgz", - "integrity": "sha512-wuzZksN4w4kyfoOv/dlpov4NOunwutLA/q7uc00xU02ZyUY+aoM5PWIXEKBMnm0NHd4a+N71BMjq+x7+2Af1fg==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/mocha": { - "version": "5.2.7", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-5.2.7.tgz", - "integrity": "sha512-NYrtPht0wGzhwe9+/idPaBB+TqkY9AhTvOLMkThm0IoEfLaiVQZwBwyJ5puCkO3AUCWrmcoePjp2mbFocKy4SQ==", - "dev": true - }, - "@types/node": { - "version": "12.20.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.20.13.tgz", - "integrity": "sha512-1x8W5OpxPq+T85OUsHRP6BqXeosKmeXRtjoF39STcdf/UWLqUsoehstZKOi0CunhVqHG17AyZgpj20eRVooK6A==" - }, - "@types/qs": { - "version": "6.9.6", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.6.tgz", - "integrity": "sha512-0/HnwIfW4ki2D8L8c9GVcG5I72s9jP5GSLVF0VIXDW00kmIpA6O33G7a8n59Tmh7Nz0WUC3rSb7PTY/sdW2JzA==" - }, - "@types/range-parser": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.3.tgz", - "integrity": "sha512-ewFXqrQHlFsgc09MK5jP5iR7vumV/BYayNC6PgJO2LPe8vrnNFyjQjSppfEngITi0qvfKtzFvgKymGheFM9UOA==" - }, - "@types/request": { - "version": "2.48.5", - "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.5.tgz", - "integrity": "sha512-/LO7xRVnL3DxJ1WkPGDQrp4VTV1reX9RkC85mJ+Qzykj2Bdw+mG15aAfDahc76HtknjzE16SX/Yddn6MxVbmGQ==", - "requires": { - "@types/caseless": "*", - "@types/node": "*", - "@types/tough-cookie": "*", - "form-data": "^2.5.0" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/responselike": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", - "integrity": "sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/serve-static": { - "version": "1.13.9", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.9.tgz", - "integrity": "sha512-ZFqF6qa48XsPdjXV5Gsz0Zqmux2PerNd3a/ktL45mHpa19cuMi/cL8tcxdAx497yRh+QtYPuofjT9oWw9P7nkA==", - "requires": { - "@types/mime": "^1", - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": 
"sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/sinon": { - "version": "9.0.11", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-9.0.11.tgz", - "integrity": "sha512-PwP4UY33SeeVKodNE37ZlOsR9cReypbMJOhZ7BVE0lB+Hix3efCOxiJWiE5Ia+yL9Cn2Ch72EjFTRze8RZsNtg==", - "requires": { - "@types/sinonjs__fake-timers": "*" - } - }, - "@types/sinon-chai": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/@types/sinon-chai/-/sinon-chai-3.2.5.tgz", - "integrity": "sha512-bKQqIpew7mmIGNRlxW6Zli/QVyc3zikpGzCa797B/tRnD9OtHvZ/ts8sYXV+Ilj9u3QRaUEM8xrjgd1gwm1BpQ==", - "requires": { - "@types/chai": "*", - "@types/sinon": "*" - } - }, - "@types/sinonjs__fake-timers": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.2.tgz", - "integrity": "sha512-dIPoZ3g5gcx9zZEszaxLSVTvMReD3xxyyDnQUjA6IYDG9Ba2AV0otMPs+77sG9ojB4Qr2N2Vk5RnKeuA0X/0bg==" - }, - "@types/stream-buffers": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.3.tgz", - "integrity": "sha512-NeFeX7YfFZDYsCfbuaOmFQ0OjSmHreKBpp7MQ4alWQBHeh2USLsj7qyMyn9t82kjqIX516CR/5SRHnARduRtbQ==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/tar": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.4.tgz", - "integrity": "sha512-0Xv+xcmkTsOZdIF4yCnd7RkOOyfyqPaqJ7RZFKnwdxfDbkN3eAAE9sHl8zJFqBz4VhxolW9EErbjR1oyH7jK2A==", - "requires": { - "@types/minipass": "*", - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@types/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A==" - }, - "@types/underscore": { - "version": "1.11.2", - "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.11.2.tgz", - "integrity": "sha512-Ls2ylbo7++ITrWk2Yc3G/jijwSq5V3GT0tlgVXEl2kKYXY3ImrtmTCoE2uyTWFRI5owMBriloZFWbE1SXOsE7w==" - }, - "@types/ws": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-6.0.4.tgz", - "integrity": "sha512-PpPrX7SZW9re6+Ha8ojZG4Se8AZXgf0GK6zmfqEuCsY49LFDNXO3SByp44X3dFEqtB73lkCDAdUazhAjVPiNwg==", - "requires": { - "@types/node": "*" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "@ungap/promise-all-settled": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", - "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", - "dev": true - }, - "accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - 
"integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "requires": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - } - }, - "acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", - "dev": true - }, - "acorn-jsx": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", - "dev": true - }, - "aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "requires": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - } - }, - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-colors": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", - "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", - "dev": true - }, - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "dev": true, - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } - }, - "arg": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", - "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "get-intrinsic": "^1.1.1", - 
"is-string": "^1.0.5" - } - }, - "array.prototype.flat": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.4.tgz", - "integrity": "sha512-4470Xi3GAPAjZqFcljX2xzckv1qeKPizoNkiS0+O4IoPR2ZNpcjE0pkhdihlDouK+x6QOast26B4Q/O9DJnwSg==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1" - } - }, - "array.prototype.flatmap": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.4.tgz", - "integrity": "sha512-r9Z0zYoxqHz60vvQbWEdXIEtCwHF0yxaWfno9qzXeNHvfyl3BZqygmGzb84dsubyaXLH4husF+NFgMSdpZhk2Q==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "function-bind": "^1.1.1" - } - }, - "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" - }, - "assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true - }, - "astral-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", - "dev": true - }, - "async": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", - "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" - }, - "aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "bignumber.js": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", - "integrity": "sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA==" - }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": 
"sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true - }, - "body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "requires": { - "bytes": "3.1.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - "dependencies": { - "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" - } - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "requires": { - "fill-range": "^7.0.1" - } - }, - "browser-stdout": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", - "dev": true - }, - "buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true - }, - "byline": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", - "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=" - }, - "bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" - }, - "cacheable-lookup": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", - "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==" - }, - "cacheable-request": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.1.tgz", - "integrity": "sha512-lt0mJ6YAnsrBErpTMWeu5kl/tg9xMAWjavYTN6VQXM1A/teBITuNcccXsCxF0tDQQJf9DfAaX5O4e0zp0KlfZw==", - "requires": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^4.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^2.0.0" - }, - "dependencies": { - "get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "requires": { - "pump": "^3.0.0" - } - } - } - }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": 
"sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "chai": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.4.tgz", - "integrity": "sha512-yS5H68VYOCtN1cjfwumDSuzn/9c+yza4f3reKXlE5rUg7SFcCEy90gJvydNgOYtblyf4Zi6jIWRnXOgErta0KA==", - "dev": true, - "requires": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^3.0.1", - "get-func-name": "^2.0.0", - "pathval": "^1.1.1", - "type-detect": "^4.0.5" - } - }, - "chalk": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", - "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", - "dev": true - }, - "chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", - "dev": true, - "requires": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" - } - }, - "chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==" - }, - "clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" - }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "cockatiel": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/cockatiel/-/cockatiel-1.1.1.tgz", - "integrity": "sha512-YO02ZhTcK2HOZodksWfg4tS1GYYt1j7R6U1unAkwcQf1uoIYKSBqPrfqXlLNbsyMvkDXMwa2nuwZDHbUkB1VbQ==" - }, - "color": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", - "requires": { - "color-convert": "^1.9.1", - "color-string": "^1.5.2" - }, - 
"dependencies": { - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - } - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", - "requires": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, - "colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==" - }, - "colorspace": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.2.tgz", - "integrity": "sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ==", - "requires": { - "color": "3.0.x", - "text-hex": "1.0.x" - } - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", - "dev": true - }, - "content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", - "requires": { - "safe-buffer": "5.1.2" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" - }, - "cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": 
"sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==" - }, - "cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "create-require": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", - "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true - }, - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "requires": { - "mimic-response": "^3.1.0" - }, - "dependencies": { - "mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==" - } - } - }, - "deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", - "dev": true, - "requires": { - "type-detect": "^4.0.0" - } - }, - "deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true - }, - "defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==" - }, - "define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, - "requires": { - "object-keys": "^1.0.12" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" - }, - "depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" - }, - "destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==" - }, - "dirty-chai": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/dirty-chai/-/dirty-chai-2.0.1.tgz", - "integrity": "sha512-ys79pWKvDMowIDEPC6Fig8d5THiC0DJ2gmTeGzVAoEH18J8OzLud0Jh7I9IWg3NSk8x2UocznUuFmfHCXYZx9w==", - "dev": true - }, - "doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "enabled": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", - "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==" - }, - "encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "requires": { - "once": "^1.4.0" - } - }, - "enquirer": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", - "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", - "dev": true, - "requires": { - "ansi-colors": "^4.1.1" - } - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - }, - "dependencies": { - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - } - } - }, - "es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": 
"sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "get-intrinsic": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", - "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.2", - "string.prototype.trimend": "^1.0.4", - "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, - "escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "eslint": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.12.1.tgz", - "integrity": "sha512-HlMTEdr/LicJfN08LB3nM1rRYliDXOmfoO4vj39xN6BLpFzF00hbwBoqHk8UcJ2M/3nlARZWy/mslvGEuZFvsg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "@eslint/eslintrc": "^0.2.1", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.0.1", - "doctrine": "^3.0.0", - "enquirer": "^2.3.5", - "eslint-scope": "^5.1.1", - "eslint-utils": "^2.1.0", - "eslint-visitor-keys": "^2.0.0", - "espree": "^7.3.0", - "esquery": "^1.2.0", - "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", - "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "js-yaml": "^3.13.1", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash": "^4.17.19", - "minimatch": "^3.0.4", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "progress": "^2.0.0", - "regexpp": "^3.1.0", - "semver": "^7.2.1", - "strip-ansi": "^6.0.0", - "strip-json-comments": "^3.1.0", - "table": "^5.2.3", - "text-table": "^0.2.0", - "v8-compile-cache": "^2.0.3" - }, - "dependencies": { - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "requires": { - "argparse": 
"^1.0.7", - "esprima": "^4.0.0" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "eslint-config-semistandard": { - "version": "15.0.1", - "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.1.tgz", - "integrity": "sha512-sfV+qNBWKOmF0kZJll1VH5XqOAdTmLlhbOl9WKI11d2eMEe+Kicxnpm24PQWHOqAfk5pAWU2An0LjNCXKa4Usg==", - "dev": true - }, - "eslint-config-standard": { - "version": "16.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.0.tgz", - "integrity": "sha512-kMCehB9yXIG+LNsu9uXfm06o6Pt63TFAOzn9tUOzw4r/hFIxHhNR1Xomxy+B5zMrXhqyfHVEcmanzttEjGei9w==", - "dev": true - }, - "eslint-config-standard-jsx": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", - "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", - "dev": true - }, - "eslint-import-resolver-node": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", - "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", - "dev": true, - "requires": { - "debug": "^2.6.9", - "resolve": "^1.13.1" - } - }, - "eslint-module-utils": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.1.tgz", - "integrity": "sha512-ZXI9B8cxAJIH4nfkhTwcRTEAnrVfobYqwjWy/QMCZ8rHkZHFjf9yO4BzpiF9kCSfNlMG54eKigISHpX0+AaT4A==", - "dev": true, - "requires": { - "debug": "^3.2.7", - "pkg-dir": "^2.0.0" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - } - } - }, - "eslint-plugin-es": { - "version": "3.0.1", - 
"resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", - "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", - "dev": true, - "requires": { - "eslint-utils": "^2.0.0", - "regexpp": "^3.0.0" - } - }, - "eslint-plugin-import": { - "version": "2.22.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.22.1.tgz", - "integrity": "sha512-8K7JjINHOpH64ozkAhpT3sd+FswIZTfMZTjdx052pnWrgRCVfp8op9tbjpAk3DdUeI/Ba4C8OjdC0r90erHEOw==", - "dev": true, - "requires": { - "array-includes": "^3.1.1", - "array.prototype.flat": "^1.2.3", - "contains-path": "^0.1.0", - "debug": "^2.6.9", - "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.4", - "eslint-module-utils": "^2.6.0", - "has": "^1.0.3", - "minimatch": "^3.0.4", - "object.values": "^1.1.1", - "read-pkg-up": "^2.0.0", - "resolve": "^1.17.0", - "tsconfig-paths": "^3.9.0" - }, - "dependencies": { - "doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", - "dev": true, - "requires": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" - } - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - } - } - }, - "eslint-plugin-node": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", - "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", - "dev": true, - "requires": { - "eslint-plugin-es": "^3.0.0", - "eslint-utils": "^2.0.0", - "ignore": "^5.1.1", - "minimatch": "^3.0.4", - "resolve": "^1.10.1", - "semver": "^6.1.0" - }, - "dependencies": { - "ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", - "dev": true - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", - "dev": true - }, - "eslint-plugin-react": { - "version": "7.21.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.21.5.tgz", - "integrity": "sha512-8MaEggC2et0wSF6bUeywF7qQ46ER81irOdWS4QWxnnlAEsnzeBevk1sWh7fhpCghPpXb+8Ks7hvaft6L/xsR6g==", - "dev": true, - "requires": { - "array-includes": "^3.1.1", - "array.prototype.flatmap": "^1.2.3", - "doctrine": "^2.1.0", - "has": "^1.0.3", - "jsx-ast-utils": "^2.4.1 || ^3.0.0", - "object.entries": "^1.1.2", - "object.fromentries": "^2.0.2", - "object.values": "^1.1.1", - "prop-types": "^15.7.2", - "resolve": "^1.18.1", - "string.prototype.matchall": "^4.0.2" - }, - "dependencies": { - "doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - 
"requires": { - "esutils": "^2.0.2" - } - } - } - }, - "eslint-plugin-standard": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", - "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==", - "dev": true - }, - "eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - } - }, - "eslint-utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", - "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^1.1.0" - }, - "dependencies": { - "eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", - "dev": true - } - } - }, - "eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "dev": true - }, - "espree": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", - "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", - "dev": true, - "requires": { - "acorn": "^7.4.0", - "acorn-jsx": "^5.3.1", - "eslint-visitor-keys": "^1.3.0" - }, - "dependencies": { - "eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", - "dev": true - } - } - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" - }, - "esquery": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", - "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", - "dev": true, - "requires": { - "estraverse": "^5.1.0" - }, - "dependencies": { - "estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "dev": true - } - } - }, - "esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "requires": { - "estraverse": "^5.2.0" - }, - "dependencies": { - "estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "dev": true - } - 
} - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" - }, - "etcd3": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/etcd3/-/etcd3-1.1.0.tgz", - "integrity": "sha512-9SnJvaPyW5IYdJHJWX91CYo1QZCAev2B7PxrQWIe2tGbutZOmsXHfjNDKwEltFWoG5h++K3/JfWPjJdOGX90hg==", - "requires": { - "@grpc/grpc-js": "^1.1.7", - "@grpc/proto-loader": "^0.5.5", - "bignumber.js": "^9.0.0", - "cockatiel": "^1.1.1" - }, - "dependencies": { - "@grpc/proto-loader": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.6.tgz", - "integrity": "sha512-DT14xgw3PSzPxwS13auTEwxhMMOoz33DPUKNtmYK/QYbBSpLXJy78FGGs5yVoxVobEqPm4iW9MOIoz0A3bLTRQ==", - "requires": { - "lodash.camelcase": "^4.3.0", - "protobufjs": "^6.8.6" - } - } - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", - "requires": { - "accepts": "~1.3.7", - "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", - "content-type": "~1.0.4", - "cookie": "0.4.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "dependencies": { - "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "extsprintf": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" - }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true - }, - "fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" - }, - "fecha": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.1.tgz", - "integrity": "sha512-MMMQ0ludy/nBs1/o0zVOiKTpG7qMbonKUzjJgQFEuvq6INZ1OraKPRAWkBq5vlKLOUMpmNYG1JoN3oDPUQ9m3Q==" - }, - "file-entry-cache": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", - "dev": true, - "requires": { - "flat-cache": "^2.0.1" - } - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "requires": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - } - }, - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "dev": true - }, - "flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", - "dev": true, - "requires": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" - }, - "dependencies": { - "rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", - "dev": true, - "requires": { - "glob": 
"^7.1.3" - } - } - } - }, - "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true - }, - "fn.name": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", - "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==" - }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" - }, - "form-data": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", - "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" - }, - "fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" - }, - "fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "requires": { - "minipass": "^3.0.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "dev": true, - "optional": true - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "functional-red-black-tree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", - "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", - "dev": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" - }, - "get-func-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", - "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", - "dev": true - }, - "get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - } - }, - "get-stdin": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", - "integrity": 
"sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "requires": { - "pump": "^3.0.0" - } - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - }, - "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "requires": { - "type-fest": "^0.8.1" - } - }, - "got": { - "version": "11.8.2", - "resolved": "https://registry.npmjs.org/got/-/got-11.8.2.tgz", - "integrity": "sha512-D0QywKgIe30ODs+fm8wMZiAcZjypcCodPNuMz5H9Mny7RJ+IjJ10BdmGW7OM7fHXP+O7r6ZwapQ/YQmMSvB0UQ==", - "requires": { - "@sindresorhus/is": "^4.0.0", - "@szmarczak/http-timer": "^4.0.5", - "@types/cacheable-request": "^6.0.1", - "@types/responselike": "^1.0.0", - "cacheable-lookup": "^5.0.3", - "cacheable-request": "^7.0.1", - "decompress-response": "^6.0.0", - "http2-wrapper": "^1.0.0-beta.5.2", - "lowercase-keys": "^2.0.0", - "p-cancelable": "^2.0.0", - "responselike": "^2.0.0" - } - }, - "graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==", - "dev": true - }, - "growl": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", - "dev": true - }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" - }, - "har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "requires": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - } - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - 
"integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", - "dev": true - }, - "he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "dev": true - }, - "hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "requires": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "dependencies": { - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - } - } - }, - "http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "http2-wrapper": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", - "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", - "requires": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.0.0" - } - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true - }, - "import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dev": true, - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - } - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": 
"sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "internal-slot": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", - "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - } - }, - "interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" - }, - "ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" - }, - "is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "is-bigint": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.2.tgz", - "integrity": "sha512-0JV5+SOCQkIdzjBK9buARcV804Ddu7A0Qet6sHi3FimE9ne6m4BGQZfRn+NZiXbBk4F4XmHfDZIipLj9pX8dSA==", - "dev": true - }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "requires": { - "binary-extensions": "^2.0.0" - } - }, - "is-boolean-object": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.1.tgz", - "integrity": "sha512-bXdQWkECBUIAcCkeH1unwJLIpZYaa5VvuygSyS/c2lf719mTKZDU5UdDRlpd01UjADgmW8RfqaP+mRaVPdr/Ng==", - "dev": true, - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", - "dev": true - }, - "is-core-module": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.4.0.tgz", - "integrity": "sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A==", - "requires": { - "has": "^1.0.3" - } - }, - "is-date-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.4.tgz", - "integrity": "sha512-/b4ZVsG7Z5XVtIxs/h9W8nvfLgSAyKYdtGWQLbqy6jA1icmgjf8WCoTKgeS4wy5tYaPePouzFMANbnj94c2Z+A==", - "dev": true - }, - "is-extglob": { - "version": "2.1.1", - 
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dev": true, - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", - "dev": true - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true - }, - "is-number-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.5.tgz", - "integrity": "sha512-RU0lI/n95pMoUKu9v1BZP5MBcZuNSVJkMkAG2dJqC4z2GlkGUNeH68SuHuBKBD/XFe+LHZ+f9BKkLET60Niedw==", - "dev": true - }, - "is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", - "dev": true - }, - "is-regex": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.3.tgz", - "integrity": "sha512-qSVXFz28HM7y+IWX6vLCsexdlvzT1PJNFSBuaQLQ5o0IEw8UDYW6/2+eCMVyIsbM8CNLX2a/QWmSpyxYEHY7CQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-symbols": "^1.0.2" - } - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" - }, - "is-string": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.6.tgz", - "integrity": "sha512-2gdzbKUuqtQ3lYNrUTQYoClPhm7oQu4UdpSZMp1/DGgkHBT8E2Z1l0yMdb6D4zNAxwDiMv8MdulKROJGNl0Q0w==", - "dev": true - }, - "is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", - "dev": true, - "requires": { - "has-symbols": "^1.0.2" - } - }, - "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "isomorphic-ws": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", - "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==" - }, - "isstream": { - 
"version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "jose": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/jose/-/jose-2.0.5.tgz", - "integrity": "sha512-BAiDNeDKTMgk4tvD0BbxJ8xHEHBZgpeRZ1zGPPsitSyMgjoMWiLGYAE7H7NpP5h0lPppQajQs871E8NHUrzVPA==", - "requires": { - "@panva/asn1.js": "^1.0.0" - } - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "requires": { - "argparse": "^2.0.1" - }, - "dependencies": { - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - } - } - }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" - }, - "json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", - "dev": true - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dev": true, - "requires": { - "minimist": "^1.2.0" - } - }, - "jsonpath-plus": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", - "integrity": "sha512-GSVwsrzW9LsA5lzsqe4CkuZ9wp+kxBb2GwNniaWzI2YFn5Ig42rSW8ZxVpWXaAfakXNrx5pgY5AbQq7kzX29kg==" - }, - "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - 
"json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "jsx-ast-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.0.tgz", - "integrity": "sha512-EIsmt3O3ljsU6sot/J4E1zDRxfBNrhjyf/OKjlydwgEimQuznlM4Wv7U+ueONJMyEn1WRE0K8dhi3dVAXYT24Q==", - "dev": true, - "requires": { - "array-includes": "^3.1.2", - "object.assign": "^4.1.2" - } - }, - "just-extend": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-4.2.1.tgz", - "integrity": "sha512-g3UB796vUFIY90VIv/WX3L2c8CS2MdWUww3CNrYmqza1Fg0DURc2K/O4YrnklBdQarSJ/y8JnJYDGc+1iumQjg==" - }, - "keyv": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.0.3.tgz", - "integrity": "sha512-zdGa2TOpSZPq5mU6iowDARnMBZgtCqJ11dJROFi6tg6kTn4nuUdU09lFyLFSaHrWqpIJ+EBq4E8/Dc0Vx5vLdA==", - "requires": { - "json-buffer": "3.0.1" - } - }, - "kuler": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", - "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==" - }, - "levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - } - }, - "load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" - } - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "requires": { - "p-locate": "^5.0.0" - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" - }, - "lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=" - }, - "log-symbols": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", - "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", - "dev": true, - "requires": { - "chalk": "^4.0.0" - } - }, - "logform": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/logform/-/logform-2.2.0.tgz", - "integrity": "sha512-N0qPlqfypFx7UHNn4B3lzS/b0uLqt2hmuoa+PpuXNYgozdJYAyauF5Ky0BWVjrxDlMWiT3qN4zPq3vVAfZy7Yg==", - "requires": { - "colors": "^1.2.1", - "fast-safe-stringify": "^2.0.4", - "fecha": "^4.2.0", - "ms": "^2.1.1", - "triple-beam": "^1.3.0" - }, - "dependencies": { - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - } - } - }, - "long": { - 
"version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "dev": true, - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, - "lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "requires": { - "yallist": "^4.0.0" - } - }, - "make-error": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", - "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" - }, - "media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" - }, - "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" - }, - "mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" - }, - "mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==" - }, - "mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", - "requires": { - "mime-db": "1.47.0" - } - }, - "mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "minipass": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", - "integrity": "sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", - 
"requires": { - "yallist": "^4.0.0" - } - }, - "minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "requires": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - } - }, - "mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" - }, - "mocha": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.4.0.tgz", - "integrity": "sha512-hJaO0mwDXmZS4ghXsvPVriOhsxQ7ofcpQdm8dE+jISUOKopitvnXFQmpRR7jd2K6VBG6E26gU3IAbXXGIbu4sQ==", - "dev": true, - "requires": { - "@ungap/promise-all-settled": "1.1.2", - "ansi-colors": "4.1.1", - "browser-stdout": "1.3.1", - "chokidar": "3.5.1", - "debug": "4.3.1", - "diff": "5.0.0", - "escape-string-regexp": "4.0.0", - "find-up": "5.0.0", - "glob": "7.1.6", - "growl": "1.10.5", - "he": "1.2.0", - "js-yaml": "4.0.0", - "log-symbols": "4.0.0", - "minimatch": "3.0.4", - "ms": "2.1.3", - "nanoid": "3.1.20", - "serialize-javascript": "5.0.1", - "strip-json-comments": "3.1.1", - "supports-color": "8.1.1", - "which": "2.0.2", - "wide-align": "1.1.3", - "workerpool": "6.1.0", - "yargs": "16.2.0", - "yargs-parser": "20.2.4", - "yargs-unparser": "2.0.0" - }, - "dependencies": { - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dev": true, - "requires": { - "ms": "2.1.2" - }, - "dependencies": { - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - } - } - }, - "diff": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", - "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", - "dev": true - }, - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "js-yaml": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", - "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", - "dev": true, - "requires": { - "argparse": "^2.0.1" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.4", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", - "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", - "dev": true - } - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "nanoid": { - "version": "3.1.20", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz", - "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==", - "dev": true - }, - "nats": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/nats/-/nats-2.0.4.tgz", - "integrity": "sha512-cICTjoL09YZnh6O4vg7PnKUH9P/w6xPs4iZns/VA6h8iPe1ZhOY6tHEdjZ/wJ1eAFZKX+gw1+CxId0RK5NUbqA==", - "requires": { - "nkeys.js": "^1.0.0-9" - } - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", - "dev": true - }, - "negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==" - }, - "nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "nise": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/nise/-/nise-4.1.0.tgz", - "integrity": "sha512-eQMEmGN/8arp0xsvGoQ+B1qvSkR73B1nWSCh7nOt5neMCtwcQVYQGdzQMhcNscktTsWB54xnlSQFzOAPJD8nXA==", - "requires": { - "@sinonjs/commons": "^1.7.0", - "@sinonjs/fake-timers": "^6.0.0", - "@sinonjs/text-encoding": "^0.7.1", - "just-extend": "^4.0.2", - "path-to-regexp": "^1.7.0" - }, - "dependencies": { - "path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "requires": { - "isarray": "0.0.1" - } - } - } - }, - "nkeys.js": { - "version": "1.0.0-9", - "resolved": 
"https://registry.npmjs.org/nkeys.js/-/nkeys.js-1.0.0-9.tgz", - "integrity": "sha512-m9O0NQT+3rUe1om6MWpxV77EuHql/LdorDH+FYQkoeARcM2V0sQ89kM36fArWaHWq/25EmNmQUW0MhLTcbqW1A==", - "requires": { - "@types/node": "^14.0.26", - "tweetnacl": "^1.0.3" - }, - "dependencies": { - "@types/node": { - "version": "14.17.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.17.1.tgz", - "integrity": "sha512-/tpUyFD7meeooTRwl3sYlihx2BrJE7q9XF71EguPFIySj9B7qgnRtHsHTho+0AUm4m1SvWGm6uSncrR94q6Vtw==" - }, - "tweetnacl": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", - "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" - } - } - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true - } - } - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true - }, - "normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==" - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "requires": { - "path-key": "^2.0.0" - } - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true - }, - "object-hash": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz", - "integrity": "sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ==" - }, - "object-inspect": { - "version": "1.10.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.3.tgz", - "integrity": "sha512-e5mCJlSH7poANfC8z8S9s9S2IN5/4Zb3aZ33f5s8YqoazCFzNLloLU8r5VCG+G7WoqLvAAZoVMcy3tp/3X0Plw==", - "dev": true - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true - }, - "object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": 
"sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - } - }, - "object.entries": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.3.tgz", - "integrity": "sha512-ym7h7OZebNS96hn5IJeyUmaWhaSM4SVtAPPfNLQEI2MYWCO2egsITb9nab2+i/Pwibx+R0mtn+ltKJXRSeTMGg==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "has": "^1.0.3" - } - }, - "object.fromentries": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.4.tgz", - "integrity": "sha512-EsFBshs5RUUpQEY1D4q/m59kMfz4YJvxuNCJcv/jWwOJr34EaVnG11ZrZa0UHB3wnzV1wx8m58T4hQL8IuNXlQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - } - }, - "object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - } - }, - "oidc-token-hash": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-5.0.1.tgz", - "integrity": "sha512-EvoOtz6FIEBzE+9q253HsLCVRiK/0doEJ2HCvvqMQb3dHZrP3WlJKYtJ55CRTw4jmYomzH4wkPuCj/I3ZvpKxQ==" - }, - "on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "requires": { - "ee-first": "1.1.1" - } - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "requires": { - "wrappy": "1" - } - }, - "one-time": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", - "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", - "requires": { - "fn.name": "1.x.x" - } - }, - "openid-client": { - "version": "4.7.3", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.7.3.tgz", - "integrity": "sha512-YLwZQLSjo3gdSVxw/G25ddoRp9oCpXkREZXssmenlejZQPsnTq+yQtFUcBmC7u3VVkx+gwqXZF7X0CtAAJrRRg==", - "requires": { - "aggregate-error": "^3.1.0", - "got": "^11.8.0", - "jose": "^2.0.5", - "lru-cache": "^6.0.0", - "make-error": "^1.3.6", - "object-hash": "^2.0.1", - "oidc-token-hash": "^5.0.1" - } - }, - "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "requires": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - } - }, - "p-cancelable": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", - "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==" - }, - "p-finally": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "requires": { - "p-limit": "^3.0.2" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "requires": { - "callsites": "^3.0.0" - } - }, - "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "requires": { - "error-ex": "^1.2.0" - } - }, - "parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" - }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" - }, - "path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dev": true, - "requires": { - "pify": "^2.0.0" - } - }, - "pathval": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", - "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", - "dev": true - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": 
"sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", - "dev": true - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "pkg-conf": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-3.1.0.tgz", - "integrity": "sha512-m0OTbR/5VPNPqO1ph6Fqbj7Hv6QU7gR/tQW40ZqrL1rjgCU85W6C1bJn0BItuJqnR98PWzw7Z8hHeChD1WrgdQ==", - "dev": true, - "requires": { - "find-up": "^3.0.0", - "load-json-file": "^5.2.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "load-json-file": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", - "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "parse-json": "^4.0.0", - "pify": "^4.0.1", - "strip-bom": "^3.0.0", - "type-fest": "^0.3.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", - "dev": true - } - } - }, - "pkg-dir": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", - "integrity": 
"sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", - "dev": true, - "requires": { - "find-up": "^2.1.0" - }, - "dependencies": { - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - } - } - }, - "prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true - }, - "prop-types": { - "version": "15.7.2", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz", - "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==", - "dev": true, - "requires": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.8.1" - } - }, - "protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "requires": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - }, - "dependencies": { - "@types/node": { - "version": "15.6.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-15.6.1.tgz", - "integrity": "sha512-7EIraBEyRHEe7CH+Fm1XvgqU6uwZN8Q7jppJGcqjROMT29qhAuuOxYB1uEY5UMYQKEmA5D+5tBnhdaPXSsLONA==" - } - } - }, - "proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": 
"sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "requires": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.1" - } - }, - "psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" - }, - "querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" - }, - "quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==" - }, - "randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, - "requires": { - "safe-buffer": "^5.1.0" - } - }, - "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" - }, - "raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "requires": { - "bytes": "3.1.0", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - } - }, - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "dev": true - }, - "read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dev": true, - "requires": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - } - }, - "read-pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dev": true, - "requires": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - }, - "dependencies": { - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "locate-path": { - 
"version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - } - } - }, - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", - "dev": true, - "requires": { - "picomatch": "^2.2.1" - } - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", - "requires": { - "resolve": "^1.1.6" - } - }, - "regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", - "dev": true - }, - "request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "dependencies": { - "form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - } - } - }, - 
"require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" - }, - "requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" - }, - "resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "requires": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, - "resolve-alpn": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.1.2.tgz", - "integrity": "sha512-8OyfzhAtA32LVUsJSke3auIyINcwdh5l3cvYKdKO0nvsYSKuiLfTM5i78PJswFPT8y6cPW+L1v6/hE95chcpDA==" - }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - }, - "responselike": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.0.tgz", - "integrity": "sha512-xH48u3FTB9VsZw7R+vvgaKeLKzT6jOogbQhEe/jewwnZgzPcnyWui2Av6JpoYZF/91uueC+lqhWqeURw5/qhCw==", - "requires": { - "lowercase-keys": "^2.0.0" - } - }, - "rfc4648": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.4.0.tgz", - "integrity": "sha512-3qIzGhHlMHA6PoT6+cdPKZ+ZqtxkIvg8DZGKA5z6PQ33/uuhoJ+Ws/D/J9rXW6gXodgH8QYlz2UCl+sdUDmNIg==" - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "requires": { - "glob": "^7.1.3" - } - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "semistandard": { - "version": "16.0.0", - "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-16.0.0.tgz", - "integrity": "sha512-pLETGjFyl0ETMDAEZxkC1OJBmNmPIMpMkayStGTgHMMh/5FM7Rbk5NWc1t7yfQ4PrRURQH8MUg3ZxvojJJifcw==", - "dev": true, - "requires": { - "eslint": "~7.12.1", - "eslint-config-semistandard": "15.0.1", - "eslint-config-standard": "16.0.0", - "eslint-config-standard-jsx": "10.0.0", - "eslint-plugin-import": "~2.22.1", - "eslint-plugin-node": "~11.1.0", - "eslint-plugin-promise": "~4.2.1", - "eslint-plugin-react": "~7.21.5", - "eslint-plugin-standard": "~4.0.2", - "standard-engine": "^14.0.0" - } - }, - "semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": 
"sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", - "requires": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.7.2", - "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "dependencies": { - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" - } - } - }, - "serialize-javascript": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz", - "integrity": "sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA==", - "dev": true, - "requires": { - "randombytes": "^2.1.0" - } - }, - "serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", - "requires": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.1" - } - }, - "setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" - }, - "shelljs": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", - "integrity": "sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "requires": { - "is-arrayish": "^0.3.1" - } - }, - "sinon": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-10.0.0.tgz", - "integrity": "sha512-XAn5DxtGVJBlBWYrcYKEhWCz7FLwZGdyvANRyK06419hyEpdT0dMc5A8Vcxg5SCGHc40CsqoKsc1bt1CbJPfNw==", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.8.1", - "@sinonjs/fake-timers": "^6.0.1", - "@sinonjs/samsam": "^5.3.1", - "diff": "^4.0.2", - "nise": "^4.1.0", - 
"supports-color": "^7.1.0" - } - }, - "sleep-promise": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/sleep-promise/-/sleep-promise-9.1.0.tgz", - "integrity": "sha512-UHYzVpz9Xn8b+jikYSD6bqvf754xL2uBUzDFwiU6NcdZeifPr6UfgU43xpkPu67VMS88+TI2PSI7Eohgqf2fKA==" - }, - "slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - } - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - }, - "source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.9.tgz", - "integrity": "sha512-Ki212dKK4ogX+xDo4CtOZBVIwhsKBEfsEEcwmJfLQzirgc2jIWdzg40Unxz/HzEUqM1WFzVlQSMF9kZZ2HboLQ==", - "dev": true - }, - 
"sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "stack-trace": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" - }, - "standard-engine": { - "version": "14.0.1", - "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", - "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", - "dev": true, - "requires": { - "get-stdin": "^8.0.0", - "minimist": "^1.2.5", - "pkg-conf": "^3.1.0", - "xdg-basedir": "^4.0.0" - } - }, - "statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=" - }, - "stream-buffers": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", - "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==" - }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "requires": { - "safe-buffer": "~5.2.0" - } - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "string.prototype.matchall": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.4.tgz", - "integrity": "sha512-pknFIWVachNcyqRfaQSeu/FUfpvJTe4uskUSZ9Wc1RijsPuzbZ8TyYT8WCNnntCjUEqQ3vUHMAfVj2+wLAisPQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has-symbols": "^1.0.1", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.3.1", - "side-channel": "^1.0.4" - } - }, - "string.prototype.trimend": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", - "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "string.prototype.trimstart": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", - "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - 
"strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=" - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", - "dev": true, - "requires": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "dev": true - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - } - } - }, - "tar": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.0.tgz", - "integrity": "sha512-DUCttfhsnLCjwoDoFcI+B2iJgYa93vBnDUATYEeRx6sntCTdN01VnqsIuTlALXla/LWooNg0yEGeB+Y8WdFxGA==", - "requires": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - } - }, - "text-hex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", - "integrity": 
"sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", - "dev": true - }, - "tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "requires": { - "rimraf": "^3.0.0" - } - }, - "tmp-promise": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.2.tgz", - "integrity": "sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA==", - "requires": { - "tmp": "^0.2.0" - } - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "requires": { - "is-number": "^7.0.0" - } - }, - "toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==" - }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - } - }, - "triple-beam": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", - "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" - }, - "ts-node": { - "version": "9.1.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-9.1.1.tgz", - "integrity": "sha512-hPlt7ZACERQGf03M253ytLY3dHbGNGrAq9qIHWUY9XHYl1z7wYngSr3OQ5xmui8o2AaxsONxIzjafLUiWBo1Fg==", - "dev": true, - "requires": { - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "source-map-support": "^0.5.17", - "yn": "3.1.1" - } - }, - "ts-sinon": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ts-sinon/-/ts-sinon-2.0.1.tgz", - "integrity": "sha512-uI5huDCY6Gw6Yczmyd/Jcu8gZZYtWO0HakPShvDmlrgcywLyFZ7lgWt1y+gd/x79ReHh+rhMAJkhQkGRnPNikw==", - "requires": { - "@types/node": "^14.6.1", - "@types/sinon": "^9.0.5", - "@types/sinon-chai": "^3.2.4", - "sinon": "^9.0.3" - }, - "dependencies": { - "@types/node": { - "version": "14.17.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.17.1.tgz", - "integrity": "sha512-/tpUyFD7meeooTRwl3sYlihx2BrJE7q9XF71EguPFIySj9B7qgnRtHsHTho+0AUm4m1SvWGm6uSncrR94q6Vtw==" - }, - "sinon": { - "version": "9.2.4", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-9.2.4.tgz", - "integrity": "sha512-zljcULZQsJxVra28qIAL6ow1Z9tpattkCTEJR4RBP3TGc00FcttsP5pK284Nas5WjMZU5Yzy3kAIp3B3KRf5Yg==", - "requires": { - "@sinonjs/commons": "^1.8.1", - "@sinonjs/fake-timers": "^6.0.1", - "@sinonjs/samsam": "^5.3.1", - "diff": "^4.0.2", - "nise": "^4.0.4", - "supports-color": "^7.1.0" - } - } - } - }, - "tsconfig-paths": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.9.0.tgz", - "integrity": 
"sha512-dRcuzokWhajtZWkQsDVKbWyY+jgcLC5sqJhg2PSgf4ZkH2aHPvaOY8YWGhmjb68b5qqTfasSsDO9k7RUiEmZAw==", - "dev": true, - "requires": { - "@types/json5": "^0.0.29", - "json5": "^1.0.1", - "minimist": "^1.2.0", - "strip-bom": "^3.0.0" - } - }, - "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1" - } - }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==" - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - }, - "type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "requires": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - } - }, - "typescript": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.2.4.tgz", - "integrity": "sha512-V+evlYHZnQkaz8TRBuxTA92yZBPotr5H+WhQ7bD3hZUndx5tGOa1fuCgeSjxAzM1RiN5IzvadIXTVefuuwZCRg==", - "dev": true - }, - "unbox-primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", - "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "has-bigints": "^1.0.1", - "has-symbols": "^1.0.2", - "which-boxed-primitive": "^1.0.2" - } - }, - "underscore": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.1.tgz", - "integrity": "sha512-hzSoAVtJF+3ZtiFX0VgfFPHEDRm7Y/QPjGyNo4TVdnDTdft3tr8hEkD25a1jC+TjTuE7tkHGKkhwCgs9dgBB2g==" - }, - "unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" - }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "requires": { - "punycode": "^2.1.0" - } - }, - "url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", - "requires": { - "querystringify": "^2.1.1", - 
"requires-port": "^1.0.0" - } - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" - }, - "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" - }, - "v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" - }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dev": true, - "requires": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", - "dev": true, - "requires": { - "string-width": "^1.0.2 || 2" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "winston": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/winston/-/winston-3.3.3.tgz", - "integrity": "sha512-oEXTISQnC8VlSAKf1KYSSd7J6IWuRPQqDdo8eoRNaYKLvwSb5+79Z3Yi1lrl6KDpU6/VWaxpakDAtb1oQ4n9aw==", - "requires": { - "@dabh/diagnostics": "^2.0.2", - "async": "^3.1.0", - "is-stream": "^2.0.0", - "logform": "^2.2.0", - "one-time": "^1.0.0", - "readable-stream": "^3.4.0", - "stack-trace": "0.0.x", - "triple-beam": "^1.3.0", - "winston-transport": "^4.4.0" - }, - "dependencies": { - "is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==" - } - } - }, - "winston-transport": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.4.0.tgz", - "integrity": "sha512-Lc7/p3GtqtqPBYYtS6KCN3c77/2QCev51DvcJKbkFPQNoj1sinkGwLGFDxkXY9J6p9+EPnYs+D90uwbnaiURTw==", - "requires": { - "readable-stream": "^2.3.7", - "triple-beam": "^1.2.0" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true - }, - "workerpool": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.0.tgz", - "integrity": "sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg==", - "dev": true - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "write": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "dev": true, - "requires": { - "mkdirp": "^0.5.1" - }, - "dependencies": { - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } - } - } - }, - "ws": { - "version": "7.4.5", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.5.tgz", - "integrity": "sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g==" - }, - "wtfnode": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.4.tgz", - "integrity": "sha512-64GEKtMt/MUBuAm+8kHqP74ojjafzu00aT0JKsmkIwYmjRQ/odO0yhbzKLm+Z9v1gMla+8dwITRKzTAlHsB+Og==", - "dev": true - }, - "xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "dev": true - }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - }, - "yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.7", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", - "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==" - }, - "yargs-unparser": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", - "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", - "dev": true, - "requires": { - "camelcase": "^6.0.0", - "decamelize": "^4.0.0", - "flat": "^5.0.2", - "is-plain-obj": "^2.1.0" - }, - "dependencies": { - "camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", - "dev": true - }, - "decamelize": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", - "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", - "dev": true - } - } - }, - "yn": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", - "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true - }, - "yocto-queue": { - "version": "0.1.0", - "resolved": 
"https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true - } - } -} diff --git a/csi/moac/package.json b/csi/moac/package.json deleted file mode 100644 index 3061c192b..000000000 --- a/csi/moac/package.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "moac", - "version": "0.1.0", - "description": "Mayastor's control plane", - "main": "moac", - "bin": { - "moac": "./moac", - "mbus": "./mbus.js" - }, - "repository": { - "type": "git", - "url": "https://github.com/openebs/Mayastor.git", - "directory": "csi/moac" - }, - "scripts": { - "prepare": "./bundle_protos.sh", - "clean": "rm -f dist", - "purge": "rm -rf node_modules proto dist", - "compile": "tsc --pretty", - "start": "./moac", - "test": "mocha test/index.ts", - "check": "semistandard --verbose", - "fix": "semistandard --fix" - }, - "license": "ISC", - "dependencies": { - "@grpc/grpc-js": "^1.3.2", - "@grpc/proto-loader": "^0.6.2", - "@kubernetes/client-node": "^0.14.3", - "@types/express": "^4.17.11", - "@types/lodash": "^4.14.169", - "etcd3": "^1.1.0", - "express": "^4.17.1", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "nats": "^2.0.4", - "sleep-promise": "^9.1.0", - "ts-sinon": "^2.0.1", - "url-parse": "^1.5.1", - "winston": "^3.3.3", - "yargs": "^17.0.1" - }, - "devDependencies": { - "@types/chai": "^4.1.3", - "@types/mocha": "^5.2.7", - "@types/node": "^12.12.2", - "chai": "^4.2.0", - "dirty-chai": "^2.0.1", - "mocha": "^8.1.3", - "semistandard": "^16.0.0", - "sinon": "^10.0.0", - "source-map-support": "^0.5.19", - "ts-node": "9.1.1", - "typescript": "^4.2.4", - "wtfnode": "^0.8.4" - }, - "files": [ - "*.js", - "crds/*.yaml", - "bundle_protos.sh" - ], - "semistandard": { - "env": [ - "mocha", - "node" - ] - } -} diff --git a/csi/moac/src/csi.ts b/csi/moac/src/csi.ts deleted file mode 100644 index 342222964..000000000 --- a/csi/moac/src/csi.ts +++ /dev/null @@ -1,731 +0,0 @@ -// Implementation of K8S CSI controller interface which is mostly -// about volume creation and destruction and few other methods. - -import assert from 'assert'; -import * as _ from 'lodash'; -import * as path from 'path'; -import { grpcCode, GrpcError } from './grpc_client'; -import { Volume } from './volume'; -import { Volumes } from './volumes'; -import { Logger } from './logger'; -import * as grpc from '@grpc/grpc-js'; -import { loadSync } from '@grpc/proto-loader'; -import { Workq } from './workq'; - -const log = Logger('csi'); - -const fs = require('fs').promises; - -const PLUGIN_NAME = 'io.openebs.csi-mayastor'; -const PROTO_PATH = path.join(__dirname, '../proto/csi.proto'); -// TODO: can we generate version with commit SHA dynamically? 
-const VERSION = '0.1'; -const PVC_RE = /pvc-([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/; -const YAML_TRUE_VALUE = [ - 'y', 'Y', 'yes', 'Yes', 'YES', - 'true', 'True', 'TRUE', - 'on', 'On', 'ON', -]; - -// Load csi proto file with controller and identity services -const packageDefinition = loadSync(PROTO_PATH, { - keepCase: false, - longs: Number, - enums: String, - defaults: true, - oneofs: true, - // this is to load google/descriptor.proto, otherwise you would see error: - // unresolvable extensions: 'extend google.protobuf.FieldOptions' in .csi.v1 - includeDirs: [path.join(__dirname, '/node_modules/protobufjs')] -}); -// TODO: figure out how to remove any -const csi = ( grpc.loadPackageDefinition(packageDefinition).csi).v1; - -// Done callback in CSI methods -type CsiDoneCb = (err: any, resp?: any) => void; -// CSI method signature -type CsiMethod = (args: any, cb: CsiDoneCb) => void; -type CsiMethodImpl = (args: any) => Promise; - -// Limited definition of topology key from CSI spec. -type TopologyKeys = { - segments: Record -}; - -// Simplified definition of K8s object as defined in the CSI spec. -type K8sVolume = { - volumeId: string, - capacityBytes: number, - accessibleTopology: TopologyKeys[], -}; - -// When list volumes method does not fit into one reply we store the context -// for the next retrieval. -type ListContext = { - volumes: { - volume: K8sVolume - }[] -}; - -// Parse mayastor node ID (i.e. mayastor://node-name) and return the node name. -function parseMayastorNodeId (nodeId: string) { - const parts = nodeId.split('/'); - - if ( - parts.length !== 3 || - parts[0] !== 'mayastor:' || - parts[1] !== '' || - !parts[2] - ) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Invalid mayastor node ID: ' + nodeId - ); - } - return parts[2]; -} - -// Check that the list of volume capabilities does not contain unsupported -// capability. Throws grpc error if a capability is not supported. -// -// @param caps Volume capabilities as described in CSI spec. -function checkCapabilities (caps: any[]) { - if (!caps) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Missing volume capabilities' - ); - } - for (let i = 0; i < caps.length; i++) { - const cap = caps[i]; - - // TODO: Check that FS type is supported and mount options? - if (cap.accessMode.mode !== 'SINGLE_NODE_WRITER') { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - `Access mode ${cap.accessMode.mode} not supported` - ); - } - } -} - -// Generate CSI access constraits for the volume. -function getAccessibleTopology (volume: Volume): TopologyKeys[] { - if (volume.spec.local) { - // We impose a hard requirement on k8s to schedule the app to one of the - // nodes with replica to make use of the locality. The nexus will follow - // the app during the publish. - return volume.getReplicas().map((r) => { - return { - segments: { 'kubernetes.io/hostname': r.pool!.node!.name } - }; - }); - } else { - // access from anywhere - return []; - } -} - -// Create k8s volume object as returned by CSI list volumes method. -// -// @param {object} volume Volume object. -// @returns {object} K8s CSI volume object. 
-function createK8sVolumeObject (volume: Volume): K8sVolume { - const obj: K8sVolume = { - volumeId: volume.uuid, - capacityBytes: volume.getSize(), - accessibleTopology: getAccessibleTopology(volume), - }; - return obj; -} - -// Duplicate request cache entry helps to detect retransmits of the same request -// -// This may seem like a useless thing but k8s is agressive on retransmitting -// requests. The first retransmit happens just a tens of ms after the original -// request. Having many requests that are the same in progress creates havoc -// and forces mayastor to execute repeating code. -// -// NOTE: Assumption is that k8s doesn't submit duplicate request for the same -// volume (the same uuid) with different parameters. -// -class Request { - args: string; // stringified method args - method: string; // CSI method name - callbacks: CsiDoneCb[]; // callbacks to call when done - - constructor (args: any, method: string, cb: CsiDoneCb) { - this.args = JSON.stringify(args); - this.method = method; - this.callbacks = [cb]; - } - - wait (cb: CsiDoneCb) { - this.callbacks.push(cb); - } - - done (err: any, resp?: any) { - this.callbacks.forEach((cb) => cb(err, resp)); - } -} - -// CSI Controller implementation. -// -// It implements Identity and Controller grpc services from csi proto file. -// It relies on volume manager, when serving incoming CSI requests, that holds -// information about volumes and provides methods to manipulate them. -export class CsiServer { - private server: any; - private ready: boolean; - private registry: any; - private volumes: Volumes | null; - private sockPath: string; - private nextListContextId: number; - private listContexts: Record; - private duplicateRequestCache: Request[]; - private serializationQueue: Workq; - - // Creates new csi server - // - // @param sockPath Unix domain socket for csi server to listen on. - constructor (sockPath: string) { - this.server = new grpc.Server(); - this.ready = false; - this.registry = null; - this.volumes = null; - this.sockPath = sockPath; - this.nextListContextId = 1; - this.listContexts = {}; - this.duplicateRequestCache = []; - this.serializationQueue = new Workq('serial-csi'); - - // The data returned by identity service should be kept in sync with - // responses for the same methods on storage node. - this.server.addService(csi.Identity.service, { - getPluginInfo: this.getPluginInfo.bind(this), - getPluginCapabilities: this.getPluginCapabilities.bind(this), - probe: this.probe.bind(this) - }); - - // Wrap all controller methods by a check for readiness of the csi server - // and request/response logging to avoid repeating code. - const controllerMethods: Record = {}; - let methodNames = [ - 'createVolume', - 'deleteVolume', - 'controllerPublishVolume', - 'controllerUnpublishVolume', - 'validateVolumeCapabilities', - 'listVolumes', - 'getCapacity', - 'controllerGetCapabilities' - ]; - // Note: what used to be elegant in JS is a type disaster in TS. - // Dynamic wrapper for calling methods defined on an object. 
- methodNames.forEach((name) => { - controllerMethods[name] = ( - call: grpc.ServerUnaryCall, - cb: (err: Error | undefined, resp?: any, - ) => void) => { - const args = call.request; - log.trace(`CSI ${name} request: ${JSON.stringify(args)}`); - - if (!this.ready) { - return cb( - new GrpcError( - grpcCode.UNAVAILABLE, - 'Not ready for serving requests' - ) - ); - } - - // detect duplicate method - let request = this._beginRequest(args, name, cb); - if (!request) { - // cb will be called when the original request completes - nothing to do - return; - } - - let csiMethodImpl = (args: any) => { - return ( this[name as keyof CsiServer].bind(this))(args) - .then((resp: any) => { - log.trace(`CSI ${name} response: ${JSON.stringify(resp)}`); - assert(request); - this._endRequest(request, undefined, resp); - }) - .catch((err: any) => { - if (!(err instanceof GrpcError)) { - err = new GrpcError( - grpcCode.UNKNOWN, - `Unexpected error in ${name} method: ` + err.stack - ); - } - log.error(`CSI ${name} failed: ${err}`); - assert(request); - this._endRequest(request, err); - }); - }; - - // We have to serialize create and publish volume requests because: - // 1. create requests create havoc in space accounting - // 2. concurrent publish reqs triggers blocking connect bug in mayastor - // 3. they make the log file difficult to follow - if (['createVolume', 'controllerPublishVolume'].indexOf(name) >= 0) { - this.serializationQueue.push(args, (args) => csiMethodImpl(args)); - } else { - csiMethodImpl(args); - } - }; - }); - // unimplemented methods - methodNames = [ - 'createSnapshot', - 'deleteSnapshot', - 'listSnapshots', - 'controllerExpandVolume' - ]; - methodNames.forEach((name) => { - controllerMethods[name] = function notImplemented (_, cb) { - const msg = `CSI method ${name} not implemented`; - log.error(msg); - cb(new GrpcError(grpcCode.UNIMPLEMENTED, msg)); - }; - }); - this.server.addService(csi.Controller.service, controllerMethods); - } - - // Listen on UDS - async start (): Promise { - try { - await fs.lstat(this.sockPath); - log.info('Removing stale socket file ' + this.sockPath); - await fs.unlink(this.sockPath); - } catch (err) { - // the file does not exist which is ok - } - return new Promise((resolve, reject) => { - this.server.bindAsync( - 'unix://' + this.sockPath, - grpc.ServerCredentials.createInsecure(), - (err: Error) => { - if (err) { - log.error(`CSI server failed to bind at ${this.sockPath}`); - reject(new Error(`Bind failed: ${err}`)); - } else { - log.info('CSI server listens at ' + this.sockPath); - this.server.start(); - resolve(); - } - } - ); - }); - } - - // Stop the grpc server. - async stop () { - return new Promise((resolve, reject) => { - log.info('Shutting down grpc server'); - this.server.tryShutdown(resolve); - }); - } - - // Switch csi server to ready state (returned by identity.probe() method). - // This will enable serving grpc controller service requests. - // - // @param registry Object holding node, replica, pool and nexus objects. - // @param volumes Volume manager. - makeReady (registry: any, volumes: Volumes) { - this.ready = true; - this.registry = registry; - this.volumes = volumes; - } - - // Stop serving controller requests, but the identity service still works. - // This is usually preparation for a shutdown. - undoReady () { - this.ready = false; - } - - // Find outstanding request by uuid and operation type. 
- _findRequest (args: any, method: string): Request | undefined { - args = JSON.stringify(args); - return this.duplicateRequestCache.find( - (e) => e.args === args && e.method === method - ); - } - - _beginRequest (args: any, method: string, cb: CsiDoneCb): Request | undefined { - let request = this._findRequest(args, method); - if (request) { - log.debug(`Duplicate ${method} volume request detected`); - request.wait(cb); - return; - } - request = new Request(args, method, cb); - this.duplicateRequestCache.push(request); - return request; - } - - // Remove request entry from the cache and call done callbacks. - _endRequest (request: Request, err: any, resp?: any) { - let idx = this.duplicateRequestCache.indexOf(request); - if (idx >= 0) { - this.duplicateRequestCache.splice(idx, 1); - } - request.done(err, resp); - } - - // - // Implementation of CSI identity methods - // - - getPluginInfo (_: any, cb: CsiDoneCb) { - log.debug( - `getPluginInfo request (name=${PLUGIN_NAME}, version=${VERSION})` - ); - cb(null, { - name: PLUGIN_NAME, - vendorVersion: VERSION, - manifest: {} - }); - } - - getPluginCapabilities (_: any, cb: CsiDoneCb) { - const caps = ['CONTROLLER_SERVICE', 'VOLUME_ACCESSIBILITY_CONSTRAINTS']; - log.debug('getPluginCapabilities request: ' + caps.join(', ')); - cb(null, { - capabilities: caps.map((c) => { - return { service: { type: c } }; - }) - }); - } - - probe (_: any, cb: CsiDoneCb) { - log.debug(`probe request (ready=${this.ready})`); - cb(null, { ready: { value: this.ready } }); - } - - // - // Implementation of CSI controller methods - // - - async controllerGetCapabilities (_: any) { - const caps = [ - 'CREATE_DELETE_VOLUME', - 'PUBLISH_UNPUBLISH_VOLUME', - 'LIST_VOLUMES', - 'GET_CAPACITY' - ]; - log.debug('get capabilities request: ' + caps.join(', ')); - return { - capabilities: caps.map((c) => { - return { rpc: { type: c } }; - }) - }; - } - - async createVolume (args: any): Promise { - assert(this.volumes); - - log.debug( - `Request to create volume "${args.name}" with size ` + - args.capacityRange.requiredBytes + - ` (limit ${args.capacityRange.limitBytes})` - ); - - if (args.volumeContentSource) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Source for create volume is not supported' - ); - } - // k8s uses names pvc-{uuid} and we use uuid further as ID in SPDK so we - // must require it. - const m = args.name.match(PVC_RE); - if (!m) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - `Expected the volume name in pvc-{uuid} format: ${args.name}` - ); - } - const uuid = m[1]; - checkCapabilities(args.volumeCapabilities); - - // Storage protocol for accessing nexus is a required parameter - const protocol = args.parameters && args.parameters.protocol; - if (!protocol) { - throw new GrpcError(grpcCode.INVALID_ARGUMENT, 'missing storage protocol'); - } - const ioTimeout = args.parameters.ioTimeout; - if (ioTimeout !== undefined) { - if (protocol !== 'nvmf') { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'ioTimeout is valid only for nvmf protocol' - ); - } - if (Object.is(parseInt(ioTimeout), NaN)) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'ioTimeout must be an integer' - ); - } - } - - // For exaplanation of accessibilityRequirements refer to a table at - // https://github.com/kubernetes-csi/external-provisioner. - // Our case is WaitForFirstConsumer = true, strict-topology = false. - // - // The first node in preferred array the node that was chosen for running - // the app by the k8s scheduler. 
The rest of the entries are in random - // order and perhaps don't even run mayastor csi node plugin. - // - // The requisite array contains all nodes in the cluster irrespective - // of what node was chosen for running the app. - // - const mustNodes = []; - const shouldNodes = []; - - if (args.accessibilityRequirements) { - for ( - let i = 0; - i < args.accessibilityRequirements.requisite.length; - i++ - ) { - const reqs = args.accessibilityRequirements.requisite[i]; - for (const key in reqs.segments) { - // We are not able to evaluate any other topology requirements than - // the hostname req. Reject all others. - if (key !== 'kubernetes.io/hostname') { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Volume topology other than hostname not supported' - ); - } else { - mustNodes.push(reqs.segments[key]); - } - } - } - for ( - let i = 0; - i < args.accessibilityRequirements.preferred.length; - i++ - ) { - const reqs = args.accessibilityRequirements.preferred[i]; - for (const key in reqs.segments) { - // ignore others than hostname (it's only preferred) - if (key === 'kubernetes.io/hostname') { - shouldNodes.push(reqs.segments[key]); - } - } - } - } - - let count = args.parameters.repl; - if (count) { - count = parseInt(count); - if (isNaN(count) || count <= 0) { - throw new GrpcError(grpcCode.INVALID_ARGUMENT, 'Invalid replica count'); - } - } else { - count = 1; - } - - // create the volume - let volume = await this.volumes.createVolume(uuid, { - replicaCount: count, - local: YAML_TRUE_VALUE.indexOf(args.parameters.local) >= 0, - preferredNodes: shouldNodes, - requiredNodes: mustNodes, - requiredBytes: args.capacityRange.requiredBytes, - limitBytes: args.capacityRange.limitBytes, - protocol: protocol - }); - - return { - volume: { - capacityBytes: volume.getSize(), - volumeId: uuid, - accessibleTopology: getAccessibleTopology(volume), - // parameters defined in the storage class are only presented - // to the CSI driver createVolume method. - // Propagate them to other CSI driver methods involved in - // standing up a volume, using the volume context. - volumeContext: args.parameters - } - }; - } - - async deleteVolume (args: any) { - assert(this.volumes); - - log.debug(`Request to destroy volume "${args.volumeId}"`); - - await this.volumes.destroyVolume(args.volumeId); - log.info(`Volume "${args.volumeId}" destroyed`); - } - - async listVolumes (args: any) { - assert(this.volumes); - let ctx: ListContext; - - if (args.startingToken) { - ctx = this.listContexts[args.startingToken]; - delete this.listContexts[args.startingToken]; - if (!ctx) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Paging context for list volumes is gone' - ); - } - } else { - log.debug('Request to list volumes'); - ctx = { - volumes: this.volumes - .list() - .map(createK8sVolumeObject) - .map((v) => { - return { volume: v }; - }) - }; - } - // default max entries - if (!args.maxEntries) { - args.maxEntries = 1000; - } - - const entries = ctx.volumes.splice(0, args.maxEntries); - - // TODO: purge list contexts older than .. 
(1 min) - if (ctx.volumes.length > 0) { - const ctxId = (this.nextListContextId++).toString(); - this.listContexts[ctxId] = ctx; - return { - entries: entries, - nextToken: ctxId, - }; - } else { - return { entries: entries }; - } - } - - async controllerPublishVolume (args: any): Promise { - assert(this.volumes); - const publishContext: any = {}; - - log.debug( - `Request to publish volume "${args.volumeId}" for "${args.nodeId}"` - ); - - const volume = this.volumes.get(args.volumeId); - if (!volume) { - throw new GrpcError( - grpcCode.NOT_FOUND, - `Volume "${args.volumeId}" does not exist` - ); - } - let nodeId; - nodeId = parseMayastorNodeId(args.nodeId); - const ioTimeout = args.volumeContext?.ioTimeout; - if (ioTimeout !== undefined) { - // The value has been checked during the createVolume - publishContext.ioTimeout = ioTimeout; - } - if (args.readonly) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'readonly volumes are unsupported' - ); - } - if (!args.volumeCapability) { - throw new GrpcError(grpcCode.INVALID_ARGUMENT, 'missing volume capability'); - } - checkCapabilities([args.volumeCapability]); - - try { - publishContext.uri = await volume.publish(nodeId); - } catch (err) { - if (err.code !== grpcCode.ALREADY_EXISTS) { - throw err; - } - log.debug(`Volume "${args.volumeId}" already published on this node`); - return { publishContext }; - } - - log.info(`Published "${args.volumeId}" at ${publishContext.uri}`); - return { publishContext }; - } - - async controllerUnpublishVolume (args: any) { - assert(this.volumes); - - log.debug(`Request to unpublish volume "${args.volumeId}"`); - - const volume = this.volumes.get(args.volumeId); - if (!volume) { - log.warn( - `Request to unpublish volume "${args.volumeId}" which does not exist` - ); - return; - } - parseMayastorNodeId(args.nodeId); - - await volume.unpublish(); - log.info(`Unpublished volume "${args.volumeId}"`); - } - - async validateVolumeCapabilities (args: any): Promise { - assert(this.volumes); - - log.debug(`Request to validate volume capabilities for "${args.volumeId}"`); - - if (!this.volumes.get(args.volumeId)) { - throw new GrpcError( - grpcCode.NOT_FOUND, - `Volume "${args.volumeId}" does not exist` - ); - } - const caps = args.volumeCapabilities.filter( - (cap: any) => cap.accessMode.mode === 'SINGLE_NODE_WRITER' - ); - const resp: any = {}; - if (caps.length > 0) { - resp.confirmed = { volumeCapabilities: caps }; - } else { - resp.message = 'The only supported capability is SINGLE_NODE_WRITER'; - } - return resp; - } - - // We understand just one topology segment type and that is hostname. - // So if it is specified we return capacity of storage pools on the node - // or capacity of all pools in the cluster. - // - // XXX Is the caller interested in total capacity (sum of all pools) or - // a capacity usable by a single volume? 
- async getCapacity (args: any) { - let nodeName; - - if (args.volumeCapabilities) { - checkCapabilities(args.volumeCapabilities); - } - if (args.accessibleTopology) { - for (const key in args.accessibleTopology.segments) { - if (key === 'kubernetes.io/hostname') { - nodeName = args.accessibleTopology.segments[key]; - break; - } - } - } - - const capacity = this.registry.getCapacity(nodeName); - log.debug(`Get total capacity of node "${nodeName}": ${capacity} bytes`); - return { availableCapacity: capacity }; - } -} - -module.exports = { - CsiServer, - // this is exported for the tests - csi -}; diff --git a/csi/moac/src/event_stream.ts b/csi/moac/src/event_stream.ts deleted file mode 100644 index ae56f43fb..000000000 --- a/csi/moac/src/event_stream.ts +++ /dev/null @@ -1,224 +0,0 @@ -// Stream of events from registry and/or volume manager. -// -// The implementation is not as clean as it should be because there can be two -// type of objects serving as a source of events: registry and volume manager. -// -// TODO: Solution #1: make volume objects part of registry (though that bears -// its own problems). -// TODO: Solution #2: abstract event stream from source object type by providing -// hooks with source specific code when calling the constructor (the hooks -// for registry source object need to be shared to avoid code duplication) - -import * as _ from 'lodash'; -import assert from 'assert'; -import { Readable } from 'stream'; - -import { Node } from './node'; -import { Pool } from './pool'; -import { Replica } from './replica'; -import { Nexus } from './nexus'; -import { Volume } from './volume'; -import { Volumes } from './volumes'; -import { Registry } from './registry'; - -type ReceivedEventObject = { - eventType: string, - object: any, -}; - -type EventObject = { - kind: string, - eventType: string, - object: any, -}; - -type EventSource = { - registry?: Registry, // Registry object. - volumes?: Volumes, // Volume manager. -}; - -// Stream of events from registry and/or volume manager. Each event object -// retrieved from the stream is in the following form: -// -// { -// eventType: "sync", "new", "mod", "del" -// kind: "node", "pool", "replica", "nexus" or "volume" -// object: node, pool, replica, nexus or volume object -// } -// -// When reading the first time all node objects that exist in the cache before -// the stream was created are returned using the "new" event. That makes the -// stream suitable for populating the caches at the beginning. -// -// The primary motivation for introducing the class is to have a common code -// buffering registry events without duplicating it in all event consumers. -// -// TODO: End the stream when registry is stopped (requires new registry event). -// Is there equivalent for the volume manager? -// -export class EventStream extends Readable { - events: EventObject[]; - waiting: boolean; - started: boolean; - destroyed: boolean; - registry?: Registry; - volumes?: Volumes; - registryEventListeners: Record void>; - volumesEventListeners: Record void>; - - // Create the stream. - // - // @param source Source object for the events. - // @param [opts] nodejs stream options. 
- // - constructor (source: EventSource, opts?: any) { - assert(source); - super(_.assign({ objectMode: true }, opts || {})); - this.events = []; - this.waiting = false; - this.started = false; - this.destroyed = false; - if (source.registry) { - this.registry = source.registry; - } - if (source.volumes) { - this.volumes = source.volumes; - } - assert(this.registry || this.volumes); - // we save the listener functions in order to clear them at the end - this.registryEventListeners = { - node: this._onEvent.bind(this, 'node'), - nexus: this._onEvent.bind(this, 'nexus'), - pool: this._onEvent.bind(this, 'pool'), - replica: this._onEvent.bind(this, 'replica') - }; - this.volumesEventListeners = { - volume: this._onEvent.bind(this, 'volume') - }; - } - - // Start listeners and emit events about existing objects. - _start () { - assert(!this.waiting); - assert(this.events.length === 0); - this.started = true; - if (this.registry) { - for (const kind in this.registryEventListeners) { - this.registry.on(kind, this.registryEventListeners[kind]); - } - } - if (this.volumes) { - for (const kind in this.volumesEventListeners) { - this.volumes.on(kind, this.volumesEventListeners[kind]); - } - } - // Populate stream with objects which already exist but for consumer - // they appear as new. - const self = this; - if (self.registry) { - self.registry.getNodes().forEach((node: Node) => { - self.events.push({ - kind: 'node', - eventType: 'new', - object: node - }); - // First we emit replica and then pool events. Otherwise volume manager - // could start creating new volume on imported pool although that the - // volume is already there. - node.pools.forEach((obj: Pool) => { - obj.replicas.forEach((obj: Replica) => { - self.events.push({ - kind: 'replica', - eventType: 'new', - object: obj - }); - }); - self.events.push({ - kind: 'pool', - eventType: 'new', - object: obj - }); - }); - node.nexus.forEach((obj: Nexus) => { - self.events.push({ - kind: 'nexus', - eventType: 'new', - object: obj - }); - }); - // generate artificial 'sync' event for the node so that the reader knows - // that all "new" events for initial objects have been generated. 
- self.events.push({ - kind: 'node', - eventType: 'sync', - object: node - }); - }); - } - if (self.volumes) { - self.volumes.list().forEach((volume: Volume) => { - self.events.push({ - kind: 'volume', - eventType: 'new', - object: volume - }); - }); - } - if (self.waiting) { - self.waiting = false; - self._read(); - } - } - - _onEvent (kind: string, ev: ReceivedEventObject) { - this.events.push({ - kind: kind, - eventType: ev.eventType, - object: ev.object - }); - if (this.waiting) { - this.waiting = false; - this._read(); - } - } - - _read (_size?: number) { - if (!this.started) { - this._start(); - } - let cont = true; - while (cont) { - const ev = this.events.shift(); - if (ev) { - cont = this.push(ev); - } else { - this.waiting = true; - cont = false; - if (this.destroyed) { - this.push(null); - } - } - } - } - - _destroy (err: Error, cb: (err: Error) => void) { - if (this.started) { - if (this.registry) { - for (const kind in this.registryEventListeners) { - this.registry.removeListener(kind, this.registryEventListeners[kind]); - } - } - if (this.volumes) { - for (const kind in this.volumesEventListeners) { - this.volumes.removeListener(kind, this.volumesEventListeners[kind]); - } - } - } - this.destroyed = true; - // end the stream if it is waiting for more data but there are none - if (this.waiting) { - this.push(null); - } - cb(err); - } -} diff --git a/csi/moac/src/grpc_client.ts b/csi/moac/src/grpc_client.ts deleted file mode 100644 index c79214661..000000000 --- a/csi/moac/src/grpc_client.ts +++ /dev/null @@ -1,161 +0,0 @@ -// gRPC client related utilities - -import assert from 'assert'; -import * as path from 'path'; -import * as grpc from '@grpc/grpc-js'; -import { loadSync } from '@grpc/proto-loader'; - -import { Logger } from './logger'; -import { ServiceClient, ServiceClientConstructor } from '@grpc/grpc-js/build/src/make-client'; - -const log = Logger('grpc'); - -const MAYASTOR_PROTO_PATH: string = path.join(__dirname, '../proto/mayastor.proto'); -const DEFAULT_TIMEOUT_MS: number = 15000; -const SOFT_TIMEOUT_SLACK_MS: number = 1000; - -// Result of loadPackageDefinition() when run on mayastor proto file. -class MayastorDef { - // Constructor for mayastor grpc service client. - clientConstructor: ServiceClientConstructor; - // All enums that occur in mayastor proto file indexed by name - enums: Record; - - constructor() { - // Load mayastor proto file - const proto = loadSync(MAYASTOR_PROTO_PATH, { - // this is to load google/descriptor.proto - includeDirs: ['./node_modules/protobufjs'], - keepCase: false, - longs: Number, - enums: String, - defaults: true, - oneofs: true - }); - - const pkgDef = grpc.loadPackageDefinition(proto).mayastor as grpc.GrpcObject; - assert(pkgDef && pkgDef.Mayastor !== undefined); - this.clientConstructor = pkgDef.Mayastor as ServiceClientConstructor; - this.enums = {}; - Object.values(pkgDef).forEach((ent: any) => { - if (ent.format && ent.format.indexOf('EnumDescriptorProto') >= 0) { - ent.type.value.forEach((variant: any) => { - this.enums[variant.name] = variant.number; - }); - } - }); - } -} - -export const mayastor = new MayastorDef(); - -// This whole dance is done to satisfy typescript's type checking -// (not all values in grpc.status are numbers) -export const grpcCode: Record = (() => { - let codes: Record = {}; - for (let prop in grpc.status) { - let val = grpc.status[prop]; - if (typeof val === 'number') { - codes[prop] = val; - } - } - return codes; -})(); - -// Grpc error object. 
-// -// List of grpc status codes: -// OK: 0, -// CANCELLED: 1, -// UNKNOWN: 2, -// INVALID_ARGUMENT: 3, -// DEADLINE_EXCEEDED: 4, -// NOT_FOUND: 5, -// ALREADY_EXISTS: 6, -// PERMISSION_DENIED: 7, -// RESOURCE_EXHAUSTED: 8, -// FAILED_PRECONDITION: 9, -// ABORTED: 10, -// OUT_OF_RANGE: 11, -// UNIMPLEMENTED: 12, -// INTERNAL: 13, -// UNAVAILABLE: 14, -// DATA_LOSS: 15, -// UNAUTHENTICATED: 16 -// -export class GrpcError extends Error { - code: number; - - constructor (code: number, msg: string) { - assert(Object.values(grpcCode).indexOf(code) >= 0); - super(msg); - this.code = code; - } -} - -// Implementation of gRPC client encapsulating common code for calling a grpc -// method on a storage node (the node running mayastor). -export class GrpcClient { - private handle: ServiceClient; - private timeout: number; // timeout in milliseconds - - // Create promise-friendly grpc client handle. - // - // @param endpoint Host and port that mayastor server listens on. - // @param [timeout] Default timeout for grpc methods in millis. - constructor (endpoint: string, timeout?: number) { - this.handle = new mayastor.clientConstructor( - endpoint, - grpc.credentials.createInsecure() - ); - this.timeout = (timeout === undefined) ? DEFAULT_TIMEOUT_MS : timeout; - } - - private promiseWithTimeout = (prom: Promise, timeoutMs: number, exception: any) => { - let timer: NodeJS.Timeout; - return Promise.race([ - prom, - new Promise((_r, rej) => timer = setTimeout(rej, timeoutMs, exception)) - ]).finally(() => clearTimeout(timer)); - } - - // Call a grpc method with arguments. - // - // @param method Name of the grpc method. - // @param args Arguments of the grpc method. - // @param [timeout] Timeout in ms if the default should not be used. - // @returns Return value of the grpc method. - call (method: string, args: any, timeout?: number): Promise { - log.trace( - `Calling grpc method ${method} with arguments: ${JSON.stringify(args)}` - ); - if (timeout === undefined) { - timeout = this.timeout; - } - let promise = new Promise((resolve, reject) => { - const metadata = new grpc.Metadata(); - metadata.set('grpc-timeout', `${timeout}m`); - this.handle[method](args, metadata, (err: Error, val: any) => { - if (err) { - log.trace(`Grpc method ${method} failed: ${err}`); - reject(err); - } else { - log.trace(`Grpc method ${method} returned: ${JSON.stringify(val)}`); - resolve(val); - } - }); - }); - - // In some conditions, the grpc-timeout we've set above is not respected and the call simply gets stuck. - // When the grpc-timeout is not triggered then trigger our own soft timeout which is the original - // timeout plus some added slack. - const softTimeout = timeout + SOFT_TIMEOUT_SLACK_MS; - const error = new GrpcError(grpcCode.DEADLINE_EXCEEDED, `Soft timeout after ${softTimeout}ms`); - return this.promiseWithTimeout(promise, softTimeout, error); - } - - // Close the grpc handle. The client should not be used after that. - close () { - this.handle.close(); - } -} \ No newline at end of file diff --git a/csi/moac/src/index.ts b/csi/moac/src/index.ts deleted file mode 100644 index 772a9018a..000000000 --- a/csi/moac/src/index.ts +++ /dev/null @@ -1,249 +0,0 @@ -// Main file of our control plane for mayastor. -// It binds all components together to create a meaningful whole. 
- -const { KubeConfig } = require('@kubernetes/client-node'); -const yargs = require('yargs'); - - -import * as fs from 'fs'; -import { NodeOperator } from './node_operator'; -import { PoolOperator } from './pool_operator'; -import { Registry } from './registry'; -import { ApiServer } from './rest_api'; -import { MessageBus } from './nats'; -import { Volumes } from './volumes'; -import { VolumeOperator } from './volume_operator'; -import { CsiServer } from './csi'; -import { PersistentStore } from './persistent_store'; -import * as logger from './logger'; - -const log = logger.Logger(); - -const NAMESPACE_FILE = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'; - -// Load k8s config file. -// -// @param {string} [kubefile] Kube config file. -// @returns {object} k8s client object. -function createKubeConfig (kubefile: string): any { - const kubeConfig = new KubeConfig(); - try { - if (kubefile) { - log.info('Reading k8s configuration from file ' + kubefile); - kubeConfig.loadFromFile(kubefile); - } else { - kubeConfig.loadFromDefault(); - } - } catch (e) { - log.error('Cannot get k8s client configuration: ' + e); - process.exit(1); - } - return kubeConfig; -} - -export async function main () { - let apiServer: any; - let poolOper: PoolOperator; - let volumeOper: VolumeOperator; - let nodeOper: NodeOperator; - let kubeConfig: any; - let warmupTimer: NodeJS.Timeout | undefined; - - const opts = yargs - .options({ - a: { - alias: 'csi-address', - describe: 'Socket path where to listen for incoming CSI requests', - default: '/var/tmp/csi.sock', - string: true - }, - e: { - alias: 'etcd-endpoint', - describe: 'ETCD endpoint in host[:port] form', - default: '127.0.0.1:2379', - string: true - }, - i: { - alias: 'heartbeat-interval', - describe: 'Interval used by storage nodes for registration messages (seconds)', - default: 5, - number: true - }, - k: { - alias: 'kubeconfig', - describe: 'Path to kubeconfig file', - string: true - }, - n: { - alias: 'namespace', - describe: 'Override default namespace of mayastor custom resources', - string: true - }, - m: { - alias: 'message-bus', - describe: 'NATS server endpoint in host[:port] form', - default: '127.0.0.1:4222', - string: true - }, - p: { - alias: 'port', - describe: 'Port the REST API server should listen on', - default: 3000, - number: true - }, - s: { - alias: 'skip-k8s', - describe: - 'Skip k8s client and k8s operators initialization (only for debug purpose)', - default: false, - boolean: true - }, - 'sync-period': { - describe: 'Sync period for a storage node that is known to be healthy (in seconds)', - default: 60, - number: true - }, - 'sync-retry': { - describe: 'Sync period for a storage nodes that is known to be bad (in seconds)', - default: 10, - number: true - }, - 'sync-bad-limit': { - describe: 'Storage node moves to offline state after this many retries (0 means immediately when it fails)', - default: 0, - number: true - }, - v: { - alias: 'verbose', - describe: 'Print debug log messages', - count: true - }, - w: { - alias: 'watcher-idle-timeout', - describe: 'Restart watcher connections after this many seconds if idle', - default: 0, - number: true - } - }) - .help('help') - .strict().argv; - - switch (opts.v) { - case 0: - logger.setLevel('info'); - break; - case 1: - logger.setLevel('debug'); - break; - default: - logger.setLevel('silly'); - break; - } - - // Determine the namespace that should be used for CRDs - let namespace: string = 'default'; - if (opts.namespace) { - namespace = opts.namespace; - } else if 
(!opts.s) { - try { - namespace = fs.readFileSync(NAMESPACE_FILE).toString(); - } catch (err) { - log.error(`Cannot read pod namespace from ${NAMESPACE_FILE}: ${err}`); - process.exit(1); - } - } - log.debug(`Operating in namespace "${namespace}"`); - - - // We must install signal handlers before grpc lib does it. - async function cleanUp () { - if (warmupTimer) clearTimeout(warmupTimer); - if (csiServer) csiServer.undoReady(); - if (apiServer) apiServer.stop(); - if (!opts.s) { - if (volumeOper) volumeOper.stop(); - } - if (volumes) volumes.stop(); - if (!opts.s) { - if (poolOper) poolOper.stop(); - if (nodeOper) nodeOper.stop(); - } - if (messageBus) messageBus.stop(); - if (registry) registry.close(); - if (csiServer) await csiServer.stop(); - process.exit(0); - } - process.on('SIGTERM', async () => { - log.info('SIGTERM signal received.'); - await cleanUp(); - }); - process.on('SIGINT', async () => { - log.info('SIGINT signal received.'); - await cleanUp(); - }); - - // Create csi server before starting lengthy initialization so that we can - // serve csi.identity() calls while getting ready. - const csiServer = new CsiServer(opts.csiAddress); - const persistentStore = new PersistentStore([opts.e]); - - await csiServer.start(); - let registry = new Registry({ - syncPeriod: opts.syncPeriod * 1000, - syncRetry: opts.syncRetry * 1000, - syncBadLimit: opts.syncBadLimit, - }, persistentStore); - - // Listen to register and deregister messages from mayastor nodes - const messageBus = new MessageBus(registry); - messageBus.start(opts.m); - - if (!opts.s) { - // Create k8s client and load openAPI spec from k8s api server - kubeConfig = createKubeConfig(opts.kubeconfig); - - // Start k8s operators - nodeOper = new NodeOperator( - namespace, - kubeConfig, - registry, - opts.watcherIdleTimeout - ); - await nodeOper.init(kubeConfig); - await nodeOper.start(); - - poolOper = new PoolOperator( - namespace, - kubeConfig, - registry, - opts.watcherIdleTimeout - ); - await poolOper.init(kubeConfig); - await poolOper.start(); - } - - const volumes = new Volumes(registry); - volumes.start(); - - const warmupSecs = Math.floor(1.5 * opts.i); - log.info(`Warming up will take ${warmupSecs} seconds ...`); - warmupTimer = setTimeout(async () => { - warmupTimer = undefined; - if (!opts.s) { - volumeOper = new VolumeOperator( - namespace, - kubeConfig, - volumes, - opts.watcherIdleTimeout - ); - await volumeOper.init(kubeConfig); - await volumeOper.start(); - } - - apiServer = new ApiServer(registry); - await apiServer.start(opts.port); - - csiServer.makeReady(registry, volumes); - log.info('MOAC is warmed up and ready to 🚀'); - }, warmupSecs * 1000); -} diff --git a/csi/moac/src/logger.ts b/csi/moac/src/logger.ts deleted file mode 100644 index 0bf21c6cf..000000000 --- a/csi/moac/src/logger.ts +++ /dev/null @@ -1,114 +0,0 @@ -// Common logger instance which is configured once and can be included in -// all files where logging is needed. - -const winston = require('winston'); - -const monthShortNames = [ - 'Jan', - 'Feb', - 'Mar', - 'Apr', - 'May', - 'Jun', - 'Jul', - 'Aug', - 'Sep', - 'Oct', - 'Nov', - 'Dec' -]; - -// This will convert ISO timestamp string to following format: -// Oct 10 19:49:29.027 -function toLocalTime (isoTs: string) { - const dt = new Date(Date.parse(isoTs)); - const pad = function (num: number) { - return (num < 10 ? 
'0' : '') + num; - }; - const pad2 = function (num: number) { - if (num < 10) { - return '00' + num; - } else if (num < 100) { - return '0' + num; - } else { - return num; - } - }; - return ( - monthShortNames[dt.getMonth()] + - ' ' + - pad(dt.getDate()) + - ' ' + - pad(dt.getHours()) + - ':' + - pad(dt.getMinutes()) + - ':' + - pad(dt.getSeconds()) + - '.' + - pad2(dt.getMilliseconds()) - ); -} - -type PrintfArg = { - level: string; - message: string; - label: string; - timestamp: string; -}; - -const myFormat = winston.format.printf((arg: PrintfArg) => { - const result = [toLocalTime(arg.timestamp)]; - - // silly -> trace - if (arg.level.match(/silly/)) { - arg.level = arg.level.replace(/silly/, 'trace'); - } - result.push(arg.level); - - if (arg.label) { - result.push('[' + arg.label + ']:'); - } else { - result[result.length - 1] += ':'; - } - result.push(arg.message); - return result.join(' '); -}); - -const formats = [winston.format.timestamp(), myFormat]; -if (process.stdout.isTTY) { - formats.unshift(winston.format.colorize()); -} -const logger = winston.createLogger({ - level: 'info', - format: winston.format.combine(...formats), - transports: [new winston.transports.Console()] -}); - -export function setLevel (level: string) { - logger.level = level; -} - -// Purpose of the wrapper is to add component prefix to each log message -export function Logger (component?: string): any { - const obj = Object.create(Logger.prototype); - obj.component = component; - obj.logger = logger; - return obj; -} - -const levels = ['debug', 'info', 'warn', 'error']; -levels.forEach((lvl) => { - Logger.prototype[lvl] = function (msg: string) { - logger[lvl].call(this.logger, { - label: this.component, - message: msg - }); - }; -}); -// rename trace to silly -Logger.prototype.trace = function (msg: string) { - logger.silly.call(this.logger, { - component: this.component, - message: msg - }); -}; \ No newline at end of file diff --git a/csi/moac/src/nats.ts b/csi/moac/src/nats.ts deleted file mode 100644 index fec996fef..000000000 --- a/csi/moac/src/nats.ts +++ /dev/null @@ -1,162 +0,0 @@ -// Interface to the NATS server where mayastor instances send registration -// requests and events. - -import assert from 'assert'; -import * as nats from 'nats'; - -import { Registry } from './registry'; -import { Logger } from './logger'; - -const log = Logger('nats'); - -// If NATS server is not available then keep trying to connect in this interval -const RECONNECT_DELAY = 10000; // in ms - -type RegistrationMsg = { - id: string; - grpcEndpoint: string; -}; - -type DeregistrationMsg = { - id: string; -}; - -// Message bus object subscribes to messages from NATS server and handles each -// message by dispatching it further to other moac components. -export class MessageBus { - registry: Registry; - endpoint: string; - nc: nats.NatsConnection | null; - timeout: NodeJS.Timeout | null; - connected: boolean; - reconnectDelay: number; - - // Create a new message bus object. - // - // @param {object} registry Object registry used for adding/removing of nodes. - // @param {object} reconnectDelay If NATS srv is unavailable, keep trying with this delay. - constructor (registry: Registry, reconnectDelay?: number) { - assert(registry); - this.registry = registry; - this.endpoint = ''; - this.nc = null; - this.timeout = null; - this.connected = false; - this.reconnectDelay = reconnectDelay || RECONNECT_DELAY; - } - - // Connect to the NATS server - // - // @param {string} endpoint NATS server's address and port. 
- start (endpoint: string) { - assert(!this.nc); - this.endpoint = endpoint; - this._connect(); - } - - // Disconnect from the NATS server - stop () { - if (this.timeout) { - clearTimeout(this.timeout); - this.timeout = null; - } - this._disconnect(); - } - - // Return if the bus is connected to the NATS server. - // - // @returns {boolean} true if connected otherwise false. - isConnected (): boolean { - return this.connected; - } - - // The method is async but returns immediately. - // However it's up to caller if she wants to wait for it. - _connect () { - log.debug(`Connecting to NATS at "${this.endpoint}" ...`); - if (this.timeout) clearTimeout(this.timeout); - assert(!this.nc); - nats.connect({ - servers: [`nats://${this.endpoint}`] - }) - .then((nc) => { - log.info(`Connected to NATS message bus at "${this.endpoint}"`); - this.nc = nc; - this.connected = true; - this._subscribe(); - }) - .catch((err) => { - log.error(`${err}`); - this._disconnect(); - log.debug(`Reconnecting after ${this.reconnectDelay}ms`); - // reconnect but give it some time to recover to prevent spinning in loop - this.timeout = setTimeout(this._connect.bind(this), this.reconnectDelay); - }); - } - - _disconnect () { - if (this.nc) { - this.nc.close(); - this.nc = null; - this.connected = false; - log.info('Disconnected from NATS message bus'); - } - } - - _parsePayload (msg: nats.Msg) { - const sc = nats.StringCodec(); - try { - return JSON.parse(sc.decode(msg.data)); - } catch (e) { - log.error(`Invalid payload in ${msg.subject} message: not a JSON`); - } - } - - _registrationReceived (data: RegistrationMsg) { - const ep = data.grpcEndpoint; - if (typeof ep !== 'string' || ep.length === 0) { - log.error('Invalid grpc endpoint in registration message'); - return; - } - const id = data.id; - if (typeof id !== 'string' || id.length === 0) { - log.error('Invalid node name in registration message'); - return; - } - log.trace(`"${id}" with "${ep}" requested registration`); - this.registry.addNode(id, ep); - } - - _deregistrationReceived (data: DeregistrationMsg) { - const id = data.id; - if (typeof id !== 'string' || id.length === 0) { - log.error('Invalid node name in deregistration message'); - return; - } - log.trace(`"${id}" requested deregistration`); - this.registry.disconnectNode(id); - } - - _subscribe () { - assert(this.nc); - const registrySub = this.nc.subscribe('v0/registry'); - this._registryHandler(registrySub); - } - - async _registryHandler (sub: nats.Subscription) { - for await (const m of sub) { - const payload = this._parsePayload(m); - if (!payload) { - return; - } - if (payload.id === 'v0/register') { - this._registrationReceived(payload.data); - } else if (payload.id === 'v0/deregister') { - this._deregistrationReceived(payload.data); - } else { - const id = payload.id; - log.error(`Unknown registry message: ${id}`); - } - } - } -} \ No newline at end of file diff --git a/csi/moac/src/nexus.ts b/csi/moac/src/nexus.ts deleted file mode 100644 index 48221cd5b..000000000 --- a/csi/moac/src/nexus.ts +++ /dev/null @@ -1,338 +0,0 @@ -// Nexus object implementation. - -import assert from 'assert'; -import * as _ from 'lodash'; - -import { grpcCode, GrpcError, mayastor } from './grpc_client'; -import { Node } from './node'; -import { Replica } from './replica'; -import { Logger } from './logger'; - -const log = Logger('nexus'); - -// We increase timeout value to nexus destroy method because it involves -// updating etcd state in mayastor. Mayastor itself uses 30s timeout for etcd. 
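The registry handler in the removed nats.ts above accepts JSON messages on the v0/registry subject with an id of v0/register or v0/deregister and a data payload carrying the node name and gRPC endpoint. A sketch of the producing side, assuming the nats.js v2 client and placeholder endpoint values:

import { connect, StringCodec } from 'nats';

// Publish the registration message shape that the removed MessageBus handles
// on the 'v0/registry' subject. Endpoint values below are placeholders.
async function register (natsEndpoint: string, nodeName: string, grpcEndpoint: string) {
  const nc = await connect({ servers: [`nats://${natsEndpoint}`] });
  const sc = StringCodec();
  const msg = { id: 'v0/register', data: { id: nodeName, grpcEndpoint } };
  nc.publish('v0/registry', sc.encode(JSON.stringify(msg)));
  await nc.drain();
}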
-const NEXUS_DESTROY_TIMEOUT_MS = 60000; - -// Protocol used to export nexus (volume) -export enum Protocol { - Unknown = 'unknown', - Iscsi = 'iscsi', - Nvmf = 'nvmf', -} - -export function protocolFromString(val: string): Protocol { - if (val == Protocol.Iscsi) { - return Protocol.Iscsi; - } else if (val == Protocol.Nvmf) { - return Protocol.Nvmf; - } else { - return Protocol.Unknown; - } -} - -// Represents a child with uri and state properties. -// TODO: define state as enum. -export class Child { - constructor(public uri: string, public state: string) { - assert(uri); - assert(state); - } - isEqual(ch: Child) { - return (ch.uri === this.uri && ch.state === this.state); - } -} - -// Used with .sort() method to enforce deterministic order of children. -function compareChildren(a: Child, b: Child) { - return a.uri.localeCompare(b.uri); -} - -export class Nexus { - node?: Node; - uuid: string; - size: number; - deviceUri: string; - state: string; - children: Child[]; - - // Construct new nexus object. - // - // @param {object} props Nexus properties as obtained from the storage node. - // @param {string} props.uuid ID of the nexus. - // @param {number} props.size Capacity of the nexus in bytes. - // @param {string} props.deviceUri Block device path to the nexus. - // @param {string} props.state State of the nexus. - // @param {object[]} props.children Replicas comprising the nexus (uri and state). - // - constructor(props: any) { - this.node = undefined; // set by registerNexus method on node - this.uuid = props.uuid; - this.size = props.size; - this.deviceUri = props.deviceUri; - this.state = props.state; - // children of the nexus (replica URIs and their state) - this.children = (props.children || []) - .map((ch: any) => new Child(ch.uri, ch.state)) - .sort(compareChildren); - } - - // Stringify the nexus - toString() { - return this.uuid + '@' + (this.node ? this.node.name : 'nowhere'); - } - - // Update object based on fresh properties obtained from mayastor storage node. - // - // @param {object} props Properties defining the nexus. - // @param {string} props.uuid ID of the nexus. - // @param {number} props.size Capacity of the nexus in bytes. - // @param {string} props.deviceUri Block device URI of the nexus. - // @param {string} props.state State of the nexus. - // @param {object[]} props.children Replicas comprising the nexus (uri and state). - // - merge(props: any) { - let changed = false; - - if (this.size !== props.size) { - this.size = props.size; - changed = true; - } - if (this.deviceUri !== props.deviceUri) { - this.deviceUri = props.deviceUri; - changed = true; - } - if (this.state !== props.state) { - this.state = props.state; - changed = true; - } - const children = props.children - .map((ch: any) => new Child(ch.uri, ch.state)) - .sort(compareChildren); - let childrenChanged = false; - if (this.children.length !== children.length) { - childrenChanged = true; - } else { - for (let i = 0; i < this.children.length; i++) { - if (!this.children[i].isEqual(children[i])) { - childrenChanged = true; - break; - } - } - } - if (childrenChanged) { - this.children = children; - changed = true; - } - if (changed) { - this._emitMod(); - } - } - - // When anything in nexus changes, this can be called to emit mod event - // (a shortcut for frequently used code). - _emitMod() { - this.node!.emit('nexus', { - eventType: 'mod', - object: this - }); - } - - // Bind nexus to the node. - // - // @param {object} node Node to bind the nexus to. 
- // - bind(node: any) { - this.node = node; - log.debug(`Adding "${this.uuid}" to the nexus list of node "${node}"`); - this.node!.emit('nexus', { - eventType: 'new', - object: this - }); - } - - // Unbind the previously bound nexus from the node. - unbind() { - log.debug(`Removing "${this}" from the nexus list`); - this.node!.unregisterNexus(this); - this.node!.emit('nexus', { - eventType: 'del', - object: this - }); - this.node = undefined; - } - - // Set state of the nexus to offline. - // This is typically called when mayastor stops running on the node and - // the pool becomes inaccessible. - offline() { - log.warn(`Nexus "${this}" got offline`); - this.state = 'NEXUS_OFFLINE'; - this._emitMod(); - } - - // Return true if the nexus is down (unreachable). - isOffline() { - return !(this.node && this.node.isSynced()); - } - - // Publish the nexus to make accessible for IO. - // @params protocol The nexus share protocol. - // @returns The device path of nexus block device. - // - async publish(protocol: Protocol): Promise { - var res; - - if (this.deviceUri) { - throw new GrpcError( - grpcCode.ALREADY_EXISTS, - `Nexus ${this} has been already published` - ); - } - - const nexusProtocol = 'NEXUS_'.concat(protocol.toUpperCase()); - var shareNumber = mayastor.enums[nexusProtocol]; - if (shareNumber === undefined) { - throw new GrpcError( - grpcCode.NOT_FOUND, - `Cannot find protocol "${protocol}" for Nexus ${this}` - ); - } - log.info(`Publishing nexus "${this}" with protocol=${protocol} ...`); - try { - res = await this.node!.call('publishNexus', { - uuid: this.uuid, - key: '', - share: shareNumber - }); - } catch (err) { - throw new GrpcError( - grpcCode.INTERNAL, - `Failed to publish nexus "${this}": ${err}` - ); - } - log.info(`Nexus "${this}" is published at "${res.deviceUri}"`); - this.deviceUri = res.deviceUri; - this._emitMod(); - return res.deviceUri; - } - - // Unpublish nexus. - async unpublish() { - log.debug(`Unpublishing nexus "${this}" ...`); - - if (!this.node!.isSynced()) { - // We don't want to block the volume life-cycle in case that the node - // is down - it may never come back online. - log.warn(`Faking the unpublish of "${this}" because it is unreachable`); - } else { - try { - await this.node!.call('unpublishNexus', { uuid: this.uuid }); - } catch (err) { - if (err.code === grpcCode.NOT_FOUND) { - log.warn(`The nexus "${this}" does not exist`); - } else { - throw new GrpcError( - grpcCode.INTERNAL, - `Failed to unpublish nexus "${this}": ${err}` - ); - } - } - log.info(`Nexus "${this}" was unpublished`); - } - this.deviceUri = ''; - this._emitMod(); - } - - // Get URI under which the nexus is published or "undefined" if it hasn't been - // published. - getUri(): string | undefined { - return this.deviceUri || undefined; - } - - // Add replica to the nexus. - // - // @param {object} replica Replica object to add to the nexus. 
- // - async addReplica(replica: Replica): Promise { - const uri = replica.uri; - let ch = this.children.find((ch) => ch.uri === uri); - if (ch) { - return ch; - } - log.debug(`Adding uri "${uri}" to nexus "${this}" ...`); - - var childInfo; - try { - // TODO: validate the output - childInfo = await this.node!.call('addChildNexus', { - uuid: this.uuid, - uri: uri, - norebuild: false - }); - } catch (err) { - throw new GrpcError( - grpcCode.INTERNAL, - `Failed to add uri "${uri}" to nexus "${this}": ${err}` - ); - } - // The child will need to be rebuilt when added, but until we get - // confirmation back from the nexus, set it as pending - ch = new Child(childInfo.uri, childInfo.state); - this.children.push(ch); - this.children.sort(compareChildren); - this.state = "NEXUS_DEGRADED" - log.info(`Replica uri "${uri}" added to the nexus "${this}"`); - this._emitMod(); - return ch; - } - - // Remove replica from nexus. - // - // @param {string} uri URI of the replica to be removed from the nexus. - // - async removeReplica(uri: string) { - if (!this.children.find((ch) => ch.uri === uri)) { - return; - } - - log.debug(`Removing uri "${uri}" from nexus "${this}" ...`); - - try { - await this.node!.call('removeChildNexus', { - uuid: this.uuid, - uri: uri - }); - } catch (err) { - throw new GrpcError( - grpcCode.INTERNAL, - `Failed to remove uri "${uri}" from nexus "${this}": ${err}` - ); - } - // get index again in case the list changed in the meantime - const idx = this.children.findIndex((ch) => ch.uri === uri); - if (idx >= 0) { - this.children.splice(idx, 1); - } - log.info(`Replica uri "${uri}" removed from the nexus "${this}"`); - this._emitMod(); - } - - // Destroy nexus on storage node. - async destroy() { - log.debug(`Destroying nexus "${this}" ...`); - if (!this.node!.isSynced()) { - // We don't want to block the volume life-cycle in case that the node - // is down - it may never come back online. - log.warn(`Faking the destroy of "${this}" because it is unreachable`); - } else { - await this.node!.call( - 'destroyNexus', - { uuid: this.uuid }, - NEXUS_DESTROY_TIMEOUT_MS, - ); - log.info(`Destroyed nexus "${this}"`); - } - this.unbind(); - } -} \ No newline at end of file diff --git a/csi/moac/src/node.ts b/csi/moac/src/node.ts deleted file mode 100644 index 3c62269a2..000000000 --- a/csi/moac/src/node.ts +++ /dev/null @@ -1,435 +0,0 @@ -// Abstraction representing a storage node with its objects (nexus, pools, -// replicas). Consumers can use it to receive information about the storage -// objects and notifications about the changes. - -import assert from 'assert'; -import events = require('events'); - -import { grpcCode, GrpcError, GrpcClient } from './grpc_client'; -import { Pool } from './pool'; -import { Nexus } from './nexus'; -import { Replica } from './replica'; -import { Workq } from './workq'; -import { Logger } from './logger'; - -const log = Logger('node'); - -// We increase timeout value to nexus create method because it involves -// updating etcd state in mayastor. Mayastor itself uses 30s timeout for etcd. -const NEXUS_CREATE_TIMEOUT_MS = 60000; - -// Type returned by stats grpc call -export type ReplicaStat = { - timestamp: number, - // tags - uuid: string, - pool: string, - // counters - num_read_ops: number, - num_write_ops: number, - bytes_read: number, - bytes_written: number, -} - -// Node options when created. -export type NodeOpts = { - // How often to sync healthy node (in ms). - syncPeriod?: number; - // How often to retry sync if it failed (in ms). 
- syncRetry?: number; - // Flip the node to offline state after this many retries have failed. - syncBadLimit?: number; -} - -// Object represents mayastor storage node. -// -// Node emits following events: -// "node": node related events with payload { eventType: "sync", object: node } -// when the node is sync'd after previous sync failure(s). -// "pool", "replica", "nexus": with eventType "new", "mod", "del". -export class Node extends events.EventEmitter { - name: string; - syncPeriod: number; - syncRetry: number; - syncBadLimit: number; - endpoint: string | null; - client: any; - workq: Workq; - syncFailed: number; - syncTimer: NodeJS.Timeout | null; - nexus: Nexus[]; - pools: Pool[]; - - // Create a storage node object. - // - // @param {string} name Node name. - // @param {Object} [opts] Options - constructor (name: string, opts?: NodeOpts) { - opts = opts || {}; - - super(); - this.name = name; - this.syncPeriod = opts.syncPeriod || 60000; - this.syncRetry = opts.syncRetry || 10000; - this.syncBadLimit = opts.syncBadLimit || 0; - - this.endpoint = null; - this.client = null; // grpc client handle - this.workq = new Workq('grpc call'); // work queue for serializing grpc calls - // We don't want to switch all objects to offline state when moac starts - // just because a node is not reachable from the beginning. That's why we - // set syncFailed to syncBadLimit + 1. - this.syncFailed = this.syncBadLimit + 1; // 0 if last sync was successful - this.syncTimer = null; // timer for periodic sync of the node - - // cache of objects from storage node - this.nexus = []; - this.pools = []; - } - - // Stringify node object. - toString(): string { - return this.name; - } - - // Create grpc connection to the mayastor server - connect(endpoint: string) { - if (this.client) { - if (this.endpoint === endpoint) { - // nothing changed - return; - } else { - log.info( - `mayastor endpoint on node "${this.name}" changed from "${this.endpoint}" to "${endpoint}"` - ); - this.emit('node', { - eventType: 'mod', - object: this - }); - this.client.close(); - if (this.syncTimer) { - clearTimeout(this.syncTimer); - this.syncTimer = null; - } - } - } else { - log.info(`new mayastor node "${this.name}" with endpoint "${endpoint}"`); - } - this.endpoint = endpoint; - this.client = new GrpcClient(endpoint); - this.sync(); - } - - // Close the grpc connection - disconnect() { - if (!this.client) return; - log.info(`mayastor on node "${this.name}" is gone`); - this.client.close(); - this.client = null; - this.endpoint = null; - if (this.syncTimer) { - clearTimeout(this.syncTimer); - this.syncTimer = null; - } - this.syncFailed = this.syncBadLimit + 1; - this._offline(); - } - - unbind() { - // todo: on user explicit removal should we destroy the pools as well? - this.pools.forEach((pool) => pool.unbind()); - this.nexus.forEach((nexus) => nexus.unbind()); - } - - // The node is considered broken, emit offline events on all objects - // that are present on the node. - _offline() { - this.emit('node', { - eventType: 'mod', - object: this - }); - this.pools.forEach((pool) => pool.offline()); - this.nexus.forEach((nexus) => nexus.offline()); - } - - // Call grpc method on storage node. The calls are serialized in order - // to prevent race conditions and inconsistencies. - // - // @param method gRPC method name. - // @param args Arguments for gRPC method. - // @param [timeout] Optional timeout in ms. - // @returns A promise that evals to return value of gRPC method. 
- // - async call(method: string, args: any, timeout?: number): Promise { - return this.workq.push({ method, args, timeout }, ({method, args, timeout}) => { - return this._call(method, args, timeout); - }); - } - - async _call(method: string, args: any, timeout?: number): Promise { - if (!this.client) { - throw new GrpcError( - grpcCode.INTERNAL, - `Broken connection to mayastor on node "${this.name}"` - ); - } - return this.client.call(method, args, timeout); - } - - // Sync triggered by the timer. It ensures that the sync does run in - // parallel with any other rpc call or another sync. - async sync() { - let nextSync; - this.syncTimer = null; - - try { - await this.workq.push(null, () => { - return this._sync(); - }); - nextSync = this.syncPeriod; - } catch (err) { - // We don't want to cover up unexpected errors. But it's hard to - // differenciate between expected and unexpected errors. At least we try. - if (!(err instanceof GrpcError) && !err.code) { - throw err; - } - nextSync = this.syncRetry; - if (this.syncFailed++ === this.syncBadLimit) { - log.error(`The node "${this.name}" is out of sync: ${err}`); - this._offline(); - } else if (this.syncFailed <= this.syncBadLimit) { - log.warn(`Failed to sync the node "${this.name}": ${err}`); - } else { - log.debug(`Failed to sync the node "${this.name}": ${err}`); - } - } - - // if still connected then schedule next sync - if (!this.syncTimer && this.client) { - this.syncTimer = setTimeout(this.sync.bind(this), nextSync); - } - } - - // Synchronize nexus, replicas and pools. Called from work queue so it cannot - // interfere with other grpc calls. - async _sync() { - log.debug(`Syncing the node "${this.name}"`); - - // TODO: Harden checking of outputs of the methods below - let reply = await this._call('listNexus', {}); - const nexus = reply.nexusList; - reply = await this._call('listPools', {}); - const pools = reply.pools; - reply = await this._call('listReplicas', {}); - const replicas = reply.replicas; - - // Move the the node to online state before we attempt to merge objects - // because they might need to invoke rpc methods on the node. - const wasOffline = this.syncFailed > 0; - if (wasOffline) { - this.syncFailed = 0; - } - // merge pools and replicas - this._mergePoolsAndReplicas(pools, replicas); - // merge nexus - this._mergeNexus(nexus); - - log.debug(`The node "${this.name}" was successfully synced`); - - if (wasOffline) { - this.emit('node', { - eventType: 'mod', - object: this - }); - } - } - - // Merge information about pools and replicas obtained from storage node - // with the information we knew before. Add, remove and update existing - // objects as necessary. - // - // @param {object[]} pools New pools with properties. - // @param {object[]} replicas New replicas with properties. - // - _mergePoolsAndReplicas(pools: any[], replicas: any[]) { - // detect modified and new pools - pools.forEach((props) => { - const poolReplicas = replicas.filter((r) => r.pool === props.name); - const pool = this.pools.find((p) => p.name === props.name); - if (pool) { - // the pool already exists - update it - pool.merge(props, poolReplicas); - } else { - // it is a new pool - this._registerPool(new Pool(props), poolReplicas); - } - }); - // remove pools that no longer exist - this.pools - .filter((p) => !pools.find((ent) => ent.name === p.name)) - .forEach((p) => p.unbind()); - } - - // Compare list of existing nexus with nexus properties obtained from - // storage node and: - // - // 1. 
call merge nexus method if the nexus was found - // 2. create a new nexus based on the properties if not found - // 3. remove the nexus if it no longer exists - // - // These actions will further emit new/mod/del events to inform other - // components about the changes. - // - // @param {object[]} nexusList List of nexus obtained from storage node. - // - _mergeNexus(nexusList: any[]) { - // detect modified and new pools - nexusList.forEach((props) => { - const nexus = this.nexus.find((n) => n.uuid === props.uuid); - if (nexus) { - // the nexus already exists - update it - nexus.merge(props); - } else { - // it is a new nexus - this._registerNexus(new Nexus(props)); - } - }); - // remove nexus that no longer exist - const removedNexus = this.nexus.filter( - (n) => !nexusList.find((ent) => ent.uuid === n.uuid) - ); - removedNexus.forEach((n) => n.destroy()); - } - - // Push the new pool to a list of pools of this node. - // - // @param {object} pool New pool object. - // @param {object[]} [replicas] New replicas on the pool. - // - _registerPool(pool: Pool, replicas: any) { - assert(!this.pools.find((p) => p.name === pool.name)); - this.pools.push(pool); - pool.bind(this); - replicas = replicas || []; - replicas.forEach((r: any) => pool.registerReplica(new Replica(r))); - } - - // Remove the pool from list of pools of this node. - // - // @param {object} pool The pool to be deregistered from the node. - // - unregisterPool(pool: Pool) { - const idx = this.pools.indexOf(pool); - if (idx >= 0) { - this.pools.splice(idx, 1); - } else { - log.warn( - `Pool "${pool}" is being deregistered and not assigned to the node "${this.name}"` - ); - } - } - - // Push the new nexus to a nexus list of this node. - // - // @param {object} nexus New nexus object. - // - _registerNexus(nexus: Nexus) { - assert(!this.nexus.find((p) => p.uuid === nexus.uuid)); - this.nexus.push(nexus); - nexus.bind(this); - } - - // Remove the nexus from list of nexus's for the node. - // - // @param {object} nexus The nexus to be deregistered from the node. - // - unregisterNexus(nexus: Nexus) { - const idx = this.nexus.indexOf(nexus); - if (idx >= 0) { - this.nexus.splice(idx, 1); - } else { - log.warn( - `Nexus "${nexus}" is being deregistered and not assigned to the node "${this.name}"` - ); - } - } - - // Get all replicas across all pools on this node. - // - // @returns All replicas on this node. - getReplicas(): Replica[] { - return this.pools.reduce( - (acc: Replica[], pool: Pool) => acc.concat(pool.replicas), []); - } - - // Return true if the node is considered healthy which means that its state - // is synchronized with the state maintained on behalf of this node object. - // - // @returns True if the node is healthy, false otherwise. - // - isSynced(): boolean { - return this.syncFailed <= this.syncBadLimit; - } - - // Create storage pool on this node. - // - // @param name Name of the new pool. - // @param disks List of disk devices for the pool. - // @returns New pool object. - // - async createPool(name: string, disks: string[]): Promise { - log.debug(`Creating pool "${name}@${this.name}" ...`); - - const poolInfo = await this.call('createPool', { name, disks }); - log.info(`Created pool "${name}@${this.name}"`); - - const newPool = new Pool(poolInfo); - this._registerPool(newPool, []); - return newPool; - } - - // Create nexus on this node. - // - // @param uuid ID of the new nexus. - // @param size Size of nexus in bytes. - // @param replicas Replica objects comprising the nexus. - // @returns New nexus object. 
- async createNexus(uuid: string, size: number, replicas: Replica[]): Promise { - const children = replicas.map((r) => r.uri); - log.debug(`Creating nexus "${uuid}@${this.name}"`); - - const nexusInfo = await this.call( - 'createNexus', - { uuid, size, children }, - NEXUS_CREATE_TIMEOUT_MS, - ); - log.info(`Created nexus "${uuid}@${this.name}"`); - - const newNexus = new Nexus(nexusInfo); - this._registerNexus(newNexus); - return newNexus; - } - - // Get IO statistics for all replicas on the node. - // - // @returns Array of stats - one entry for each replica on the node. - async getStats(): Promise { - log.debug(`Retrieving replica stats from node "${this}"`); - const reply = await this.call('statReplicas', {}); - const timestamp = new Date().toISOString(); - return reply.replicas.map((r: any) => { - return { - timestamp, - // tags - uuid: r.uuid, - node: this.name, - pool: r.pool, - // counters - num_read_ops: r.stats.numReadOps, - num_write_ops: r.stats.numWriteOps, - bytes_read: r.stats.bytesRead, - bytes_written: r.stats.bytesWritten - }; - }); - } -} diff --git a/csi/moac/src/node_operator.ts b/csi/moac/src/node_operator.ts deleted file mode 100644 index 2f719d389..000000000 --- a/csi/moac/src/node_operator.ts +++ /dev/null @@ -1,280 +0,0 @@ -// Node operator is responsible for managing mayastor node custom resources -// that represent nodes in the cluster that run mayastor (storage nodes). -// -// Roles: -// * The operator creates/modifies/deletes the resources to keep them up to date. -// * A user can delete a stale resource (can happen that moac doesn't know) - -import assert from 'assert'; -import * as fs from 'fs'; -import * as path from 'path'; -import { - ApiextensionsV1Api, - KubeConfig, -} from '@kubernetes/client-node'; -import { - CustomResource, - CustomResourceCache, - CustomResourceMeta, -} from './watcher'; -import { EventStream } from './event_stream'; -import { Workq } from './workq'; -import { Logger } from './logger'; - -const log = Logger('node-operator'); - -const yaml = require('js-yaml'); - -const RESOURCE_NAME: string = 'mayastornode'; -const crdNode = yaml.load( - fs.readFileSync(path.join(__dirname, '../crds/mayastornode.yaml'), 'utf8') -); - -// State of a storage node. -enum NodeState { - Unknown = "unknown", - Online = "online", - Offline = "offline", -} - -// Object defines properties of node resource. -export class NodeResource extends CustomResource { - apiVersion?: string; - kind?: string; - metadata: CustomResourceMeta; - spec: { grpcEndpoint: string }; - status?: NodeState; - - constructor(cr: CustomResource) { - super(); - this.apiVersion = cr.apiVersion; - this.kind = cr.kind; - if (cr.status === NodeState.Online) { - this.status = NodeState.Online; - } else if (cr.status === NodeState.Offline) { - this.status = NodeState.Offline; - } else { - this.status = NodeState.Unknown; - } - if (cr.metadata === undefined) { - throw new Error('missing metadata'); - } else { - this.metadata = cr.metadata; - } - if (cr.spec === undefined) { - throw new Error('missing spec'); - } else { - let grpcEndpoint = (cr.spec as any).grpcEndpoint; - if (!grpcEndpoint) { - grpcEndpoint = ''; - } - this.spec = { grpcEndpoint }; - } - } -} - -export class NodeOperator { - watcher: CustomResourceCache; // k8s resource watcher for nodes - registry: any; - namespace: string; - workq: Workq; // for serializing node operations - eventStream: any; // events from the registry - - // Create node operator object. - // - // @param namespace Namespace the operator should operate on. 
- // @param kubeConfig KubeConfig. - // @param registry Registry with node objects. - // @param [idleTimeout] Timeout for restarting watcher connection when idle. - constructor ( - namespace: string, - kubeConfig: KubeConfig, - registry: any, - idleTimeout: number | undefined, - ) { - assert(registry); - this.namespace = namespace; - this.workq = new Workq('mayastornode'); - this.registry = registry; - this.watcher = new CustomResourceCache( - this.namespace, - RESOURCE_NAME, - kubeConfig, - NodeResource, - { idleTimeout } - ); - } - - // Create node CRD if it doesn't exist. - // - // @param kubeConfig KubeConfig. - async init (kubeConfig: KubeConfig) { - log.info('Initializing node operator'); - let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); - try { - await k8sExtApi.createCustomResourceDefinition(crdNode); - log.info(`Created CRD ${RESOURCE_NAME}`); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. - if (err.statusCode !== 409) throw err; - } - } - - // Bind watcher's new/del events to node operator's callbacks. - // - // Not interested in mod events as the operator is the only who should - // be doing modifications to these objects. - // - // @param {object} watcher k8s node resource watcher. - // - _bindWatcher (watcher: CustomResourceCache) { - watcher.on('new', (obj: NodeResource) => { - if (obj.metadata && obj.spec.grpcEndpoint) { - this.registry.addNode(obj.metadata.name, obj.spec.grpcEndpoint); - } - }); - watcher.on('del', (obj: NodeResource) => { - this.registry.removeNode(obj.metadata.name); - }); - } - - // Start node operator's watcher loop. - async start () { - // install event handlers to follow changes to resources. - this._bindWatcher(this.watcher); - await this.watcher.start(); - - // This will start async processing of node events. - this.eventStream = new EventStream({ registry: this.registry }); - this.eventStream.on('data', async (ev: any) => { - if (ev.kind !== 'node') return; - await this.workq.push(ev, this._onNodeEvent.bind(this)); - }); - } - - async _onNodeEvent (ev: any) { - const name = ev.object.name; - if (ev.eventType === 'new') { - const grpcEndpoint = ev.object.endpoint || ''; - let origObj = this.watcher.get(name); - if (origObj === undefined) { - await this._createResource(name, grpcEndpoint); - } else { - await this._updateSpec(name, grpcEndpoint); - } - await this._updateStatus( - name, - ev.object.isSynced() ? NodeState.Online : NodeState.Offline, - ); - } else if (ev.eventType === 'mod') { - const grpcEndpoint = ev.object.endpoint || ''; - let origObj = this.watcher.get(name); - // The node might be just going away - do nothing if not in the cache - if (origObj !== undefined) { - await this._updateSpec(name, grpcEndpoint); - await this._updateStatus( - name, - ev.object.isSynced() ? NodeState.Online : NodeState.Offline, - ); - } - } else if (ev.eventType === 'del') { - await this._deleteResource(ev.object.name); - } else { - assert.strictEqual(ev.eventType, 'sync'); - } - } - - async _createResource(name: string, grpcEndpoint: string) { - log.info(`Creating node resource "${name}"`); - try { - await this.watcher.create({ - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: { - name, - namespace: this.namespace - }, - spec: { grpcEndpoint } - }); - } catch (err) { - log.error(`Failed to create node resource "${name}": ${err}`); - } - } - - // Update properties of k8s CRD object or create it if it does not exist. - // - // @param name Name of the updated node. 
- // @param grpcEndpoint Endpoint property of the object. - // - async _updateSpec (name: string, grpcEndpoint: string) { - try { - await this.watcher.update(name, (orig: NodeResource) => { - // Update object only if it has really changed - if (orig.spec.grpcEndpoint === grpcEndpoint) { - return; - } - log.info(`Updating spec of node resource "${name}"`); - return { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: orig.metadata, - spec: { grpcEndpoint } - }; - }); - } catch (err) { - log.error(`Failed to update node resource "${name}": ${err}`); - } - } - - // Update state of the resource. - // - // NOTE: This method does not throw if the operation fails as there is nothing - // we can do if it fails. Though we log an error message in such a case. - // - // @param name UUID of the resource. - // @param status State of the node. - // - async _updateStatus (name: string, status: NodeState) { - try { - await this.watcher.updateStatus(name, (orig: NodeResource) => { - // avoid unnecessary status updates - if (orig.status === status) { - return; - } - log.debug(`Updating status of node resource "${name}"`); - return { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: orig.metadata, - spec: orig.spec, - status: status, - }; - }); - } catch (err) { - log.error(`Failed to update status of node resource "${name}": ${err}`); - } - } - - // Delete node resource with specified name. - // - // @param {string} name Name of the node resource to delete. - // - async _deleteResource (name: string) { - try { - log.info(`Deleting node resource "${name}"`); - await this.watcher.delete(name); - } catch (err) { - log.error(`Failed to delete node resource "${name}": ${err}`); - } - } - - // Stop listening for watcher and node events and reset the cache - stop () { - this.watcher.stop(); - this.watcher.removeAllListeners(); - if (this.eventStream) { - this.eventStream.destroy(); - this.eventStream = null; - } - } -} diff --git a/csi/moac/src/persistent_store.ts b/csi/moac/src/persistent_store.ts deleted file mode 100644 index 198b6edd7..000000000 --- a/csi/moac/src/persistent_store.ts +++ /dev/null @@ -1,211 +0,0 @@ -// Interface to the persistent store (etcd) where mayastor instances register nexus information -// such as the list of nexus children and their health state and whether the nexus was shutdown -// cleanly or not. - -import assert from 'assert'; -import { Etcd3, IOptions } from 'etcd3'; -import { defaults } from 'lodash'; -import { Replica } from './replica'; -import { Logger } from './logger'; - -const log = Logger('store'); - -// Definition of the nexus information that gets saved in the persistent store. -export class NexusInfo { - // Nexus destroyed successfully. - cleanShutdown: boolean; - // Information about children. - children: ChildInfo[]; - - constructor (object: { [k: string]: any }) { - this.cleanShutdown = 'clean_shutdown' in object ? object['clean_shutdown'] : object['cleanShutdown']; - this.children = object['children']; - } -} - -// Definition of the child information that gets saved in the persistent store. -export class ChildInfo { - // UUID of the child. - uuid: string; - // Child's state of health. 
- healthy: boolean; - - constructor (object: { [k: string]: any }) { - this.uuid = object['uuid']; - this.healthy = object['healthy']; - } -} - -export class PersistentStore implements NexusCreateInfo { - private client: Etcd3; - private endpoints: string[]; - private externClient?: () => Etcd3; - // 1 minute timeout by default - private timeoutMs: number = 60000; - // In some conditions, the grpc call to etcd may take up to 15 minutes to fail, even when the etcd - // is already up again. Forcing a cancelation and allowing a quicker retry seems to alliviate this issue. - private promiseWithTimeout = (prom: Promise, timeoutMs: number, exception: any) => { - let timer: NodeJS.Timeout; - return Promise.race([ - prom, - new Promise((_r, rej) => timer = setTimeout(rej, timeoutMs, exception)) - ]).finally(() => clearTimeout(timer)); - } - - // Get default etcd client options, could be used to configure retries and timeouts... - // @param {string[]} endpoints List of etcd endpoints to connect to - // @returns {IOptions} Options for the etcd client - // - private getOptions (endpoints: string[]): IOptions { - return { - hosts: endpoints, - ...defaults - }; - } - - // Returns a new etcd client - // @param {Etcd3} client New etcd client object - // - private newClient (): Etcd3 { - log.debug("Creating a new etcd client..."); - - if (this.externClient !== undefined) - return this.externClient(); - else - return new Etcd3(this.getOptions(this.endpoints)); - } - - // Sets up the persistent store (note, at this point it does not wait for any connection to be established) - // @param {string[]} endpoints List of etcd endpoints to connect to - // @param {?number} timeoutMs Promise timeout while waiting for a reply from etcd - // @param {?()=>Etcd3?} client Alternative etcd client, used by the mock tests - // - constructor (endpoints: string[], timeoutMs?: number, client?: () => Etcd3) { - this.endpoints = endpoints.map((e) => e.includes(':') ? e : `${e}:2379`); - - this.externClient = client; - this.client = this.newClient(); - - if (timeoutMs !== undefined) { - this.timeoutMs = timeoutMs; - } - } - - // Validates that the info object is a valid NexusInfo object, as per the proto files - // @param {INexusInfo | null} info Nexus info returned by etcd. 
A null value indicates it does not exist - // @returns { NexusInfo | null} Validated Nexus info object with valid parameters, if it exists - // - private validateNexusInfo (info: NexusInfo | null): NexusInfo | null { - // it doesn't exist, just signal that back - if (!info) - return info; - - // verify if the inner fields exist - assert(info.cleanShutdown != null); - assert(info.children != null); - - // validation passed - // (no protobuf now, means we can just return the validated object as is) - return info; - } - - // Get nexus information from the persistent store - // @param {string} nexusUuid The uuid of the nexus - // @returns {NexusInfo | null} Validated Nexus info object with valid parameters, if it exists - // or throws an error in case of failure of time out - // - private async get_nexus_info (nexusUuid: string): Promise { - // get the nexus info as a JSON object - const promise = this.client.get(nexusUuid).json(); - const timeoutMsg = `Timed out after ${this.timeoutMs}ms while getting the persistent nexus "${nexusUuid}" information from etcd`; - const timeoutError = Symbol(timeoutMsg); - try { - log.debug(`Getting the persistent nexus "${nexusUuid}" information from etcd`); - const nexusRaw = await this.promiseWithTimeout(promise, this.timeoutMs, timeoutError); - return this.validateNexusInfo(nexusRaw ? new NexusInfo(nexusRaw) : null); - } catch (error: any) { - if (error === timeoutError) { - this.client = this.newClient(); - throw timeoutMsg; - } - throw error; - } - } - - // Delete the nexus from the persistent store - // @param {string} nexusUuid The uuid of the nexus - // @returns {Promise} Returns on success or throws an error if it failed|timed out - // - private async deleteNexusInfo (nexusUuid: string): Promise { - const timeoutMsg = `Timed out after ${this.timeoutMs}ms while deleting the persistent nexus "${nexusUuid}" information from etcd`; - const timeoutError = Symbol(timeoutMsg); - const promise = this.client.delete().key(nexusUuid).exec(); - try { - log.debug(`Deleting the persistent nexus "${nexusUuid}" information from etcd`); - const _deleted = await this.promiseWithTimeout(promise, this.timeoutMs, timeoutError); - } catch (error: any) { - if (error === timeoutError) { - this.client = this.newClient(); - throw timeoutMsg; - } - throw error; - } - return true; - } - - async filterReplicas (nexusUuid: string, replicas: Replica[]): Promise { - const nexus = await this.get_nexus_info(nexusUuid); - - // we have a client AND a nexus does exist for the given uuid - if (nexus !== null) { - let filteredReplicas = replicas.filter((r) => { - - let childInfo = nexus.children.find((c) => { - return c.uuid === r.realUuid; - }); - - // Add only healthy children - return childInfo?.healthy === true; - }); - - // If the shutdown was not clean then only add 1 healthy child to the create call. - // This is because children might have inconsistent data. - if (!nexus.cleanShutdown && filteredReplicas.length > 1) { - // prefer to keep a local replica, if it exists - const localReplica = filteredReplicas.findIndex((r) => r.share === 'REPLICA_NONE'); - const singleReplicaIndex = localReplica != -1 ? localReplica : 0; - filteredReplicas = filteredReplicas.slice(singleReplicaIndex, singleReplicaIndex+1); - } - - return filteredReplicas; - } else { - // If the nexus has never been created then it does not exist in etcd and so we can create - // it with all the available children as there is no preexisting data. 
- return replicas; - } - } - - async destroyNexus (nexusUuid: string): Promise { - return this.deleteNexusInfo(nexusUuid); - } -} - -// Exposes persistent Nexus information in a very simplistic manner -export interface NexusCreateInfo { - // Filter out replicas that cannot be used in the nexus create call, returning only healthy replicas. - // The remaining replicas may be added later, subject to a rebuild. - // Throws under error conditions: - // 1. when it cannot connect to the backing store within a construction timeout. - // 2. when the client library gives up trying to connect/waiting for the data. - // 3. when the data retrived from the backing store is invalid. - // @param {string} nexusUuid The uuid of the nexus - // @param {Replica[]} replicas Array of replicas to filter on - // @returns {Promise} Returns filtered replicas or throws an error if it failed|timed out - // - filterReplicas (nexusUuid: string, replicas: Replica[]): Promise; - // Destroy the nexus information from the backing store when it is no long required. - // @param {string} nexusUuid The uuid of the nexus - // @returns {Promise} Returns on success or throws an error if it failed|timed out - // - destroyNexus (nexusUuid: string): Promise; -} diff --git a/csi/moac/src/pool.ts b/csi/moac/src/pool.ts deleted file mode 100644 index b7a8ba67d..000000000 --- a/csi/moac/src/pool.ts +++ /dev/null @@ -1,251 +0,0 @@ -// Pool object implementation. - -import assert from 'assert'; -import * as _ from 'lodash'; - -import { grpcCode, GrpcError } from './grpc_client'; -import { Node } from './node'; -import { Replica } from './replica'; -import { Logger } from './logger'; - -const log = Logger('pool'); - -const URI_REGEX = /^([^:]+):\/\/(.+)$/; - -// Utility function to strip URI prefix from a string. -// -// Normally we should not be stripping URIs but because mayastor gRPC does -// not support URIs when creating a pool yet, we have to. -function _stripUri(str: string) { - const match = URI_REGEX.exec(str); - return match ? match[2] : str; -} - -export class Pool { - node?: Node; - name: string; - disks: [string]; - // TODO: define an enum - state: string; - capacity: number; - used: number; - replicas: Replica[]; - - // Build pool object from JSON object received from mayastor storage node. - // - // @param {object} props Pool properties defining the pool. - // @param {string} props.name Pool name. - // @param {string[]} props.disks List of disks comprising the pool. - // @param {string} props.state State of the pool. - // @param {number} props.capacity Capacity of the pool in bytes. - // @param {number} props.used How many bytes are used in the pool. - constructor(props: any) { - this.node = undefined; // set by registerPool method on node - this.name = props.name; - this.disks = props.disks.sort(); - this.state = props.state; - this.capacity = props.capacity; - this.used = props.used; - this.replicas = []; - } - - toString() { - return this.name + '@' + (this.node ? this.node.name : 'nowhere'); - } - - // Update object based on fresh properties obtained from mayastor storage node. - // - // @param {object} props Pool properties defining the pool. - // @param {string} props.name Pool name. - // @param {string[]} props.disks List of disks comprising the pool. - // @param {string} props.state State of the pool. - // @param {number} props.capacity Capacity of the pool in bytes. - // @param {number} props.used How many bytes are used in the pool. - // @param {object[]} replicas Replicas on the pool. 
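The replica filtering in the removed persistent_store.ts above keeps only children that etcd records as healthy, and after an unclean shutdown it keeps exactly one of them, preferring a local replica, because the remaining ones may hold inconsistent data. A standalone sketch of that rule, with the replica and child shapes reduced to the fields the filter actually reads:

type ReplicaLike = { realUuid?: string, share: string };
type ChildLike = { uuid: string, healthy: boolean };

// Restatement of the filterReplicas rule: keep only children recorded as
// healthy, and after an unclean shutdown keep a single one (a local
// REPLICA_NONE replica if present) since the rest may be inconsistent.
function filterHealthyReplicas (cleanShutdown: boolean, children: ChildLike[], replicas: ReplicaLike[]): ReplicaLike[] {
  let healthy = replicas.filter((r) =>
    children.some((c) => c.uuid === r.realUuid && c.healthy)
  );
  if (!cleanShutdown && healthy.length > 1) {
    const localIdx = healthy.findIndex((r) => r.share === 'REPLICA_NONE');
    const keep = localIdx !== -1 ? localIdx : 0;
    healthy = healthy.slice(keep, keep + 1);
  }
  return healthy;
}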
- merge(props: any, replicas: any[]) { - let changed = false; - - // If access protocol to the disk has changed, it is ok and allowed. - // Though if device has changed then it is at least unusual and we log - // a warning message. - props.disks.sort(); - if (!_.isEqual(this.disks, props.disks)) { - let oldDisks = this.disks.map(_stripUri).sort(); - let newDisks = props.disks.map(_stripUri).sort(); - if (!_.isEqual(oldDisks, newDisks)) { - log.warn( - `Unexpected disk change in the pool "${this}" from ${oldDisks} to ${newDisks}` - ); - } - this.disks = props.disks; - changed = true; - } - if (this.state !== props.state) { - this.state = props.state; - changed = true; - } - if (this.capacity !== props.capacity) { - this.capacity = props.capacity; - changed = true; - } - if (this.used !== props.used) { - this.used = props.used; - changed = true; - } - if (changed && this.node) { - this.node.emit('pool', { - eventType: 'mod', - object: this - }); - } - - this._mergeReplicas(replicas); - } - - // Merge old and new list of replicas. - // - // @param {object[]} replicas New list of replicas properties for the pool. - // - _mergeReplicas(replicas: any[]) { - var self = this; - // detect modified and new replicas - replicas.forEach((props) => { - const replica = self.replicas.find((r) => r.uuid === props.uuid); - if (replica) { - // the replica already exists - update it - replica.merge(props); - } else { - // it is a new replica - self.registerReplica(new Replica(props)); - } - }); - // remove replicas that no longer exist - const removedReplicas = self.replicas.filter( - (r) => !replicas.find((ent) => ent.uuid === r.uuid) - ); - removedReplicas.forEach((r) => r.unbind()); - } - - // Add new replica to a list of replicas for this pool and emit new event - // for the replica. - // - // @param {object} replica New replica object. - // - registerReplica(replica: Replica) { - assert(!this.replicas.find((r) => r.uuid === replica.uuid)); - assert(replica.realUuid !== undefined); - this.replicas.push(replica); - replica.bind(this); - } - - // Remove replica from the list of replicas for this pool. - // - // @param {object} replica Replica object to remove. - // - unregisterReplica(replica: Replica) { - const idx = this.replicas.indexOf(replica); - if (idx >= 0) { - this.replicas.splice(idx, 1); - } else { - log.warn( - `Replica "${replica}" is being deregistered and not assigned to the pool "${this}"` - ); - } - } - - // Assign the pool to a node. It should be done right after creating - // the pool object. - // - // @param node Node object to assign the pool to. - // - bind(node: Node) { - this.node = node; - log.debug(`Adding pool "${this.name}" to the list of pools on "${node}"`); - this.node.emit('pool', { - eventType: 'new', - object: this - }); - } - - // Unbind the previously bound pool from the node. - unbind() { - if (!this.node) return; - log.debug(`Removing pool "${this}" from the list of pools`); - this.replicas.forEach((r) => r.unbind()); - this.node.unregisterPool(this); - - this.node.emit('pool', { - eventType: 'del', - object: this - }); - this.node = undefined; - } - - // Return amount of free space in the storage pool. - // - // @returns {number} Free space in bytes. - freeBytes() { - return this.capacity - this.used; - } - - // Destroy the pool and remove it from the list of pools on the node. 
- async destroy() { - if (!this.node) { - throw new GrpcError( - grpcCode.INTERNAL, - `Cannot destroy disassociated pool "${this}"`, - ); - } - log.debug(`Destroying pool "${this}" ...`); - await this.node.call('destroyPool', { name: this.name }); - log.info(`Destroyed pool "${this}"`); - this.unbind(); - } - - // Set state of the pool to offline and the same for all replicas on the pool. - // This is typically called when mayastor stops running on the node and - // the pool becomes inaccessible. - offline() { - log.warn(`Pool "${this}" got offline`); - this.replicas.forEach((r) => r.offline()); - // artificial state that does not appear in grpc protocol - this.state = 'POOL_OFFLINE'; - if (this.node) { - this.node.emit('pool', { - eventType: 'mod', - object: this - }); - } - } - - // Return true if pool exists and is accessible, otherwise false. - isAccessible() { - return this.state === 'POOL_ONLINE' || this.state === 'POOL_DEGRADED'; - } - - // Create replica in this storage pool. - // - // @param {string} uuid ID of the new replica. - // @param {number} size Size of the replica in bytes. - // - async createReplica(uuid: string, size: number) { - if (!this.node) { - throw new GrpcError( - grpcCode.INTERNAL, - `Cannot create replica on disassociated pool "${this}"`, - ); - } - const pool = this.name; - const thin = false; - const share = 'REPLICA_NONE'; - - log.debug(`Creating replica "${uuid}" on the pool "${this}" ...`); - - var replicaInfo = await this.node.call('createReplica', { uuid, pool, size, thin, share }); - log.info(`Created replica "${uuid}" on the pool "${this}"`); - - const newReplica = new Replica(replicaInfo); - this.registerReplica(newReplica); - return newReplica; - } -} \ No newline at end of file diff --git a/csi/moac/src/pool_operator.ts b/csi/moac/src/pool_operator.ts deleted file mode 100644 index 9a50a5468..000000000 --- a/csi/moac/src/pool_operator.ts +++ /dev/null @@ -1,537 +0,0 @@ -// Pool operator monitors k8s pool resources (desired state). It creates -// and destroys pools on storage nodes to reflect the desired state. - -import * as fs from 'fs'; -import * as _ from 'lodash'; -import * as path from 'path'; -import { - ApiextensionsV1Api, - KubeConfig, -} from '@kubernetes/client-node'; -import { - CustomResource, - CustomResourceCache, - CustomResourceMeta, -} from './watcher'; -import { EventStream } from './event_stream'; -import { Workq } from './workq'; -import { Logger } from './logger'; - -const log = Logger('pool-operator'); - -const yaml = require('js-yaml'); - -const RESOURCE_NAME: string = 'mayastorpool'; -const POOL_FINALIZER = 'finalizer.mayastor.openebs.io'; - -// Load custom resource definition -const crdPool = yaml.load( - fs.readFileSync(path.join(__dirname, '../crds/mayastorpool.yaml'), 'utf8') -); - -// Set of possible pool states. Some of them come from mayastor and -// offline, pending and error are deduced in the control plane itself. 
-enum PoolState { - Unknown = "unknown", - Online = "online", - Degraded = "degraded", - Faulted = "faulted", - Offline = "offline", - Pending = "pending", - Error = "error", -} - -function poolStateFromString(val: string): PoolState { - if (val === PoolState.Online) { - return PoolState.Online; - } else if (val === PoolState.Degraded) { - return PoolState.Degraded; - } else if (val === PoolState.Faulted) { - return PoolState.Faulted; - } else if (val === PoolState.Offline) { - return PoolState.Offline; - } else if (val === PoolState.Pending) { - return PoolState.Pending; - } else if (val === PoolState.Error) { - return PoolState.Error; - } else { - return PoolState.Unknown; - } -} - -// Object defines spec properties of a pool resource. -export class PoolSpec { - node: string; - disks: string[]; - - // Create and validate pool custom resource. - constructor(node: string, disks: string[]) { - this.node = node; - this.disks = disks; - } -} - -// Object defines properties of pool resource. -export class PoolResource extends CustomResource { - apiVersion?: string; - kind?: string; - metadata: CustomResourceMeta; - spec: PoolSpec; - status: { - spec?: PoolSpec, - state: PoolState, - reason?: string, - disks?: string[], - capacity?: number, - used?: number - }; - - // Create and validate pool custom resource. - constructor(cr: CustomResource) { - super(); - this.apiVersion = cr.apiVersion; - this.kind = cr.kind; - if (cr.metadata === undefined) { - throw new Error('missing metadata'); - } else { - this.metadata = cr.metadata; - } - if (cr.spec === undefined) { - throw new Error('missing spec'); - } else { - let node = (cr.spec as any).node; - if (typeof node !== 'string') { - throw new Error('missing or invalid node in spec'); - } - let disks = (cr.spec as any).disks; - if (!Array.isArray(disks)) { - throw new Error('missing or invalid disks in spec'); - } - disks = disks.slice(0).sort(); - //if (typeof disks !== 'string') { - this.spec = { node, disks }; - } - this.status = { - state: poolStateFromString(cr.status?.state), - spec: cr.status?.spec, - reason: cr.status?.reason, - disks: cr.status?.disks, - capacity: cr.status?.capacity, - used: cr.status?.used, - }; - } - - // Extract name of the pool from the resource metadata. - getName(): string { - if (this.metadata.name === undefined) { - throw Error("Resource object does not have a name") - } else { - return this.metadata.name; - } - } - - // Get the pool spec - // If the pool has not been created yet, the user spec is returned - // If the pool has already been created, then the initial spec (cached in the status) is returned - getSpec(): PoolSpec { - if (this.status.spec !== undefined) { - return this.status.spec - } else { - return this.spec - } - } - - // Get the pool disk device - // If the pool has been created once already, then it's the initial URI returned by mayastor - // Otherwise, it's the disk device from the SPEC - getDisks(): string[] { - if (this.status.disks !== undefined) { - return this.status.disks - } else { - return this.getSpec().disks - } - } -} - -// Pool operator tries to bring the real state of storage pools on mayastor -// nodes in sync with mayastorpool custom resources in k8s. -export class PoolOperator { - namespace: string; - watcher: CustomResourceCache; // k8s resource watcher for pools - registry: any; // registry containing info about mayastor nodes - eventStream: any; // A stream of node and pool events. - workq: Workq; // for serializing pool operations - - // Create pool operator. 
- // - // @param namespace Namespace the operator should operate on. - // @param kubeConfig KubeConfig. - // @param registry Registry with node objects. - // @param [idleTimeout] Timeout for restarting watcher connection when idle. - constructor ( - namespace: string, - kubeConfig: KubeConfig, - registry: any, - idleTimeout: number | undefined, - ) { - this.namespace = namespace; - this.registry = registry; // registry containing info about mayastor nodes - this.eventStream = null; // A stream of node and pool events. - this.workq = new Workq('mayastorpool'); // for serializing pool operations - this.watcher = new CustomResourceCache( - this.namespace, - RESOURCE_NAME, - kubeConfig, - PoolResource, - { idleTimeout } - ); - } - - // Create pool CRD if it doesn't exist. - // - // @param kubeConfig KubeConfig. - async init (kubeConfig: KubeConfig) { - log.info('Initializing pool operator'); - let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); - try { - await k8sExtApi.createCustomResourceDefinition(crdPool); - log.info(`Created CRD ${RESOURCE_NAME}`); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. - if (err.statusCode !== 409) throw err; - } - } - - // Start pool operator's watcher loop. - // - // NOTE: Not getting the start sequence right can have catastrophic - // consequence leading to unintended pool destruction and data loss - // (i.e. when node info is available before the pool CRD is). - // - // The right order of steps is: - // 1. Get pool resources - // 2. Get info about pools on storage nodes - async start () { - var self = this; - - // get pool k8s resources for initial synchronization and install - // event handlers to follow changes to them. - self._bindWatcher(self.watcher); - await self.watcher.start(); - - // this will start async processing of node and pool events - self.eventStream = new EventStream({ registry: self.registry }); - self.eventStream.on('data', async (ev: any) => { - if (ev.kind === 'pool') { - await self.workq.push(ev, self._onPoolEvent.bind(self)); - } else if (ev.kind === 'node' && (ev.eventType === 'sync' || ev.eventType === 'mod')) { - await self.workq.push(ev.object.name, self._onNodeSyncEvent.bind(self)); - } else if (ev.kind === 'replica' && (ev.eventType === 'new' || ev.eventType === 'del')) { - await self.workq.push(ev, self._onReplicaEvent.bind(self)); - } - }); - } - - // Handler for new/mod/del pool events - // - // @param ev Pool event as received from event stream. - // - async _onPoolEvent (ev: any) { - const name: string = ev.object.name; - const resource = this.watcher.get(name); - - log.debug(`Received "${ev.eventType}" event for pool "${name}"`); - - if (ev.eventType === 'new') { - if (resource === undefined) { - log.warn(`Unknown pool "${name}" will be destroyed`); - await this._destroyPool(name); - } else { - await this._updateResource(ev.object); - } - } else if (ev.eventType === 'mod') { - await this._updateResource(ev.object); - } else if (ev.eventType === 'del' && resource) { - log.warn(`Recreating destroyed pool "${name}"`); - await this._createPool(resource); - } - } - - // Handler for node sync event. - // - // Either the node is new or came up after an outage - check that we - // don't have any pending pools waiting to be created on it. - // - // @param nodeName Name of the new node. 
- // - async _onNodeSyncEvent (nodeName: string) { - log.debug(`Syncing pool records for node "${nodeName}"`); - - const resources = this.watcher.list().filter( - (ent) => ent.spec.node === nodeName - ); - for (let i = 0; i < resources.length; i++) { - await this._createPool(resources[i]); - } - } - - // Handler for new/del replica events - // - // @param ev Replica event as received from event stream. - // - async _onReplicaEvent (ev: any) { - const pool = ev.object.pool; - if (!pool) { - // can happen if the node goes away (replica will shortly disappear too) - return; - } - await this._updateFinalizer(pool.name, pool.replicas.length > 0); - } - - // Stop the events, destroy event stream and reset resource cache. - stop () { - this.watcher.stop(); - this.watcher.removeAllListeners(); - if (this.eventStream) { - this.eventStream.destroy(); - this.eventStream = null; - } - } - - // Bind watcher's new/mod/del events to pool operator's callbacks. - // - // @param watcher k8s pool resource watcher. - // - _bindWatcher (watcher: CustomResourceCache) { - watcher.on('new', (resource: PoolResource) => { - this.workq.push(resource, this._createPool.bind(this)); - }); - watcher.on('mod', (resource: PoolResource) => { - this.workq.push(resource, this._modifyPool.bind(this)); - }); - watcher.on('del', (resource: PoolResource) => { - this.workq.push(resource, async (arg: PoolResource) => { - await this._destroyPool(arg.getName()); - }); - }); - } - - // Create a pool according to the specification. - // That includes parameters checks, node lookup and a call to registry - // to create the pool. - // - // @param resource Pool resource properties. - // - async _createPool (resource: PoolResource) { - const name: string = resource.getName(); - const nodeName = resource.getSpec().node; - - // Nothing prevents the user from modifying the spec part of the CRD, which could trick MOAC into recreating - // the pool on a different node, for example. - // So, store the initial spec in the status section of the CRD so that we may ignore any CRD edits from the user. - if (resource.status.spec === undefined) { - resource.status.spec = resource.spec; - await this._updateResourceProps( - name, - resource.status.state, - undefined, - undefined, - undefined, - undefined, - resource.status.spec - ); - } - - let pool = this.registry.getPool(name); - if (pool) { - // the pool already exists, just update its properties in k8s - await this._updateResource(pool); - return; - } - - const node = this.registry.getNode(nodeName); - if (!node) { - const msg = `mayastor does not run on node "${nodeName}"`; - log.error(`Cannot create pool "${name}": ${msg}`); - await this._updateResourceProps(name, PoolState.Pending, msg); - return; - } - if (!node.isSynced()) { - const msg = `mayastor on node "${nodeName}" is offline`; - log.error(`Cannot sync pool "${name}": ${msg}`); - await this._updateResourceProps(name, PoolState.Pending, msg); - return; - } - - // We will update the pool status once the pool is created, but - // that can take a time, so set reasonable default now. - await this._updateResourceProps(name, PoolState.Pending, 'Creating the pool'); - try { - // pool resource props will be updated when "new" pool event is emitted - pool = await node.createPool(name, resource.getDisks()); - } catch (err) { - log.error(`Failed to create pool "${name}": ${err}`); - await this._updateResourceProps(name, PoolState.Error, err.toString()); - } - } - - // Remove the pool from internal state and if it exists destroy it. 
- // Does not throw - only logs an error. - // - // @param name Name of the pool to destroy. - // - async _destroyPool (name: string) { - var pool = this.registry.getPool(name); - - if (pool) { - try { - await pool.destroy(); - } catch (err) { - log.error(`Failed to destroy pool "${name}@${pool.node.name}": ${err}`); - } - } - } - - // Changing pool parameters is actually not supported. However the pool - // operator's state should reflect the k8s state, so we make the change - // only at operator level and log a warning message. - // - // @param newPool New pool parameters. - // - async _modifyPool (resource: PoolResource) { - const name = resource.getName(); - const ignoreMessage = "SPEC Modification ignored since that is not currently supported. "; - let reason = resource.status.reason; - - // Pool SPEC modifications are ignored, add a reason to the CRD to make the user aware of this - if (!_.isEqual(resource.spec, resource.getSpec())) { - log.error(`Ignoring modification to pool "${name}" since that is not currently supported.`); - - if (!reason?.includes(ignoreMessage) || reason === undefined) { - await this._updateResourceProps( - name, - resource.status.state, - ignoreMessage + reason, - ); - } - } else if (reason?.includes(ignoreMessage)) { - await this._updateResourceProps( - name, - resource.status.state, - reason.replace(ignoreMessage, "") || "", - ); - } - } - - // Update status properties of k8s resource to be aligned with pool object - // properties. - // - // NOTE: This method does not throw if the update fails as there is nothing - // we can do if it fails. Though it logs an error message. - // - // @param pool Pool object. - // - async _updateResource (pool: any) { - var name = pool.name; - var resource = this.watcher.get(name); - - // we don't track this pool so we cannot update the CRD - if (!resource) { - log.warn(`State of unknown pool "${name}" has changed`); - return; - } - var state = poolStateFromString( - pool.state.replace(/^POOL_/, '').toLowerCase() - ); - var reason; - if (state === PoolState.Offline) { - reason = `mayastor does not run on the node "${pool.node}"`; - } - - await this._updateResourceProps( - name, - state, - reason, - pool.disks, - pool.capacity, - pool.used, - resource.status.spec || resource.spec - ); - } - - // Update status properties of k8s CRD object. - // - // Parameters "name" and "state" are required, the rest is optional. - // - // NOTE: This method does not throw if the update fails as there is nothing - // we can do if it fails. Though we log an error message in such a case. - // - // @param name Name of the pool. - // @param state State of the pool. - // @param [reason] Reason describing the root cause of the state. - // @param [disks] Disk URIs. - // @param [capacity] Capacity of the pool in bytes. - // @param [used] Used bytes in the pool. 
- async _updateResourceProps ( - name: string, - state: PoolState, - reason?: string, - disks?: string[], - capacity?: number, - used?: number, - specInStatus?: PoolSpec, - ) { - try { - await this.watcher.updateStatus(name, (orig: PoolResource) => { - // avoid the update if the object has not changed - if ( - state === orig.status.state && - (reason === orig.status.reason || (!reason && !orig.status.reason)) && - (capacity === undefined || capacity === orig.status.capacity) && - (used === undefined || used === orig.status.used) && - (disks === undefined || _.isEqual(disks, orig.status.disks)) && - (specInStatus === undefined || specInStatus === orig.status.spec) - ) { - return; - } - - log.debug(`Updating properties of pool resource "${name}"`); - let resource: PoolResource = _.cloneDeep(orig); - resource.status = { - state: state, - reason: reason || '', - spec: specInStatus || resource.status.spec, - disks: resource.status.disks - }; - if (disks != null) { - resource.status.disks = disks; - } - if (capacity != null) { - resource.status.capacity = capacity; - } - if (used != null) { - resource.status.used = used; - } - return resource; - }); - } catch (err) { - log.error(`Failed to update status of pool "${name}": ${err}`); - } - } - - // Place or remove finalizer from pool resource. - // - // @param name Name of the pool. - // @param [busy] At least one replica on it. - async _updateFinalizer(name: string, busy: boolean) { - try { - if (busy) { - this.watcher.addFinalizer(name, POOL_FINALIZER); - } else { - this.watcher.removeFinalizer(name, POOL_FINALIZER); - } - } catch (err) { - log.error(`Failed to update finalizer on pool "${name}": ${err}`); - } - } -} diff --git a/csi/moac/src/registry.ts b/csi/moac/src/registry.ts deleted file mode 100644 index decb10f92..000000000 --- a/csi/moac/src/registry.ts +++ /dev/null @@ -1,276 +0,0 @@ -// This is the component of the moac that maintains the state of the storage -// nodes. It keeps track of the nodes, pools, replicas and nexus. It serves as -// a database (other components can query it to get a list of objects) and also -// as a message bus (other components can subscribe to events). - -import assert from 'assert'; -import events = require('events'); -import { Node, NodeOpts } from './node'; -import { Pool } from './pool'; -import { Nexus } from './nexus'; -import { Replica } from './replica'; -import { PersistentStore } from './persistent_store'; -import { Logger } from './logger'; - -const log = Logger('registry'); - -// List of events emitted by the registry. -// -// The payload of the event is as follows: -// ``` -// { -// eventType: "sync", "new", "mod", "del" -// object: node, pool, replica, nexus object -// } -// ``` -const eventObjects = ['node', 'nexus', 'pool', 'replica']; - -interface NodeConstructor { - new (name: string, opts: any): Node; -} - -export class Registry extends events.EventEmitter { - private nodes: Record; - private Node: NodeConstructor; - private nodeOpts: NodeOpts; - private persistent_store: PersistentStore; - - constructor (nodeOpts: NodeOpts, persistent_store: PersistentStore) { - super(); - this.nodes = {}; // node objects indexed by name - this.nodeOpts = nodeOpts; - // This gives a chance to override Node class used for creating new - // node objects, which is useful for testing of the registry. - this.Node = Node; - this.persistent_store = persistent_store; - } - - // Disconnect all nodes. 
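// --- Illustrative sketch (not part of the original moac sources) ---
// The registry doubles as a message bus: it relays "node", "pool", "replica"
// and "nexus" events with the { eventType, object } payload described above.
// A consumer (an operator, for instance) subscribes roughly like this;
// `registry` and `log` are assumed to exist as in the surrounding modules.
registry.on('pool', (ev: any) => {
  // ev.eventType is one of "sync", "new", "mod" or "del"
  log.debug(`pool "${ev.object.name}" event: ${ev.eventType}`);
});
registry.on('node', (ev: any) => {
  if (ev.eventType === 'del') {
    log.info(`node "${ev.object.name}" was removed from the registry`);
  }
});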
- close () { - const self = this; - Object.keys(this.nodes).forEach((name) => { - self.removeNode(name); - }); - } - - // Add mayastor node to the list of nodes and subscribe to events - // emitted by the node to relay them further. It can be called also for - // existing nodes to update their grpc endpoint. - // - // @param name Name of the node. - // @param endpoint Endpoint for gRPC communication. - addNode (name: string, endpoint: string) { - let node = this.nodes[name]; - if (node) { - node.connect(endpoint); - } else { - node = new this.Node(name, this.nodeOpts); - node.connect(endpoint); - this.emit('node', { - eventType: 'new', - object: node - }); - this._registerNode(node); - } - } - - // Register node object in registry and listen to events on it. - // - // NOTE: This would be normally done in addNode() but for testing it's easier - // to have a separate methods because in the tests we like to create our own - // nodes. - // - // @param {object} node Node object to register. - _registerNode (node: Node) { - assert(!this.nodes[node.name]); - this.nodes[node.name] = node; - - log.info( - `mayastor on node "${node.name}" and endpoint "${node.endpoint}" just joined` - ); - - eventObjects.forEach((objType) => { - node.on(objType, (ev) => this.emit(objType, ev)); - }); - } - - // Disconnect the node and offline it (but keep it in the list). - // - // @param name Name of the node to offline. - disconnectNode (name: string) { - const node = this.nodes[name]; - if (!node) return; - log.info(`mayastor on node "${name}" left`); - node.disconnect(); - } - - // Remove mayastor node from the list of nodes and unsubscribe events. - // - // @param name Name of the node to remove. - removeNode (name: string) { - const node = this.nodes[name]; - if (!node) return; - delete this.nodes[name]; - node.disconnect(); - - // There is a hidden problem here. Some actions that should have been - // done on behalf of node.disconnect() above, might not have sufficient time - // to run and after we would remove the node from list of the nodes and - // unsubscribe event subscribers, further event propagation on this node - // would stop. As a workaround we never remove the node unless we are - // shutting down the moac. Users can remove a node by kubectl if they wish. - node.unbind(); - this.emit('node', { - eventType: 'del', - object: node - }); - eventObjects.forEach((objType) => { - node.removeAllListeners(objType); - }); - } - - // Get specified mayastor node or list of all mayastor nodes if called - // without argument. - // - // @param name Name of the node to return. - // @returns Node object if found or undefined if not found. - getNode (name: string): Node | undefined { - return this.nodes[name]; - } - - // Get list of all mayastor nodes. - getNodes (): Node[] { - return Object.values(this.nodes); - } - - // Get specified storage pool or undefined if not found. - getPool (name: string): Pool | undefined { - return this.getPools().find((p) => p.name === name); - } - - // Get list of all storage pools. - getPools (): Pool[] { - return Object.values(this.nodes).reduce( - (acc: Pool[], node: Node) => acc.concat(node.pools), - [] - ); - } - - // Get specified nexus object. - getNexus (uuid: string): Nexus | undefined { - return this.getNexuses().find((n) => n.uuid === uuid); - } - - // Get list of nexus objects. - getNexuses (): Nexus[] { - return Object.values(this.nodes).reduce( - (acc: Nexus[], node: Node) => acc.concat(node.nexus), - [] - ); - } - - // Get replica objects with specified uuid. 
- getReplicaSet (uuid: string): Replica[] { - return this.getReplicas().filter((r: Replica) => r.uuid === uuid); - } - - // Get all replicas. - getReplicas (): Replica[] { - return Object.values(this.nodes).reduce( - (acc: Replica[], node: Node) => acc.concat(node.getReplicas()), - [] - ); - } - - // Return total capacity of all pools summed together or capacity of pools on - // a single node if node name is specified. - // - // @param [nodeName] Name of the node to get the capacity for. - // @returns Total capacity in bytes. - // - getCapacity (nodeName?: string) { - let pools; - - if (nodeName) { - pools = this.getPools().filter((p) => p.node?.name === nodeName); - } else { - pools = this.getPools(); - } - return pools - .filter((p: Pool) => p.isAccessible()) - .reduce((acc: number, p: Pool) => acc + (p.capacity - p.used), 0); - } - - // Return ordered list of storage pools suitable for new volume creation - // sorted by preference (only a single pool from each node). - // - // The rules are simple: - // 1) must be online (or degraded if there are no online pools) - // 2) must have sufficient space - // 3) the least busy pools first - // - choosePools (requiredBytes: number, mustNodes: string[], shouldNodes: string[]): Pool[] { - let pools = this.getPools().filter((p) => { - return ( - p.isAccessible() && - p.node && - p.capacity - p.used >= requiredBytes && - (mustNodes.length === 0 || mustNodes.indexOf(p.node.name) >= 0) - ); - }); - - pools.sort((a, b) => { - // Rule #1: User preference - if (shouldNodes.length > 0) { - if ( - shouldNodes.indexOf(a.node!.name) >= 0 && - shouldNodes.indexOf(b.node!.name) < 0 - ) { - return -1; - } else if ( - shouldNodes.indexOf(a.node!.name) < 0 && - shouldNodes.indexOf(b.node!.name) >= 0 - ) { - return 1; - } - } - - // Rule #2: Avoid degraded pools whenever possible - if (a.state === 'POOL_ONLINE' && b.state !== 'POOL_ONLINE') { - return -1; - } else if (a.state !== 'POOL_ONLINE' && b.state === 'POOL_ONLINE') { - return 1; - } - - // Rule #3: Use the least busy pool (with fewer replicas) - if (a.replicas.length < b.replicas.length) { - return -1; - } else if (a.replicas.length > b.replicas.length) { - return 1; - } - - // Rule #4: Pools with more free space take precedence - const aFree = a.capacity - a.used; - const bFree = b.capacity - b.used; - return bFree - aFree; - }); - - // only one pool from each node - const nodes: Node[] = []; - pools = pools.filter((p) => { - if (nodes.indexOf(p.node!) < 0) { - nodes.push(p.node!); - return true; - } else { - return false; - } - }); - - return pools; - } - - // Returns the persistent store which is kept within the registry - getPersistentStore(): PersistentStore { - return this.persistent_store; - } -} diff --git a/csi/moac/src/replica.ts b/csi/moac/src/replica.ts deleted file mode 100644 index 24ee5d050..000000000 --- a/csi/moac/src/replica.ts +++ /dev/null @@ -1,199 +0,0 @@ -// Replica object implementation. - -import assert from 'assert'; -import * as _ from 'lodash'; - -import { grpcCode, GrpcError } from './grpc_client'; -import { Logger } from './logger'; -import { Pool } from './pool'; - -const log = Logger('replica'); -const parse = require('url-parse'); - -// Replica destruction on mayastor node can be very slow. Until the problem -// is fixed in mayastor we use 1 hour timeout for destroy calls. 
-const DESTROY_TIMEOUT_MS = 60 * 60 * 1000; - -export class Replica { - pool?: Pool; - uuid: string; - size: number; - // TODO: define an enum - share: string; - uri: string; - isDown: boolean; - realUuid?: string; - - // Create replica object. - // - // @param {object} props Replica properties obtained from storage node. - constructor(props: any) { - this.pool = undefined; // set by pool object during registration - this.uuid = props.uuid; - this.size = props.size; - this.share = props.share; - this.uri = props.uri; - this.isDown = false; - this.realUuid = parse(this.uri, true).query['uuid']; - } - - // Stringify replica. - toString() { - return this.uuid + '@' + (this.pool ? this.pool.name : 'nowhere'); - } - - // Update object based on fresh properties obtained from mayastor storage node. - // - // @param {object} props Properties defining the replica. - // @param {string} props.uuid ID of replica. - // @param {number} props.size Capacity of the replica in bytes. - // @param {string} props.share Share protocol of replica. - // @param {string} props.uri URI to be used by nexus to access it. - // - merge(props: any) { - if (!this.pool) { - throw new Error('Cannot merge replica that has not been bound'); - } - let changed = false; - - if (this.size !== props.size) { - this.size = props.size; - changed = true; - } - if (this.share !== props.share) { - this.share = props.share; - changed = true; - } - if (this.uri !== props.uri) { - this.uri = props.uri; - changed = true; - } - if (this.isDown) { - this.isDown = false; - changed = true; - } - if (changed && this.pool.node) { - this.pool.node.emit('replica', { - eventType: 'mod', - object: this - }); - } - } - - // Set state of the replica to offline. - // This is typically called when mayastor stops running on the node and - // the replicas become inaccessible. - offline() { - if (!this.pool) { - throw new Error('Cannot offline a replica that has not been bound'); - } - log.warn(`Replica "${this}" got offline`); - this.isDown = true; - if (this.pool.node) { - this.pool.node.emit('replica', { - eventType: 'mod', - object: this - }); - } - } - - // Return true if replica is offline otherwise false. - isOffline() { - return this.isDown; - } - - // Export replica over given storage protocol for IO (NONE, ISCSI or NVMF). - // NONE means that the replica can be accessed only locally in SPDK process. - // - // @param {string} share Name of the share protocol or "NONE" to unshare it. - // @returns {string} URI used to reach replica from nexus. - // - async setShare(share: string) { - var res; - - assert( - ['REPLICA_NONE', 'REPLICA_ISCSI', 'REPLICA_NVMF'].indexOf(share) >= 0 - ); - if (!this.pool) { - throw new Error('Cannot set share protocol when replica is not bound'); - } - if (!this.pool.node) { - throw new Error('Cannot set share protocol when pool is not bound'); - } - log.debug(`Setting share protocol "${share}" for replica "${this}" ...`); - - try { - res = await this.pool.node.call('shareReplica', { - uuid: this.uuid, - share - }); - } catch (err) { - throw new GrpcError( - grpcCode.INTERNAL, - `Failed to set share pcol for replica "${this}": ` + err - ); - } - log.info(`Share pcol for replica "${this}" was set: ${res.uri}`); - this.share = share; - this.uri = res.uri; - this.pool.node.emit('replica', { - eventType: 'mod', - object: this - }); - return res.uri; - } - - // Destroy replica on storage node. - // - // This must be called after the replica is removed from nexus. 
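// --- Illustrative sketch (not part of the original moac sources) ---
// How the share protocol is typically driven (volume.ts further below does
// this in _ensureReplicaShareProtocols): a replica remote to the nexus is
// exported over NVMe-oF and the returned URI becomes a nexus child, while a
// replica local to the nexus stays unshared. `Replica` is the class defined
// in this file; the replica is assumed to be bound to a pool.
async function shareForNexus(replica: Replica, localToNexus: boolean): Promise<string> {
  return replica.setShare(localToNexus ? 'REPLICA_NONE' : 'REPLICA_NVMF');
}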
- async destroy() { - log.debug(`Destroying replica "${this}" ...`); - if (!this.pool) { - throw new Error('Cannot destroy a replica that has not been bound'); - } - if (!this.pool?.node?.isSynced()) { - // We don't want to block the volume life-cycle in case that the node - // is down - it may never come back online. - log.warn(`Faking the destroy of "${this}" because it is unreachable`); - } else { - await this.pool.node.call( - 'destroyReplica', - { uuid: this.uuid }, - DESTROY_TIMEOUT_MS, - ); - log.info(`Destroyed replica "${this}"`); - } - this.unbind(); - } - - // Associate replica with a pool. - // - // @param {object} pool Pool object to associate the replica with. - // - bind(pool: Pool) { - this.pool = pool; - log.debug( - `Adding "${this.uuid}" to the list of replicas for the pool "${pool}"` - ); - if (this.pool.node) { - this.pool.node.emit('replica', { - eventType: 'new', - object: this - }); - } - } - - // Remove the replica reference from pool - unbind() { - if (!this.pool) return; - log.debug(`Removing replica "${this}" from the list of replicas`); - this.pool.unregisterReplica(this); - if (this.pool.node) { - this.pool.node.emit('replica', { - eventType: 'del', - object: this - }); - } - this.pool = undefined; - } -} \ No newline at end of file diff --git a/csi/moac/src/rest_api.ts b/csi/moac/src/rest_api.ts deleted file mode 100644 index f2ff7cdcb..000000000 --- a/csi/moac/src/rest_api.ts +++ /dev/null @@ -1,80 +0,0 @@ -// moac REST API server -// -// Auxilliary interface for two purposes: -// -// 1. All stuff for which using k8s custom resources would be too awkward. -// Currently we use it only for exposing stats. -// 2. Interface that other components can use to interact with the control -// plane. Currently used only for liveness and readiness probes. 
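// --- Illustrative sketch (not part of the original moac sources) ---
// A probe or monitoring client talks to the two endpoints implemented below:
// GET "/" answers liveness/readiness checks and GET "/stats" returns replica
// statistics. The port used here (3000) is an assumption; in practice it is
// whatever value was passed to ApiServer.start().
import * as http from 'http';

http.get({ host: '127.0.0.1', port: 3000, path: '/stats' }, (res) => {
  let body = '';
  res.on('data', (chunk) => { body += chunk; });
  res.on('end', () => console.log('replica stats:', JSON.parse(body)));
});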
- -import express from 'express'; -import { Server } from 'http'; -import { Registry } from './registry'; -import { Node, ReplicaStat } from './node'; -import { Logger } from './logger'; - -const log = Logger('api'); - -export class ApiServer { - private registry: Registry; - private app: express.Express; - private server: Server | undefined; - - constructor (registry: Registry) { - const self = this; - this.registry = registry; - this.app = express(); - // for liveness & readiness probes - this.app.get('/', (req, res) => { - res.json({}); - }); - // for obtaining volume stats - this.app.get('/stats', (req, res) => { - self.getStats().then( - (stats) => res.json(stats), - (err) => res.status(500).send(err.toString()) - ); - }); - } - - async start (port: number): Promise { - return new Promise((resolve, reject) => { - this.server = this.app.listen(port, () => { - log.info('API server listening on port ' + port); - resolve(); - }); - }); - } - - stop () { - if (this.server) { - this.server.close(); - this.server = undefined; - } - } - - // TODO: should return stats for nexus rather than for replica - async getStats (): Promise { - const self = this; - let stats: ReplicaStat[] = []; - const nodes: Node[] = self.registry.getNodes(); - - // TODO: stats can be retrieved in parallel - for (let i = 0; i < nodes.length; i++) { - const node = nodes[i]; - const timestamp = new Date().toISOString(); - - let replicaStats: ReplicaStat[]; - try { - replicaStats = await node.getStats(); - } catch (err) { - log.error(`Failed to retrieve stats from "${node}": ${err}`); - continue; - } - - stats = stats.concat(replicaStats); - } - - return stats; - } -} diff --git a/csi/moac/src/volume.ts b/csi/moac/src/volume.ts deleted file mode 100644 index ccf40fcc5..000000000 --- a/csi/moac/src/volume.ts +++ /dev/null @@ -1,1237 +0,0 @@ -// Volume object abstracts user from volume components nexus and -// replicas and implements algorithms for volume recovery. - -import assert from 'assert'; -import events = require('events'); -import * as _ from 'lodash'; -import { grpcCode, GrpcError } from './grpc_client'; -import { Replica } from './replica'; -import { Child, Nexus, Protocol } from './nexus'; -import { Pool } from './pool'; -import { Node } from './node'; -import { Registry } from './registry'; -import { Logger } from './logger'; - -const log = Logger('volume'); - -// If state transition in FSA fails due to an error and there is no consumer -// for the error, we set a retry timer to retry the state transition. -const RETRY_TIMEOUT_MS = 30000; - -type DoneCallback = (err?: Error, res?: unknown) => void; - -// ID of the operation delegated to fsa() to perform. 
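// --- Illustrative sketch (not part of the original moac sources) ---
// Publish, unpublish and destroy are not executed directly by their callers;
// the Volume class below parks a DoneCallback per operation in `waiting[op]`
// and the FSA later settles it via _delegatedOpSuccess()/_delegatedOpFailed().
// Stripped to its essentials, the pattern looks like this:
type Done = (err?: Error, res?: unknown) => void;
const parked: Done[] = [];

function delegate(): Promise<unknown> {
  return new Promise((resolve, reject) => {
    parked.push((err, res) => (err ? reject(err) : resolve(res)));
    // ...kick the state machine; it invokes the parked callback when finished
  });
}

function operationFinished(err?: Error, result?: unknown) {
  parked.splice(0, parked.length).forEach((cb) => cb(err, result));
}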
-enum DelegatedOp { - Publish, - Unpublish, - Destroy, -} - -// State of the volume -export enum VolumeState { - Unknown = 'unknown', - Pending = 'pending', - Healthy = 'healthy', - Degraded = 'degraded', - Offline = 'offline', // target (nexus) is down - Faulted = 'faulted', // data cannot be recovered - Destroyed = 'destroyed', // destroy in progress - Error = 'error', // used by the volume operator -} - -export function volumeStateFromString(val: string): VolumeState { - if (val == VolumeState.Healthy) { - return VolumeState.Healthy; - } else if (val == VolumeState.Degraded) { - return VolumeState.Degraded; - } else if (val == VolumeState.Faulted) { - return VolumeState.Faulted; - } else if (val == VolumeState.Offline) { - return VolumeState.Offline; - } else if (val == VolumeState.Destroyed) { - return VolumeState.Destroyed; - } else if (val == VolumeState.Error) { - return VolumeState.Error; - } else if (val == VolumeState.Pending) { - return VolumeState.Pending; - } else { - return VolumeState.Unknown; - } -} - -// Specification describing the desired state of the volume. -export type VolumeSpec = { - // Number of desired replicas. - replicaCount: number, - // If the application should run on the same node as the nexus. - local: boolean, - // Nodes to prefer for scheduling replicas. - // There is one quirk following from k8s implementation of CSI. The first - // node in the list is the node that k8s wants to schedule the app for. - // The ordering of the rest does not have any significance. - preferredNodes: string[], - // Replicas must be on a subset of these nodes. - requiredNodes: string[], - // The volume must have at least this size. - requiredBytes: number, - // The volume should not be bigger than this. - limitBytes: number, - // The share protocol for the nexus. - protocol: Protocol, -}; - -// Abstraction of the volume. It is an abstract object which consists of -// physical entities nexus and replicas. It provides high level methods -// for doing operations on the volume as well as recovery algorithms for -// maintaining desired redundancy. -export class Volume { - // volume spec properties - uuid: string; - spec: VolumeSpec; - // volume status properties - private size: number; - private nexus: Nexus | null; - private replicas: Record; // replicas indexed by node name - public state: VolumeState; - private publishedOn: string | undefined; - // internal properties - private emitter: events.EventEmitter; - private registry: Registry; - private runFsa: number; // number of requests to run FSA - private waiting: Record; // ops waiting for completion - private retry_fsa: NodeJS.Timeout | undefined; - private pendingDestroy: boolean; - - // Construct a volume object with given uuid. - // - // @params uuid ID of the volume. - // @params registry Registry object. - // @params emitEvent Callback that should be called anytime volume state changes. - // @params spec Volume parameters. - // @params [size] Current properties of the volume. - // @params [publishedOn] Node name where this volume is published. 
- // - constructor( - uuid: string, - registry: Registry, - emitter: events.EventEmitter, - spec: VolumeSpec, - state?: VolumeState, - size?: number, - publishedOn?: string, - ) { - // specification of the volume - this.uuid = uuid; - this.spec = _.clone(spec); - this.registry = registry; - // state variables of the volume - this.size = size || 0; - this.publishedOn = publishedOn; - this.nexus = null; - this.replicas = {}; - this.state = state || VolumeState.Pending; - this.pendingDestroy = false; - // other properties - this.runFsa = 0; - this.emitter = emitter; - this.waiting = > {}; - this.waiting[DelegatedOp.Publish] = []; - this.waiting[DelegatedOp.Unpublish] = []; - this.waiting[DelegatedOp.Destroy] = []; - } - - // Clear the timer on the volume to prevent it from keeping nodejs loop alive. - deactivate() { - this.runFsa = 0; - if (this.retry_fsa) { - clearTimeout(this.retry_fsa); - this.retry_fsa = undefined; - } - } - - // Stringify volume - toString(): string { - return this.uuid; - } - - // Get the size of the volume. - getSize(): number { - return this.size; - } - - // Get the node where the volume is accessible from (that is the node with - // the nexus) or undefined when nexus does not exist (unpublished/published). - getNodeName(): string | undefined { - return this.publishedOn; - } - - // Return volume replicas. - getReplicas(): Replica[] { - return Object.values(this.replicas); - } - - // Return volume nexus. - getNexus(): Nexus | undefined { - return this.nexus || undefined; - } - - // Return whether the volume can still be used and is updatable. - isSpecUpdatable (): boolean { - return ([ - VolumeState.Unknown, - VolumeState.Pending, - VolumeState.Destroyed, - VolumeState.Faulted, - ].indexOf(this.state) < 0); - } - - // Publish the volume. That means, make it accessible through a target. - // - // @params nodeId ID of the node where the volume will be mounted. - // @return uri The URI to access the nexus. - async publish(nodeId: String): Promise { - if ([ - VolumeState.Degraded, - VolumeState.Healthy, - VolumeState.Offline, - ].indexOf(this.state) < 0) { - throw new GrpcError( - grpcCode.INTERNAL, - `Cannot publish "${this}" that is neither healthy, degraded nor offline` - ); - } - - let uri = this.nexus && this.nexus.getUri(); - - let nexusNode = this._desiredNexusNode(this._activeReplicas(), nodeId); - if (!nexusNode) { - // If we get here it means that nexus is supposed to be already published - // but on a node that is not part of the cluster (has been deregistered). - if (!uri) { - throw new GrpcError( - grpcCode.INTERNAL, - `Cannot publish "${this}" because the node does not exist` - ); - } - return uri; - // If the publish has been already done on the desired node then return. - } else if (uri && this.nexus?.node?.name === nexusNode.name) { - return uri; - } - - // Set the new desired state - this.publishedOn = nexusNode.name; - - // Cancel any unpublish that might be in progress - this._delegatedOpCancel([DelegatedOp.Unpublish], new GrpcError( - grpcCode.INTERNAL, - `Volume ${this} has been re-published`, - )); - - let res = await this._delegate(DelegatedOp.Publish); - assert(typeof res === 'string'); - return res; - } - - // Undo publish operation on the volume. 
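// --- Illustrative sketch (not part of the original moac sources) ---
// Typical call sequence from the CSI layer: publish the volume on the node
// where the workload is scheduled, use the returned URI to reach the nexus
// target, and unpublish once the workload is gone. `Volume` is the class
// defined in this file.
async function attachDetach(volume: Volume, nodeName: string): Promise<string> {
  const uri = await volume.publish(nodeName);  // e.g. an NVMe-oF target URI
  // ... workload mounts and uses the volume ...
  await volume.unpublish();
  return uri;
}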
- async unpublish() { - // Set the new desired state - this.publishedOn = undefined; - - // If the volume has been already unpublished then return - if (!this.nexus || !this.nexus.getUri()) { - return; - } - - // Cancel any publish that might be in progress - this._delegatedOpCancel([DelegatedOp.Publish], new GrpcError( - grpcCode.INTERNAL, - `Volume ${this} has been unpublished`, - )); - - await this._delegate(DelegatedOp.Unpublish); - } - - // Delete nexus and destroy all replicas of the volume. - async destroy() { - // If the volume is still being created then we cannot change the state - // because fsa would immediately start to act on it. - if (this.state === VolumeState.Pending) { - this.pendingDestroy = true; - await this._delegate(DelegatedOp.Destroy); - return; - } - // Set the new desired state - this.publishedOn = undefined; - this._setState(VolumeState.Destroyed); - - // Cancel all other types of operations that might be in progress - this._delegatedOpCancel([ - DelegatedOp.Publish, - DelegatedOp.Unpublish, - ], new GrpcError( - grpcCode.INTERNAL, - `Volume ${this} has been destroyed`, - )); - - await this._delegate(DelegatedOp.Destroy); - } - - // Trigger the run of FSA. It will always run asynchronously to give caller - // a chance to perform other changes to the volume before everything is - // checked by FSA. If it is already running, it will start again when the - // current run finishes. - // - // Why critical section on fsa? Certain operations done by fsa are async. If - // we allow another process to enter fsa before the async operation is done, - // we risk that the second process repeats exactly the same actions (because - // the state hasn't been fully updated). - fsa() { - if (this.runFsa++ === 0) { - setImmediate(() => { - if (this.retry_fsa) { - clearTimeout(this.retry_fsa); - this.retry_fsa = undefined; - } - this._fsa().finally(() => { - const runAgain = this.runFsa > 1; - this.runFsa = 0; - if (runAgain) this.fsa(); - }); - }) - } - } - - // Implementation of the Finite State Automaton (FSA) that moves the volume - // through the states: degraded, faulted, healthy, ... It tries to reflect - // the desired state as recorded in spec properties and some other internal - // properties that change in response to create, publish, unpublish and - // destroy volume operations. Since these operations delegate their tasks - // onto FSA (not to interfere with other state transitions happening in FSA), - // it is also responsible for notifying delegators when certain state - // transitions complete or fail. - async _fsa() { - // If the volume is being created, FSA should not interfere with the - // creation process. 
- if (this.state === VolumeState.Pending) { - return; - } - log.debug(`Volume ${this} enters FSA in ${this.state} state`); - - // Destroy all components of the volume if it should be destroyed - if (this.state === VolumeState.Destroyed) { - if (this.nexus) { - try { - await this.nexus.destroy(); - } catch (err) { - this._delegatedOpFailed([ - DelegatedOp.Unpublish, - DelegatedOp.Destroy, - ], new GrpcError( - grpcCode.INTERNAL, - `Failed to destroy nexus ${this.nexus}: ${err}`, - )); - return; - } - } - const promises = Object.values(this.replicas).map((replica) => - replica.destroy() - ); - try { - await Promise.all(promises); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Destroy], new GrpcError( - grpcCode.INTERNAL, - `Failed to destroy a replica of ${this}: ${err}`, - )); - return; - } - try { - await this.registry.getPersistentStore().destroyNexus(this.uuid); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Destroy], new GrpcError( - grpcCode.INTERNAL, - `Failed to destroy entry from the persistent store of ${this}: ${err}`, - )); - return; - } - - this._delegatedOpSuccess(DelegatedOp.Destroy); - if (this.retry_fsa) { - clearTimeout(this.retry_fsa); - this.retry_fsa = undefined; - } - this._changed('del'); - return; - } - - if (this.nexus && !this.publishedOn) { - // Try to unpublish the nexus if it should not be published. - if (this.nexus.getUri()) { - try { - await this.nexus.unpublish(); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Unpublish], new GrpcError( - grpcCode.INTERNAL, - `Cannot unpublish ${this.nexus}: ${err}`, - )); - return; - } - this._delegatedOpSuccess(DelegatedOp.Unpublish); - return; - } else if (this.nexus.isOffline()) { - // The nexus is not used and it is offline so "forget it". - try { - await this.nexus.destroy(); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Unpublish], new GrpcError( - grpcCode.INTERNAL, - `Failed to forget nexus ${this.nexus}: ${err}`, - )); - return; - } - this._delegatedOpSuccess(DelegatedOp.Unpublish); - return; - } - } - - let replicaSet: Replica[] = []; - try { - replicaSet = this._activeReplicas(); - } catch (err) { - this._setState(VolumeState.Faulted); - this._delegatedOpFailed([ DelegatedOp.Publish ], new GrpcError( - grpcCode.INTERNAL, - err.toString(), - )); - // No point in continuing if there isn't a single usable replica. - // We might need to revisit this decision in the future, because nexus - // might have children that we are not aware of. 
- if (!this.nexus) return; - } - let nexusNode = this._desiredNexusNode(replicaSet); - - // If we don't have a nexus and we should have one then create it - if (!this.nexus) { - if ( - this.publishedOn || - replicaSet.length !== this.spec.replicaCount - ) { - if (nexusNode && nexusNode.isSynced()) { - try { - replicaSet = await this._ensureReplicaShareProtocols(nexusNode, replicaSet); - } catch (err) { - this._setState(VolumeState.Offline); - this._delegatedOpFailed([ DelegatedOp.Publish ], new GrpcError( - grpcCode.INTERNAL, - err.toString(), - )); - return; - } - try { - await this._createNexus(nexusNode, replicaSet); - } catch (err) { - this._setState(VolumeState.Offline); - this._delegatedOpFailed([ DelegatedOp.Publish ], new GrpcError( - grpcCode.INTERNAL, - `Failed to create nexus for ${this} on "${this.publishedOn}": ${err}`, - )); - } - } else { - this._setState(VolumeState.Offline); - this._delegatedOpFailed([ DelegatedOp.Publish ], new GrpcError( - grpcCode.INTERNAL, - `Cannot create nexus for ${this} because "${this.publishedOn}" is down`, - )); - } - } else { - // we have just the right # of replicas and we don't need a nexus - this._setState(VolumeState.Healthy); - } - // fsa will get called again when event about created nexus arrives - return; - } - - if (this.publishedOn && this.nexus.node?.name !== this.publishedOn) { - // Respawn the nexus on the desired node. - log.info(`Recreating the nexus "${this.nexus}" on the desired node "${this.publishedOn}"`); - try { - await this.nexus.destroy(); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Publish], new GrpcError( - grpcCode.INTERNAL, - `Failed to destroy nexus for ${this}: ${err}`, - )); - } - return; - } - if (this.nexus.isOffline()) { - this._setState(VolumeState.Offline); - return; - } - - // From now on the assumption is that the nexus exists and is reachable - assert(nexusNode); - - // Check that the replicas are shared as they should be - try { - replicaSet = await this._ensureReplicaShareProtocols(nexusNode, replicaSet); - } catch (err) { - this._delegatedOpFailed([ - DelegatedOp.Publish, - ], new GrpcError( - grpcCode.INTERNAL, - err.toString(), - )); - return; - } - - // pair nexus children with replica objects to get the full picture - const childReplicaPairs: { ch: Child, r: Replica | undefined }[] = this.nexus.children.map((ch) => { - const r = Object.values(replicaSet).find((r) => r.uri === ch.uri); - return { ch, r }; - }); - // add newly found replicas to the nexus (one by one) - const newReplicas = Object.values(replicaSet).filter((r) => { - return (!r.isOffline() && !childReplicaPairs.find((pair) => pair.r === r)); - }); - for (let i = 0; i < newReplicas.length; i++) { - try { - childReplicaPairs.push({ - ch: await this.nexus.addReplica(newReplicas[i]), - r: newReplicas[i], - }) - } catch (err) { - log.error(err.toString()); - } - } - - // If there is not a single child that is online then there is no hope - // that we could rebuild anything. 
- var onlineCount = childReplicaPairs - .filter((pair) => pair.ch.state === 'CHILD_ONLINE') - .length; - if (onlineCount === 0) { - this._setState(VolumeState.Faulted); - this._delegatedOpFailed([ - DelegatedOp.Publish, - ], new GrpcError( - grpcCode.INTERNAL, - `The volume ${this} has no healthy replicas` - )); - return; - } - - // publish the nexus if it is not and should be - let uri = this.nexus.getUri(); - if (!uri && this.publishedOn) { - try { - uri = await this.nexus.publish(this.spec.protocol); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Publish], new GrpcError( - grpcCode.INTERNAL, - err.toString(), - )); - return; - } - this._delegatedOpSuccess(DelegatedOp.Publish, uri); - } - - // If we don't have sufficient number of sound replicas (sound means online - // or under rebuild) then add a new one. - var soundCount = childReplicaPairs.filter((pair) => { - return ['CHILD_ONLINE', 'CHILD_DEGRADED'].indexOf(pair.ch.state) >= 0; - }).length; - if (this.spec.replicaCount > soundCount) { - this._setState(VolumeState.Degraded); - // add new replica - try { - await this._createReplicas(this.spec.replicaCount - soundCount); - } catch (err) { - log.error(err.toString()); - } - // The replicas will be added to nexus when the fsa is run next time - // which happens immediately after we exit. - return; - } - - // The condition for later actions is that volume must not be rebuilding or - // waiting for a child add. So check that and return if that's the case. - var rebuildCount = childReplicaPairs - .filter((pair) => pair.ch.state === 'CHILD_DEGRADED') - .length; - if (rebuildCount > 0) { - log.info(`The volume ${this} is rebuilding`); - this._setState(VolumeState.Degraded); - return; - } - - assert(onlineCount >= this.spec.replicaCount); - this._setState(VolumeState.Healthy); - - // If we have more online replicas than we need to, then remove one. - // Child that is broken or without a replica goes first. - let rmPair = childReplicaPairs.find( - (pair) => !pair.r && pair.ch.state === 'CHILD_FAULTED' - ); - if (!rmPair) { - rmPair = childReplicaPairs.find((pair) => pair.ch.state === 'CHILD_FAULTED'); - // Continue searching for a candidate for removal only if there are more - // online replicas than required. 
- if (!rmPair && onlineCount > this.spec.replicaCount) { - // A child that is unknown to us (without replica object) - rmPair = childReplicaPairs.find((pair) => !pair.r); - if (!rmPair) { - // The replica with the lowest score must go away - const rmReplica = this._prioritizeReplicas( - childReplicaPairs - .map((pair) => pair.r) - .filter((r) => r !== undefined) - ).pop(); - if (rmReplica) { - rmPair = childReplicaPairs.find((pair) => pair.r === rmReplica); - } - } - } - } - if (rmPair) { - try { - await this.nexus.removeReplica(rmPair.ch.uri); - } catch (err) { - log.error(`Failed to remove excessive replica "${rmPair.ch.uri}" from nexus: ${err}`); - return; - } - if (rmPair.r) { - try { - await rmPair.r.destroy(); - } catch (err) { - log.error(`Failed to destroy excessive replica "${rmPair.r}": ${err}`); - } - } - return; - } - - // If a replica should run on a different node then move it - var moveChild = childReplicaPairs.find((pair) => { - if ( - pair.r && - pair.ch.state === 'CHILD_ONLINE' && - this.spec.requiredNodes.length > 0 && - pair.r.pool?.node && - this.spec.requiredNodes.indexOf(pair.r.pool.node.name) < 0 - ) { - if (this.spec.requiredNodes.indexOf(pair.r.pool.node.name) < 0) { - return true; - } - } - return false; - }); - if (moveChild) { - // We add a new replica and the old one will be removed when both are - // online since there will be more of them than needed. We do one by one - // not to trigger too many changes. - try { - await this._createReplicas(1); - } catch (err) { - log.error(`Failed to move replica of the volume ${this}: ${err}`); - } - return; - } - - // Finally if everything is ok and volume isn't published, destroy the - // nexus. Leaving it around eats cpu cycles and induces network traffic - // between nexus and replicas. - if (!this.publishedOn) { - try { - await this.nexus.destroy(); - } catch (err) { - this._delegatedOpFailed([DelegatedOp.Destroy], new GrpcError( - grpcCode.INTERNAL, - `Failed to destroy nexus ${this.nexus}: ${err}`, - )); - } - } - } - - // Wait for the operation that was delegated to FSA to complete (either - // with success or failure). - async _delegate(op: DelegatedOp): Promise { - return new Promise((resolve: (res: unknown) => void, reject: (err: any) => void) => { - this.waiting[op].push((err: any, res: unknown) => { - if (err) { - reject(err); - } else { - resolve(res); - } - }); - this.fsa(); - }); - } - - // A state transition corresponding to certain finished operation on the - // volume has been done. Inform registered consumer about it. - _delegatedOpSuccess(op: DelegatedOp, result?: unknown) { - this.waiting[op] - .splice(0, this.waiting[op].length) - .forEach((cb) => cb(undefined, result)); - } - - // An error has been encountered while making a state transition to desired - // state. Inform registered consumer otherwise it could be waiting for ever - // for the state transition to happen. - // If there is no consumer for the information then log the error and - // schedule a retry for the state transition. 
- _delegatedOpFailed(ops: DelegatedOp[], err: Error) { - let reported = false; - ops.forEach((op) => { - this.waiting[op] - .splice(0, this.waiting[op].length) - .forEach((cb) => { - reported = true; - cb(err); - }); - }); - if (!reported) { - let msg; - if (err instanceof GrpcError) { - msg = err.message; - } else { - // These are sort of unexpected errors so print a stack trace as well - msg = err.stack; - } - log.error(msg); - this.retry_fsa = setTimeout(this.fsa.bind(this), RETRY_TIMEOUT_MS); - } - } - - // Cancel given operation that is in progress (if any) by unblocking it and - // returning specified error. - _delegatedOpCancel(ops: DelegatedOp[], err: Error) { - ops.forEach((op) => { - this.waiting[op] - .splice(0, this.waiting[op].length) - .forEach((cb) => cb(err)); - }); - } - - // Change the volume state to given state. If the state is not the same as - // previous one, we should emit a volume mod event. - // - // @param newState New state to set on volume. - _setState(newState: VolumeState) { - if (this.state !== newState) { - if (newState === VolumeState.Healthy || newState === VolumeState.Destroyed) { - log.info(`Volume state of "${this}" is ${newState}`); - } else { - log.warn(`Volume state of "${this}" is ${newState}`); - } - this.state = newState; - this._changed(); - } - } - - // Create the volume in accordance with requirements specified during the - // object creation. Create whatever component is missing (note that we - // might not be creating it from the scratch). - // - // NOTE: Until we switch state from "pending" at the end, the volume is not - // acted upon by FSA. That's exactly what we want, because the async events - // produced by this function do not interfere with execution of the "create". - // - // We have to check pending destroy flag after each async step in case that - // someone destroyed the volume before it was fully created. - async create() { - log.debug(`Creating the volume "${this}"`); - - this.attach(); - - // Ensure there is sufficient number of replicas for the volume. - const newReplicaCount = this.spec.replicaCount - Object.keys(this.replicas).length; - if (newReplicaCount > 0) { - // create more replicas if higher replication factor is desired - await this._createReplicas(newReplicaCount); - if (this.pendingDestroy) { - throw new GrpcError( - grpcCode.INTERNAL, - `The volume ${this} was destroyed before it was created`, - ); - } - } - this._setState(VolumeState.Healthy); - log.info(`Volume "${this}" with ${this.spec.replicaCount} replica(s) and size ${this.size} was created`); - } - - // Attach whatever objects belong to the volume and can be found in the - // registry. - attach() { - this.registry.getReplicaSet(this.uuid).forEach((r: Replica) => this.newReplica(r)); - const nexus = this.registry.getNexus(this.uuid); - if (nexus) { - this.newNexus(nexus); - } - } - - // Update child devices of existing nexus or create a new nexus if it does not - // exist. - // - // @param node Node where the nexus should be created. - // @param replicas Replicas that should be used for child bdevs of nexus. - // @returns Created nexus object. - // - async _createNexus(node: Node, replicas: Replica[]): Promise { - if (!this.size) { - // the size will be the smallest replica - this.size = Object.values(replicas) - .map((r) => r.size) - .reduce((acc, cur) => (cur < acc ? 
cur : acc), Number.MAX_SAFE_INTEGER); - } - - // filter out unhealthy replicas (they don't have the latest data) from the create call - replicas = await this.registry.getPersistentStore().filterReplicas(this.uuid, replicas); - - if (replicas.length == 0) { - // what could we really do in this case? - throw `No healthy children are available so nexus "${this.uuid}" creation is not allowed at this time`; - } else { - return node.createNexus( - this.uuid, - this.size, - Object.values(replicas) - ); - } - } - - // Adjust replica count for the volume to required count. - // - // @param count Number of new replicas to create. - // - async _createReplicas(count: number) { - let pools: Pool[] = this.registry.choosePools( - this.spec.requiredBytes, - this.spec.requiredNodes, - this.spec.preferredNodes - ); - // remove pools that are already used by existing replicas - const usedNodes = Object.keys(this.replicas); - pools = pools.filter((p) => p.node && usedNodes.indexOf(p.node.name) < 0); - if (pools.length < count) { - log.error( - `Not enough suitable pool(s) for volume "${this}" with capacity ` + - `${this.spec.requiredBytes} and replica count ${this.spec.replicaCount}` - ); - throw new GrpcError( - grpcCode.RESOURCE_EXHAUSTED, - `Volume ${this.uuid} with capacity ${this.spec.requiredBytes} requires ${count} storage pool(s). Only ${pools.length} suitable storage pool(s) found.` - ); - } - - // Calculate the size of the volume if not given precisely. - // - // TODO: Size of the smallest pool is a safe choice though too conservative. - if (!this.size) { - this.size = Math.min( - pools.reduce( - (acc, pool) => Math.min(acc, pool.freeBytes()), - Number.MAX_SAFE_INTEGER - ), - this.spec.limitBytes || this.spec.requiredBytes - ); - } - - // For local volumes, local pool should have the max priority. - if (this.spec.local && this.spec.preferredNodes[0]) { - let idx = pools.findIndex((p) => p.node && p.node.name === this.spec.preferredNodes[0]); - if (idx >= 0) { - let localPool = pools.splice(idx, 1)[0]; - pools.unshift(localPool); - } - } - - // We record all failures as we try to create the replica on available - // pools to return them to the user at the end if we ultimately fail. - const errors = []; - const requestedReplicas = count; - // try one pool after another until success - for (let i = 0; i < pools.length && count > 0; i++) { - const pool = pools[i]; - - try { - // this will add the replica to the cache if successful - await pool.createReplica(this.uuid, this.size); - } catch (err) { - log.error(err.message); - errors.push(err.message); - continue; - } - count--; - } - // check if we created enough replicas - if (count > 0) { - let msg = `Failed to create ${count} out of ${requestedReplicas} requested replicas for volume "${this}": `; - msg += errors.join('. '); - throw new GrpcError(grpcCode.INTERNAL, msg); - } - } - - // Get list of replicas for this volume sorted from the most to the - // least preferred. - // - // @returns {object[]} List of replicas sorted by preference (the most first). - // - _prioritizeReplicas(replicas: Replica[]): Replica[] { - // Object.values clones the array so that we don't modify the original value - return Object.values(replicas).sort( - (a, b) => this._scoreReplica(b) - this._scoreReplica(a) - ); - } - - // Assign score to a replica based on certain criteria. The higher the better. - // - // @param {object} replica Replica object. - // @returns {number} Score from 0 to 18. 
- // - _scoreReplica(replica: Replica) { - let score = 0; - const node = replica.pool?.node; - if (!node) { - return 0; - } - - // The idea is that the sum of less important scores should never overrule - // the more important criteria. - - // criteria #1: must be on the required nodes if set - if ( - this.spec.requiredNodes.length > 0 && - this.spec.requiredNodes.indexOf(node.name) >= 0 - ) { - score += 100; - } - // criteria #2: replica should be online - if (!replica.isOffline()) { - score += 50; - } - // criteria #3: would be nice to run on preferred node - if ( - this.spec.preferredNodes.length > 0 && - this.spec.preferredNodes.indexOf(node.name) >= 0 - ) { - score += 20; - } - // criteria #4: if "local" is set then running on the same node as app is desired - if ( - this.spec.local && - this.spec.preferredNodes.length > 0 && - this.spec.preferredNodes[0] === node.name - ) { - score += 9; - } - // criteria #4: local IO from nexus is certainly an advantage - if (this.nexus && node === this.nexus.node) { - score += 1; - } - - // TODO: Score the replica based on the pool parameters. - // I.e. the replica on a less busy pool would have higher score. - return score; - } - - // Sort replicas according to their value and remove those that aren't online. - _activeReplicas(): Replica[] { - const replicaSet = this - ._prioritizeReplicas(Object.values(this.replicas)) - .filter((r) => !r.isOffline()); - if (replicaSet.length === 0) { - throw new GrpcError( - grpcCode.INTERNAL, - `There are no good replicas for volume "${this}"` - ); - } - return replicaSet; - } - - // Return the node where the nexus for volume is located or where it should - // be located if it hasn't been created so far. If the nexus should be - // located on a node that does not exist then return undefined. - // - // @param replicaSet List of replicas sorted by preferrence. - // @param appNode Name of the node where the volume will be mounted if known. - // @returns Node object or undefined if not schedulable. - // - _desiredNexusNode(replicaSet: Replica[], appNode?: String): Node | undefined { - if (this.publishedOn) { - return this.registry.getNode(this.publishedOn); - } - let nexusNode: Node | undefined; - if (appNode) { - nexusNode = this.registry.getNode(appNode.toString()); - } - if (!nexusNode && this.nexus) { - nexusNode = this.nexus.node; - } - // If nexus does not exist it will be created on one of the replica nodes - // with the least # of nexuses. - if (!nexusNode) { - nexusNode = replicaSet - .filter((r: Replica) => !!(r.pool && r.pool.node)) - .map((r: Replica) => r.pool!.node!) - .sort((a: Node, b: Node) => a.nexus.length - b.nexus.length)[0]; - } - assert(nexusNode); - return nexusNode; - } - - // Share replicas as appropriate to allow access from the nexus. - // It does not throw unless none of the replicas can be shared. - // Returns list of replicas that can be accessed by the nexus. 
- async _ensureReplicaShareProtocols(nexusNode: Node, replicaSet: Replica[]): Promise { - let accessibleReplicas: Replica[] = []; - - for (let i = 0; i < replicaSet.length; i++) { - const replica: Replica = replicaSet[i]; - if (replica.pool?.node === undefined) continue; - const replicaNode: Node = replica.pool.node; - let share; - const local = replicaNode === nexusNode; - // make sure that replica which is local to the nexus is accessed locally - if (local && replica.share !== 'REPLICA_NONE') { - share = 'REPLICA_NONE'; - } else if (!local && replica.share === 'REPLICA_NONE') { - // make sure that replica which is remote to nexus can be accessed - share = 'REPLICA_NVMF'; - } - if (share) { - try { - await replica.setShare(share); - accessibleReplicas.push(replica); - } catch (err) { - log.error(err.toString()); - } - } else { - accessibleReplicas.push(replica); - } - } - if (accessibleReplicas.length === 0) { - throw new GrpcError( - grpcCode.INTERNAL, - `None of the replicas of ${this} can be accessed by nexus`, - ); - } - return accessibleReplicas; - } - - // Update parameters of the volume. - // - // Throw exception if size of volume is changed in an incompatible way - // (unsupported). - // - // @params {object} spec Volume parameters. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // - update(spec: any) { - var changed = false; - - if (this.size && this.size < spec.requiredBytes) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - `Extending the volume "${this}" is not supported` - ); - } - if (spec.limitBytes && this.size > spec.limitBytes) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - `Shrinking the volume "${this}" is not supported` - ); - } - if (this.spec.protocol !== spec.protocol) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - `Changing the protocol for volume "${this}" is not supported` - ); - } - - if (this.spec.replicaCount !== spec.replicaCount) { - this.spec.replicaCount = spec.replicaCount; - changed = true; - } - if (this.spec.local !== spec.local) { - this.spec.local = spec.local; - changed = true; - } - if (!_.isEqual(this.spec.preferredNodes, spec.preferredNodes)) { - this.spec.preferredNodes = spec.preferredNodes; - changed = true; - } - if (!_.isEqual(this.spec.requiredNodes, spec.requiredNodes)) { - this.spec.requiredNodes = spec.requiredNodes; - changed = true; - } - if (this.spec.requiredBytes !== spec.requiredBytes) { - this.spec.requiredBytes = spec.requiredBytes; - changed = true; - } - if (this.spec.limitBytes !== spec.limitBytes) { - this.spec.limitBytes = spec.limitBytes; - changed = true; - } - if (changed) { - this._changed(); - this.fsa(); - } - } - - // Should be called whenever the volume changes. - // - // @param [eventType] The eventType is either new, mod or del and be default - // we assume "mod" which is the most common emitted event. - _changed(eventType?: string) { - this.emitter.emit('volume', { - eventType: eventType || 'mod', - object: this - }); - } - - // - // Handlers for the events from node registry follow - // - - // Add new replica to the volume. - // - // @param replica New replica object. 
- newReplica(replica: Replica) { - assert.strictEqual(replica.uuid, this.uuid); - const nodeName = replica.pool?.node?.name; - if (!nodeName) { - log.warn( - `Cannot add replica "${replica}" without a node to the volume` - ); - return; - } - if (this.replicas[nodeName]) { - log.warn( - `Trying to add the same replica "${replica}" to the volume twice` - ); - } else { - log.debug(`Replica "${replica}" attached to the volume`); - this.replicas[nodeName] = replica; - this._changed(); - this.fsa(); - } - } - - // Modify replica in the volume. - // - // @param replica Modified replica object. - modReplica(replica: Replica) { - assert.strictEqual(replica.uuid, this.uuid); - const nodeName = replica.pool?.node?.name; - if (!nodeName) { - log.warn( - `Cannot update volume by replica "${replica}" without a node` - ); - return; - } - if (!this.replicas[nodeName]) { - log.warn(`Modified replica "${replica}" does not belong to the volume`); - } else { - assert(this.replicas[nodeName] === replica); - this._changed(); - // the share protocol or uri could have changed - this.fsa(); - } - } - - // Delete replica in the volume. - // - // @param replica Deleted replica object. - delReplica(replica: Replica) { - assert.strictEqual(replica.uuid, this.uuid); - const nodeName = replica.pool?.node?.name; - if (!nodeName) { - log.warn( - `Cannot delete replica "${replica}" without a node from the volume` - ); - return; - } - if (!this.replicas[nodeName]) { - log.warn(`Deleted replica "${replica}" does not belong to the volume`); - } else { - log.debug(`Replica "${replica}" detached from the volume`); - assert(this.replicas[nodeName] === replica); - delete this.replicas[nodeName]; - this._changed(); - this.fsa(); - } - } - - // Assign nexus to the volume. - // - // @param nexus New nexus object. - newNexus(nexus: Nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (!this.nexus) { - // If there is no nexus then accept any. This is to support rebuild when - // volume is not published. - log.debug(`Nexus "${nexus}" attached to the volume`); - this.nexus = nexus; - if (!this.size) this.size = nexus.size; - this._changed(); - this.fsa(); - } else if (this.nexus === nexus) { - log.warn(`Trying to add the same nexus "${nexus}" to the volume twice`); - } else if (!this.publishedOn) { - log.warn(`Trying to add another nexus "${nexus}" to unpublished volume`); - nexus.destroy().catch((err) => { - log.error(`Failed to destroy duplicated nexus ${nexus}: ${err}`); - }); - } else if (this.publishedOn === nexus.node?.name) { - log.warn(`Replacing nexus "${this.nexus}" by "${nexus}" in the volume`); - const oldNexus = this.nexus; - this.nexus = nexus; - oldNexus.destroy().catch((err) => { - log.error(`Failed to destroy stale nexus "${oldNexus}": ${err}`); - }); - } else { - log.warn(`Destroying new nexus "${nexus}" on the wrong node`); - nexus.destroy().catch((err) => { - log.error(`Failed to destroy wrong nexus "${nexus}": ${err}`); - }); - } - } - - // Nexus has been modified. - // - // @param nexus Modified nexus object. - modNexus(nexus: Nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (!this.nexus) { - log.warn(`Modified nexus "${nexus}" does not belong to the volume`); - } else if (this.nexus === nexus) { - this._changed(); - this.fsa(); - } - } - - // Delete nexus in the volume. - // - // @param nexus Deleted nexus object. 
- delNexus(nexus: Nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (!this.nexus) { - log.warn(`Deleted nexus "${nexus}" does not belong to the volume`); - } else if (this.nexus === nexus) { - log.debug(`Nexus "${nexus}" detached from the volume`); - assert.strictEqual(this.nexus, nexus); - this.nexus = null; - this._changed(); - this.fsa(); - } else { - // if this is a different nexus than ours, ignore it - } - } -} \ No newline at end of file diff --git a/csi/moac/src/volume_operator.ts b/csi/moac/src/volume_operator.ts deleted file mode 100644 index a8e49e15e..000000000 --- a/csi/moac/src/volume_operator.ts +++ /dev/null @@ -1,503 +0,0 @@ -// Volume operator managing volume k8s custom resources. -// -// Primary motivation for the resource is to provide information about -// existing volumes. Other actions and their consequences follow: -// -// * destroying the resource implies volume destruction (not advisable) -// * creating the resource implies volume import (not advisable) -// * modification of "preferred nodes" property influences scheduling of new replicas -// * modification of "required nodes" property moves the volume to different nodes -// * modification of replica count property changes redundancy of the volume -// -// Volume operator stands between k8s custom resource (CR) describing desired -// state and volume manager reflecting the actual state. It gets new/mod/del -// events from both, from the world of ideas and from the world of material -// things. It's task which is not easy, is to restore harmony between them: -// -// +---------+ new/mod/del +----------+ new/mod/del +-----------+ -// | Volumes +--------------> Operator <---------------+ Watcher | -// +------^--+ ++--------++ +---^-------+ -// | | | | -// | | | | -// +------------------+ +--------------------+ -// create/modify/destroy create/modify/destroy -// -// -// real object event | CR exists | CR does not exist -// ------------------------------------------------------------ -// new | -- | create CR -// mod | modify CR | -- -// del | delete CR | -- -// -// -// CR event | volume exists | volume does not exist -// --------------------------------------------------------------- -// new | modify volume | create volume -// mod | modify volume | -- -// del | delete volume | -- -// - -const yaml = require('js-yaml'); - -import assert from 'assert'; -import * as fs from 'fs'; -import * as _ from 'lodash'; -import * as path from 'path'; -import { - ApiextensionsV1Api, - KubeConfig, -} from '@kubernetes/client-node'; -import { - CustomResource, - CustomResourceCache, - CustomResourceMeta, -} from './watcher'; -import { EventStream } from './event_stream'; -import { protocolFromString } from './nexus'; -import { Replica } from './replica'; -import { Volume } from './volume'; -import { Volumes } from './volumes'; -import { VolumeSpec, VolumeState, volumeStateFromString } from './volume'; -import { Workq } from './workq'; -import { Logger } from './logger'; - -const log = Logger('volume-operator'); - -const RESOURCE_NAME: string = 'mayastorvolume'; -const crdVolume = yaml.load( - fs.readFileSync(path.join(__dirname, '../crds/mayastorvolume.yaml'), 'utf8') -); -// lower-case letters uuid pattern -const uuidRegexp = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$/; - -// Optional status part in volume resource -export type VolumeStatus = { - size: number, - state: VolumeState, - reason?: string, - targetNodes?: string[], // node name of nexus if the volume is published - replicas: { - node: 
string, - pool: string, - uri: string, - offline: boolean, - }[], - nexus?: { - node: string, - deviceUri?: string, - state: string, - children: { - uri: string, - state: string, - }[] - } -}; - -// Object defines properties of node resource. -export class VolumeResource extends CustomResource { - apiVersion?: string; - kind?: string; - metadata: CustomResourceMeta; - spec: VolumeSpec; - status?: VolumeStatus; - - constructor(cr: CustomResource) { - super(); - this.apiVersion = cr.apiVersion; - this.kind = cr.kind; - if (cr.metadata?.name === undefined) { - throw new Error('Missing name attribute'); - } - this.metadata = cr.metadata; - if (!cr.metadata.name.match(uuidRegexp)) { - throw new Error(`Invalid UUID`); - } - let spec = cr.spec as any; - if (spec === undefined) { - throw new Error('Missing spec section'); - } - if (!spec.requiredBytes) { - throw new Error('Missing requiredBytes'); - } - this.spec = { - replicaCount: spec.replicaCount || 1, - local: spec.local || false, - preferredNodes: [].concat(spec.preferredNodes || []), - requiredNodes: [].concat(spec.requiredNodes || []), - requiredBytes: spec.requiredBytes, - limitBytes: spec.limitBytes || 0, - protocol: protocolFromString(spec.protocol) - }; - let status = cr.status as any; - if (status !== undefined) { - this.status = { - size: status.size || 0, - state: volumeStateFromString(status.state), - // sort the replicas according to node name to have deterministic order - replicas: [] - .concat(status.replicas || []) - .sort((a: any, b: any) => a.node.localeCompare(b.node)), - }; - if (status.targetNodes && status.targetNodes.length > 0) { - this.status.targetNodes = [].concat(status.targetNodes).sort(); - } - if (status.nexus) { - this.status.nexus = status.nexus; - } - } - } - - getUuid(): string { - let uuid = this.metadata.name; - if (uuid === undefined) { - throw new Error('Volume resource without UUID'); - } else { - return uuid; - } - } -} - -// Volume operator managing volume k8s custom resources. -export class VolumeOperator { - namespace: string; - volumes: Volumes; // Volume manager - eventStream: any; // A stream of node, replica and nexus events. - watcher: CustomResourceCache; // volume resource watcher. - workq: Workq; // Events from k8s are serialized so that we don't flood moac by - // concurrent changes to volumes. - - // Create volume operator object. - // - // @param namespace Namespace the operator should operate on. - // @param kubeConfig KubeConfig. - // @param volumes Volume manager. - // @param [idleTimeout] Timeout for restarting watcher connection when idle. - constructor ( - namespace: string, - kubeConfig: KubeConfig, - volumes: Volumes, - idleTimeout: number | undefined, - ) { - this.namespace = namespace; - this.volumes = volumes; - this.eventStream = null; - this.workq = new Workq('mayastorvolume'); - this.watcher = new CustomResourceCache( - this.namespace, - RESOURCE_NAME, - kubeConfig, - VolumeResource, - { idleTimeout } - ); - } - - // Create volume CRD if it doesn't exist. - // - // @param kubeConfig KubeConfig. - async init (kubeConfig: KubeConfig) { - log.info('Initializing volume operator'); - let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); - try { - await k8sExtApi.createCustomResourceDefinition(crdVolume); - log.info(`Created CRD ${RESOURCE_NAME}`); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. - if (err.statusCode !== 409) throw err; - } - } - - // Start volume operator's watcher loop. 
- // - // NOTE: Not getting the start sequence right can have catastrophic - // consequence leading to unintended volume destruction and data loss. - // - async start () { - var self = this; - - // install event handlers to follow changes to resources. - this._bindWatcher(this.watcher); - await this.watcher.start(); - - // This will start async processing of volume events. - this.eventStream = new EventStream({ volumes: this.volumes }); - this.eventStream.on('data', async (ev: any) => { - // the only kind of event that comes from the volumes source - assert(ev.kind === 'volume'); - self.workq.push(ev, self._onVolumeEvent.bind(self)); - }); - } - - async _onVolumeEvent (ev: any) { - const uuid = ev.object.uuid; - - if (ev.eventType === 'new' || ev.eventType === 'mod') { - const origObj = this.watcher.get(uuid); - const spec = ev.object.spec; - const status = this._volumeToStatus(ev.object); - - if (origObj !== undefined) { - await this._updateSpec(uuid, origObj, spec); - } else if (ev.eventType === 'new') { - try { - await this._createResource(uuid, spec); - } catch (err) { - log.error(`Failed to create volume resource "${uuid}": ${err}`); - return; - } - } - await this._updateStatus(uuid, status); - } else if (ev.eventType === 'del') { - await this._deleteResource(uuid); - } else { - assert(false); - } - } - - // Transform volume to status properties used in k8s volume resource. - // - // @param volume Volume object. - // @returns Status properties. - // - _volumeToStatus (volume: Volume): VolumeStatus { - const st: VolumeStatus = { - size: volume.getSize(), - state: volume.state, - replicas: volume.getReplicas() - // ignore replicas that are being removed (disassociated from node) - .filter((r: Replica) => !!r.pool?.node) - .map((r: Replica) => { - return { - node: r.pool!.node!.name, - pool: r.pool!.name, - uri: r.uri, - offline: r.isOffline() - }; - }) - // enforce consistent order - important when comparing status objects - .sort((r1, r2) => r1.node.localeCompare(r2.node)) - }; - const nodeName = volume.getNodeName(); - if (nodeName) { - // NOTE: sort it when we have more than just one entry - st.targetNodes = [ nodeName ]; - } - const nexus = volume.getNexus(); - if (nexus && nexus.node) { - st.nexus = { - node: nexus.node.name, - state: nexus.state, - children: nexus.children.map((ch: any) => { - return { - uri: ch.uri, - state: ch.state - }; - }) - }; - if (nexus.deviceUri) { - st.nexus.deviceUri = nexus.deviceUri; - } - } - return st; - } - - // Create k8s CRD object. - // - // @param uuid ID of the created volume. - // @param spec New volume spec. - // - async _createResource (uuid: string, spec: VolumeSpec) { - await this.watcher.create({ - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: { - name: uuid, - namespace: this.namespace - }, - spec - }); - } - - // Update properties of k8s CRD object or create it if it does not exist. - // - // @param uuid ID of the updated volume. - // @param origObj Existing k8s resource object. - // @param spec New volume spec. 
- // - async _updateSpec (uuid: string, origObj: VolumeResource, spec: VolumeSpec) { - try { - await this.watcher.update(uuid, (orig: VolumeResource) => { - // Update object only if it has really changed - if (_.isEqual(origObj.spec, spec)) { - return; - } - log.info(`Updating spec of volume resource "${uuid}"`); - return { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: orig.metadata, - spec, - }; - }); - } catch (err) { - log.error(`Failed to update volume resource "${uuid}": ${err}`); - return; - } - } - - // Update status of the volume based on real data obtained from storage node. - // - // @param uuid UUID of the resource. - // @param status Status properties. - // - async _updateStatus (uuid: string, status: VolumeStatus) { - try { - await this.watcher.updateStatus(uuid, (orig: VolumeResource) => { - if (_.isEqual(orig.status, status)) { - // avoid unnecessary status updates - return; - } - log.debug(`Updating status of volume resource "${uuid}"`); - // merge old and new properties - return { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: orig.metadata, - spec: orig.spec, - status, - }; - }); - } catch (err) { - log.error(`Failed to update status of volume resource "${uuid}": ${err}`); - } - } - - // Set state and reason not touching the other status fields. - async _updateState (uuid: string, state: VolumeState, reason: string) { - try { - await this.watcher.updateStatus(uuid, (orig: VolumeResource) => { - if (orig.status?.state === state && orig.status?.reason === reason) { - // avoid unnecessary status updates - return; - } - log.debug(`Updating state of volume resource "${uuid}"`); - // merge old and new properties - let newStatus = _.assign({}, orig.status, { state, reason }); - return { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: orig.metadata, - spec: orig.spec, - status: newStatus, - }; - }); - } catch (err) { - log.error(`Failed to update status of volume resource "${uuid}": ${err}`); - } - } - - // Delete volume resource with specified uuid. - // - // @param uuid UUID of the volume resource to delete. - // - async _deleteResource (uuid: string) { - try { - log.info(`Deleting volume resource "${uuid}"`); - await this.watcher.delete(uuid); - } catch (err) { - log.error(`Failed to delete volume resource "${uuid}": ${err}`); - } - } - - // Stop listening for watcher and node events and reset the cache - async stop () { - this.watcher.stop(); - this.watcher.removeAllListeners(); - if (this.eventStream) { - this.eventStream.destroy(); - this.eventStream = null; - } - } - - // Bind watcher's new/mod/del events to volume operator's callbacks. - // - // @param watcher k8s volume resource cache. - // - _bindWatcher (watcher: CustomResourceCache) { - watcher.on('new', (obj: VolumeResource) => { - this.workq.push(obj, this._importVolume.bind(this)); - }); - watcher.on('mod', (obj: VolumeResource) => { - this.workq.push(obj, this._modifyVolume.bind(this)); - }); - watcher.on('del', (obj: VolumeResource) => { - this.workq.push(obj.metadata.name!, this._destroyVolume.bind(this)); - }); - } - - // When moac restarts the volume manager does not know which volumes exist. - // We need to import volumes based on the k8s resources. - // - // @param resource Volume resource properties. 
- // - async _importVolume (resource: VolumeResource) { - const uuid = resource.getUuid(); - - log.debug(`Importing volume "${uuid}" in response to "new" resource event`); - try { - this.volumes.importVolume(uuid, resource.spec, resource.status); - } catch (err) { - log.error( - `Failed to import volume "${uuid}" based on new resource: ${err}` - ); - await this._updateState(uuid, VolumeState.Error, err.toString()); - } - } - - // Modify volume according to the specification. - // - // @param resource Volume resource properties. - // - async _modifyVolume (resource: VolumeResource) { - const uuid = resource.getUuid(); - const volume = this.volumes.get(uuid); - - if (!volume) { - log.warn( - `Volume resource "${uuid}" was modified but the volume does not exist` - ); - return; - } - try { - volume.update(resource.spec); - } catch (err) { - log.error(`Failed to update volume "${uuid}" based on resource: ${err}`); - } - } - - // Remove the volume from internal state and if it exists destroy it. - // - // @param uuid ID of the volume to destroy. - // - async _destroyVolume (uuid: string) { - const volume = this.volumes.get(uuid); - if (!volume) { - log.warn( - `Volume resource "${uuid}" was deleted but the volume does not exist` - ); - return; - } else if (volume.state === VolumeState.Destroyed) { - log.warn(`Destruction of volume "${uuid}" is already in progress`); - return; - } - - log.debug( - `Destroying volume "${uuid}" in response to "del" resource event` - ); - - try { - await this.volumes.destroyVolume(uuid); - } catch (err) { - log.error(`Failed to destroy volume "${uuid}": ${err}`); - } - } -} diff --git a/csi/moac/src/volumes.ts b/csi/moac/src/volumes.ts deleted file mode 100644 index 72123222c..000000000 --- a/csi/moac/src/volumes.ts +++ /dev/null @@ -1,223 +0,0 @@ -// Volume manager implementation. - -import events = require('events'); -import { grpcCode, GrpcError } from './grpc_client'; -import { Volume, VolumeSpec, VolumeState } from './volume'; -import { Workq } from './workq'; -import { VolumeStatus } from './volume_operator'; -import { EventStream } from './event_stream'; -import { Logger } from './logger'; - -const log = Logger('volumes'); - -// Type used in "create volume" workq -type CreateArgs = { - uuid: string; - spec: VolumeSpec; -} - -// Volume manager that emit events for new/modified/deleted volumes. -export class Volumes extends events.EventEmitter { - private registry: any; - private events: any; // stream of events from registry - private volumes: Record; // volumes indexed by uuid - - constructor (registry: any) { - super(); - this.registry = registry; - this.events = null; - this.volumes = {}; - } - - start() { - const self = this; - this.events = new EventStream({ registry: this.registry }); - this.events.on('data', async function (ev: any) { - if (ev.kind === 'pool' && ev.eventType === 'new') { - // New pool was added and perhaps we have volumes waiting to schedule - // their replicas on it. - Object.values(self.volumes) - .filter((v) => v.state === VolumeState.Degraded) - .forEach((v) => v.fsa()); - } else if (ev.kind === 'replica' || ev.kind === 'nexus') { - const uuid: string = ev.object.uuid; - const volume = self.volumes[uuid]; - if (!volume) { - // Ignore events for volumes that do not exist. Those might be events - // related to a volume that is being destroyed. 
- log.debug(`${ev.eventType} event for unknown volume "${uuid}"`); - return; - } - if (ev.kind === 'replica') { - if (ev.eventType === 'new') { - volume.newReplica(ev.object); - } else if (ev.eventType === 'mod') { - volume.modReplica(ev.object); - } else if (ev.eventType === 'del') { - volume.delReplica(ev.object); - } - } else if (ev.kind === 'nexus') { - if (ev.eventType === 'new') { - volume.newNexus(ev.object); - } else if (ev.eventType === 'mod') { - volume.modNexus(ev.object); - } else if (ev.eventType === 'del') { - volume.delNexus(ev.object); - } - } - } else if (ev.kind === 'node' && ev.object.isSynced()) { - // Create nexus for volumes that should have one on the node - Object.values(self.volumes) - .filter((v) => v.getNodeName() === ev.object.name) - .forEach((v) => v.fsa()); - } - }); - } - - stop() { - this.events.destroy(); - this.events.removeAllListeners(); - this.events = null; - Object.values(this.volumes).forEach((vol) => { - vol.deactivate(); - }) - this.volumes = {}; - } - - // Return a volume with specified uuid. - // - // @param uuid ID of the volume. - // @returns Matching volume or undefined if not found. - // - get(uuid: string): Volume | undefined { - return this.volumes[uuid]; - } - - // Return all volumes. - list(): Volume[] { - return Object.values(this.volumes); - } - - // Create volume object (just the object) and add it to the internal list - // of volumes. The method is idempotent. If a volume with the same uuid - // already exists, then update its parameters. - // - // @param {string} uuid ID of the volume. - // @param {object} spec Properties of the volume. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // @returns {object} New volume object. - async createVolume(uuid: string, spec: VolumeSpec): Promise { - if (!spec.requiredBytes || spec.requiredBytes < 0) { - throw new GrpcError( - grpcCode.INVALID_ARGUMENT, - 'Required bytes must be greater than zero' - ); - } - let volume = this.volumes[uuid]; - if (volume) { - if (volume.isSpecUpdatable()) - volume.update(spec); - else { - // note: if the volume is destroyed but still in the list, it may never get deleted again and so - // subsequent calls to create volume will keep failing. - log.error(`Failing createVolume for volume ${uuid} because its state is "${volume.state}"`); - throw new GrpcError( - grpcCode.UNAVAILABLE, - `Volume cannot be updated, its state is "${volume.state}"` - ); - } - } else { - // The volume starts to exist before it is created because we must receive - // events for it and we want to show to user that it is being created. - this.volumes[uuid] = new Volume(uuid, this.registry, this, spec); - volume = this.volumes[uuid]; - this.emit('volume', { - eventType: 'new', - object: volume - }); - - try { - await volume.create(); - } catch (err) { - // Undo the pending state and whatever has been created - volume.state = VolumeState.Unknown; - try { - this.destroyVolume(uuid); - } catch (err) { - log.error(`Failed to destroy "${volume}": ${err}`); - } - throw err; - } - volume.fsa(); - } - return volume; - } - - // Destroy the volume. 
- // - // The method is idempotent - if the volume does not exist it does not return - // an error. - // - // @param uuid ID of the volume. - // - async destroyVolume(uuid: string) { - const volume = this.volumes[uuid]; - if (!volume) return; - - await volume.destroy(); - volume.deactivate(); - delete this.volumes[uuid]; - } - - // Import the volume object (just the object) and add it to the internal list - // of volumes. The method is idempotent. If a volume with the same uuid - // already exists, then update its parameters. - // - // @param {string} uuid ID of the volume. - // @param {object} spec Properties of the volume. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // @params {object} status Current properties of the volume - // @params {string} status.state Last known state of the volume. - // @params {number} status.size Size of the volume. - // @params {string} status.targetNodes Node(s) where the volume is published. - // @returns {object} New volume object. - // - importVolume(uuid: string, spec: VolumeSpec, status?: VolumeStatus): Volume { - let volume = this.volumes[uuid]; - - if (!volume) { - let state = status?.state; - let size = status?.size; - // We don't support multiple nexuses yet so take the first one - let publishedOn = (status?.targetNodes || []).pop(); - // If for some strange reason the status is "pending" change it to unknown - // because fsa would refuse to act on it otherwise. - if (!state || state === VolumeState.Pending) { - state = VolumeState.Unknown; - } - this.volumes[uuid] = new Volume( - uuid, - this.registry, - this, - spec, - state, - size, - publishedOn, - ); - volume = this.volumes[uuid]; - volume.attach(); - volume.fsa(); - } - return volume; - } -} \ No newline at end of file diff --git a/csi/moac/src/watcher.ts b/csi/moac/src/watcher.ts deleted file mode 100644 index 6baa7ce7a..000000000 --- a/csi/moac/src/watcher.ts +++ /dev/null @@ -1,569 +0,0 @@ -// Implementation of a cache for arbitrary k8s custom resource in openebs.io -// api with v1alpha1 version. - -import assert from 'assert'; -import * as _ from 'lodash'; -import events = require('events'); -import { - CustomObjectsApi, - HttpError, - KubeConfig, - KubernetesObject, - KubernetesListObject, - ListWatch, - V1ListMeta, - Watch, -} from '@kubernetes/client-node'; -import { Logger } from './logger'; - -const log = Logger('watcher'); - -// If listWatch errors out then we restart it after this many msecs. -const RESTART_DELAY: number = 3000; -// We wait this many msecs for an event confirming operation done previously. -const EVENT_TIMEOUT: number = 5000; -const GROUP: string = 'openebs.io'; -const VERSION: string = 'v1alpha1'; - -// Errors generated by api requests are hopelessly useless. We need to add -// a text from http body to them. -function bodyError(prefix: string, error: any): any { - if (error instanceof HttpError) { - error.message = prefix + ': ' + error.body.message; - } else { - error.message = prefix + ': ' + error.message; - } - return error; -} - -// Commonly used metadata attributes. 
-export class CustomResourceMeta extends V1ListMeta { - name?: string; - namespace?: string; - generation?: number; - finalizers?: string[]; -} - -// Properties of custom resources (all optional so that we can do easy -// conversion from "object" type) -export class CustomResource implements KubernetesObject { - apiVersion?: string; - kind?: string; - metadata?: CustomResourceMeta; - spec?: object; - status?: any; -} - -class TimeoutError extends Error { - constructor() { - super(); - } -} - -// Utility class for wrapping asynchronous operations that once done, need to be -// confirmed by something from outside (i.e. watcher event). If confirmation does -// not arrive on time, then end the operation regardless and let user know. -class ConfirmOp { - private id: string; - private timer: NodeJS.Timeout | null; - private timeout: number; - private since: number; - private confirmed: boolean; - private done: boolean; - private resolve?: () => void; - private reject?: (err: any) => void; - - constructor(id: string, timeout: number) { - this.id = id; - this.timeout = timeout; - this.since = 0; - this.timer = null; - this.confirmed = false; - this.done = false; - } - - run(action: () => Promise): Promise { - this.since = (new Date()).getTime(); - if (this.timeout <= 0) { - this.confirmed = true; - } - return new Promise((resolve, reject) => { - this.resolve = resolve; - this.reject = reject; - action() - .then(() => { - this.done = true; - if (!this.confirmed) { - this.timer = setTimeout(() => { - const delta = (new Date()).getTime() - this.since; - log.warn(`Timed out waiting for watcher event on "${this.id}" (${delta}ms)`); - this.timer = null; - reject(new TimeoutError()); - }, this.timeout); - } else { - this._complete(); - } - }) - .catch((err) => { - this.done = true; - this._complete(err); - }); - }); - } - - // Beware that confirm can come before the operation done callback! - confirm() { - this.confirmed = true; - if (this.timeout > 0) { - this._complete(); - } - } - - _complete(err?: any) { - if (!err && (!this.confirmed || !this.done)) return; - - const delta = (new Date()).getTime() - this.since; - log.trace(`The operation on "${this.id}" took ${delta}ms`); - if (this.timer) { - clearTimeout(this.timer); - } - if (err) { - this.reject!(err); - } else { - this.resolve!(); - } - } -} - -// Resource cache keeps track of a k8s custom resource and exposes methods -// for modifying the cache content. -// -// It is a classic operator loop design as seen in i.e. operator-sdk (golang) -// to watch a k8s resource. We utilize k8s client library to take care of low -// level details. -// -// It is a general implementation of watcher which can be used for any resource -// operator. The operator should subscribe to "new", "mod" and "del" events that -// are triggered when a resource is added, modified or deleted. -export class CustomResourceCache extends events.EventEmitter { - name: string; - plural: string; - namespace: string; - waiting: Record; - k8sApi: CustomObjectsApi; - listWatch: ListWatch; - creator: new (obj: CustomResource) => T; - eventHandlers: Record void>; - onErrorCb: (err: any) => void; - connected: boolean; - restartDelay: number; - idleTimeout: number; - eventTimeout: number; - timer: any; - - // Create the cache for given namespace and resource name. - // - // @param namespace Namespace of custom resource. - // @param name Name of the resource. - // @param kubeConfig Kube config object. - // @param creator Constructor of the object from custom resource object. 
- // @param opts Cache/watcher options. - constructor( - namespace: string, - name: string, - kubeConfig: KubeConfig, - creator: new (obj: CustomResource) => T, - opts?: { - restartDelay?: number, - eventTimeout?: number, - idleTimeout?: number - } - ) { - super(); - this.k8sApi = kubeConfig.makeApiClient(CustomObjectsApi); - this.name = name; - this.plural = name + 's'; - this.namespace = namespace; - this.creator = creator; - this.waiting = {}; - this.connected = false; - this.restartDelay = opts?.restartDelay || RESTART_DELAY; - this.eventTimeout = opts?.eventTimeout || EVENT_TIMEOUT; - this.idleTimeout = opts?.idleTimeout || 0; - this.eventHandlers = { - error: this._onError.bind(this), - add: this._onEvent.bind(this, 'new'), - update: this._onEvent.bind(this, 'mod'), - delete: this._onEvent.bind(this, 'del'), - }; - this.onErrorCb = (err) => this._onError(err); - - const watch = new Watch(kubeConfig); - this.listWatch = new ListWatch( - `/apis/${GROUP}/${VERSION}/namespaces/${this.namespace}/${this.plural}`, - watch, - async () => { - var resp = await this.k8sApi.listNamespacedCustomObject( - GROUP, - VERSION, - this.namespace, - this.plural); - return { - response: resp.response, - body: resp.body as KubernetesListObject, - }; - }, - false - ); - } - - // Clear idle/restart timer. - _clearTimer() { - if (this.timer) { - clearTimeout(this.timer); - this.timer = undefined; - } - } - // Install a timer that restarts watcher if idle for more than x seconds. - // On Azure AKS we have observed watcher connections that don't get any - // events after some time when idle. - _setIdleTimeout() { - if (this.idleTimeout > 0) { - this._clearTimer(); - this.timer = setTimeout(() => { - this.stop(); - this.start(); - }, this.idleTimeout); - } - } - - // Called upon a watcher event. It unblocks create or update operation if any - // is waiting for the event and propagates the event further. - _onEvent(event: string, cr: CustomResource) { - let name = cr.metadata?.name; - if (name === undefined) { - log.error(`Ignoring event ${event} with object without a name`); - return; - } - log.trace(`Received watcher event ${event} for ${this.name} "${name}": ${JSON.stringify(cr)}`); - this._setIdleTimeout(); - let confirmOp = this.waiting[name]; - if (confirmOp) { - confirmOp.confirm(); - } - this._doWithObject(cr, (obj) => this.emit(event, obj)); - } - - // Convert custom resource object to desired object swallowing exceptions - // and call callback with the new object. - _doWithObject(obj: CustomResource | undefined, cb: (obj: T) => void): void { - if (obj === undefined) return; - - try { - var newObj = new this.creator(obj); - } catch (e) { - log.error(`Ignoring invalid ${this.name} custom resource: ${e}`); - return; - } - cb(newObj); - } - - // This method does not return until the cache is successfully populated. - // That means that the promise eventually always fulfills (resolves). - start(): Promise { - for (let evName in this.eventHandlers) { - this.listWatch.on(evName, this.eventHandlers[evName]); - } - return this.listWatch.start() - .then(() => { - // k8s client library has a bug/feature that on-error cb is called right - // after the client starts, so we must register it after it starts. 
- this.listWatch.on('error', this.onErrorCb); - - this.connected = true; - log.debug(`${this.name} watcher with ${this.listWatch.list().length} objects was started`); - log.trace(`Initial content of the "${this.name}" cache: ` + - this.listWatch.list().map((i: CustomResource) => i.metadata?.name)); - this._setIdleTimeout(); - }) - .catch((err) => { - log.error(`Failed to start ${this.name} watcher: ${err}`) - this.stop(); - log.info(`Restart ${this.name} watcher after ${this.restartDelay}ms...`); - return new Promise((resolve, reject) => { - this.timer = setTimeout(() => { - this.start().then(resolve, reject); - }, this.restartDelay); - }); - }); - } - - // Called when the connection breaks. - _onError(err: any) { - // do not print the error if it was us who terminated the watcher - if (this.connected) { - log.error(`Watcher error: ${err}`); - this.stop(); - } - log.info(`Restarting ${this.name} watcher after ${this.restartDelay}ms...`); - this.timer = setTimeout(() => this.start(), this.restartDelay); - } - - // Deregister all internal event handlers on the watcher. - stop() { - log.debug(`Deregistering "${this.name}" cache event handlers`); - for (let evName in this.eventHandlers) { - this.listWatch.off(evName, this.eventHandlers[evName]); - } - this.listWatch.off('error', this.onErrorCb); - - if (this.connected) { - this._clearTimer(); - this.connected = false; - this.listWatch.stop(); - } - } - - isConnected(): boolean { - // should we propagate event to consumers about the reset? - return this.connected; - } - - // Get all objects from the cache. - list(): T[] { - let list: T[] = []; - this.listWatch.list().forEach((item) => { - this._doWithObject(item, (obj) => list.push(obj)); - }); - return list; - } - - // Get object with given name (ID). - get(name: string): T | undefined { - var result; - this._doWithObject(this.listWatch.get(name), (obj) => result = obj); - return result; - } - - // Execute the action and do not return until we receive an event from watcher. - // Otherwise the object in the cache might be stale when we do the next - // modification to it. Set timeout for the case when we never receive the - // event and restart the watcher to get fresh content in that case. - async _waitForEvent(name: string, action: () => Promise) { - this.waiting[name] = new ConfirmOp(name, this.eventTimeout); - try { - await this.waiting[name].run(action); - } catch (err) { - delete this.waiting[name]; - if (err instanceof TimeoutError) { - // restart the cache - this.stop(); - await this.start(); - } else { - throw err; - } - } - } - - // Create the resource and wait for it to be created. - async create(obj: CustomResource) { - let name: string = obj.metadata?.name || ''; - if (!name) { - throw Error("Object does not have a name"); - } - log.trace(`Creating new "${this.name}" resource: ${JSON.stringify(obj)}`); - await this._waitForEvent( - name, - async () => { - try { - await this.k8sApi.createNamespacedCustomObject( - GROUP, - VERSION, - this.namespace, - this.plural, - obj - ); - } catch (err) { - throw bodyError(`Delete of ${this.name} "${name}" failed`, err); - } - } - ); - } - - // Update the resource. The merge callback takes the original version from - // the cache, modifies it and returns the new version of object. The reason - // for this is that sometimes we get stale errors and we must repeat - // the operation with an updated version of the original object. 
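// A condensed sketch of the "confirm by watcher event" pattern used above
// (ConfirmOp / _waitForEvent), independent of the k8s client: the write is
// issued, and the returned promise resolves only once an external confirmation
// arrives, or rejects after a timeout so the caller can refresh stale state.
// Class and method names here are illustrative, not the moac API.
class PendingConfirmation {
  private confirmCb: () => void = () => {};

  run(action: () => Promise<void>, timeoutMs: number): Promise<void> {
    return new Promise<void>((resolve, reject) => {
      const timer = setTimeout(
        () => reject(new Error('timed out waiting for confirmation')),
        timeoutMs
      );
      // Invoked by the watcher event handler once the change becomes visible.
      this.confirmCb = () => {
        clearTimeout(timer);
        resolve();
      };
      // If the API call itself fails there is nothing to wait for.
      action().catch((err) => {
        clearTimeout(timer);
        reject(err);
      });
    });
  }

  confirm() {
    this.confirmCb();
  }
}
// Typical use (placeholder API call): await op.run(() => apiWrite(), 5000);
// and from the watcher 'mod'/'del' handler: op.confirm();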
- async update(name: string, merge: (orig: T) => CustomResource | undefined) { - await this._update(name, () => { - let orig = this.get(name); - if (orig === undefined) { - log.warn(`Tried to update ${this.name} "${name}" that does not exist`); - return; - } - return merge(orig); - }); - } - - // Same as above but works with custom resource type rather than user - // defined object. - async _updateCustomResource(name: string, merge: (orig: CustomResource) => CustomResource | undefined) { - await this._update(name, () => { - let orig = this.listWatch.get(name); - if (orig === undefined) { - log.warn(`Tried to update ${this.name} "${name}" that does not exist`); - return; - } - return merge(orig); - }); - } - - // Update the resource and wait for mod event. If update fails due to an error - // we restart the watcher and retry the operation. If event does not come, - // we restart the watcher. - async _update( - name: string, - getAndMerge: () => CustomResource | undefined, - ) { - for (let retries = 1; retries >= 0; retries -= 1) { - let obj = getAndMerge(); - if (obj === undefined) { - // likely means that the props are the same - nothing to do - return; - } - log.trace(`Updating ${this.name} "${name}": ${JSON.stringify(obj)}`); - try { - await this._waitForEvent( - name, - async () => { - await this.k8sApi.replaceNamespacedCustomObject( - GROUP, - VERSION, - this.namespace, - this.plural, - name, - obj! - ); - } - ); - break; - } catch (err) { - err = bodyError(`Update of ${this.name} "${name}" failed`, err); - if (retries == 0) { - throw err; - } - log.warn(`${err} (retrying ...)`); - this.stop(); - await this.start(); - } - } - } - - // Update status of the resource. Unlike in case create/update we don't have - // to wait for confirming event because generation number is not incremented - // upon status change. - async updateStatus(name: string, merge: (orig: T) => CustomResource | undefined) { - for (let retries = 1; retries >= 0; retries -= 1) { - let orig = this.get(name); - if (orig === undefined) { - log.warn(`Tried to update status of ${this.name} "${name}" but it is gone`); - return; - } - let obj = merge(orig); - if (obj === undefined) { - // likely means that the props are the same - nothing to do - return; - } - log.trace(`Updating status of ${this.name} "${name}": ${JSON.stringify(obj.status)}`); - try { - await this._waitForEvent( - name, - async () => { - await this.k8sApi.replaceNamespacedCustomObjectStatus( - GROUP, - VERSION, - this.namespace, - this.plural, - name, - obj! - ); - } - ); - break; - } catch (err) { - err = bodyError(`Status update of ${this.name} "${name}" failed`, err); - if (retries == 0) { - throw err; - } - log.warn(`${err} (retrying ...)`); - this.stop(); - await this.start(); - } - } - } - - // Delete the resource. - async delete(name: string) { - let orig = this.get(name); - if (orig === undefined) { - log.warn(`Tried to delete ${this.name} "${name}" that does not exist`); - return new Promise((resolve) => resolve(undefined)); - } - log.trace(`Deleting ${this.name} "${name}"`); - await this._waitForEvent( - name, - async () => { - try { - await this.k8sApi.deleteNamespacedCustomObject( - GROUP, - VERSION, - this.namespace, - this.plural, - name - ); - } catch (err) { - throw bodyError(`Delete of ${this.name} "${name}" failed`, err); - } - } - ); - } - - // Add finalizer to given resource if not already there. 
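// A generic sketch of the bounded retry used by _update/updateStatus above:
// re-read the latest cached object, apply the merge, attempt the write, and on
// failure refresh the cache before the final attempt. The write() and refresh()
// callbacks are assumptions standing in for the k8s replace call and the
// watcher stop()/start() cycle.
async function updateWithRetry<T>(
  getAndMerge: () => T | undefined,
  write: (obj: T) => Promise<void>,
  refresh: () => Promise<void>,
  retries = 1
): Promise<void> {
  for (let attempt = retries; attempt >= 0; attempt -= 1) {
    const obj = getAndMerge();
    if (obj === undefined) return; // nothing changed, nothing to write
    try {
      await write(obj);
      return;
    } catch (err) {
      if (attempt === 0) throw err;
      await refresh(); // drop stale cache content before retrying
    }
  }
}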
- async addFinalizer(name: string, finalizer: string) { - await this._updateCustomResource(name, (orig) => { - let finalizers = orig.metadata?.finalizers; - let newFinalizers = finalizers || []; - if (newFinalizers.indexOf(finalizer) >= 0) { - // it's already there - return; - } - newFinalizers = [finalizer].concat(newFinalizers); - let obj = _.cloneDeep(orig); - if (obj.metadata === undefined) { - throw new Error(`Resource ${this.name} "${name}" without metadata`) - } - obj.metadata.finalizers = newFinalizers; - return obj; - }); - } - - // Remove finalizer from the resource in case it's there. - async removeFinalizer(name: string, finalizer: string) { - await this._updateCustomResource(name, (orig) => { - let finalizers = orig.metadata?.finalizers; - let newFinalizers = finalizers || []; - let idx = newFinalizers.indexOf(finalizer); - if (idx < 0) { - // it's not there - return; - } - newFinalizers.splice(idx, 1); - let obj = _.cloneDeep(orig); - if (obj.metadata === undefined) { - throw new Error(`Resource ${this.name} "${name}" without metadata`) - } - obj.metadata.finalizers = newFinalizers; - return obj; - }); - } -} diff --git a/csi/moac/src/workq.ts b/csi/moac/src/workq.ts deleted file mode 100644 index 6b2cc0b55..000000000 --- a/csi/moac/src/workq.ts +++ /dev/null @@ -1,74 +0,0 @@ - -import assert from 'assert'; -import { Logger } from './logger'; - -const log = Logger('workq'); - -type Task = { - func: (arg: A) => Promise; - arg: A; - resolveCb: (res: R) => void; - rejectCb: (err: any) => void; -} - -// Implementation of a simple work queue which takes a task, puts it to the -// queue and processes the task when all other tasks that were queued before -// have completed. This is useful if the task consists of async steps and one -// wants to be sure that at any given time only one task is being processed -// not to interfere with the other tasks. -export class Workq { - private name: string; - private queue: Task[]; - private inprog: boolean; - - constructor (name?: string) { - this.name = name || ''; - this.queue = []; - this.inprog = false; - } - - // Put a task to the queue for processing. - // - // Since the method is async the caller can decide if she wants to block - // waiting until the task is processed or continue immediately. - // - // @param arg Opaque context parameter passed to the func. - // @param func Async function returning a promise. - // @returns A promise fulfilled when the task is done. - // The value of the promise is the value returned by the func. - async push (arg: A, func: (arg: A) => Promise): Promise { - assert.strictEqual(typeof func, 'function'); - - return new Promise((resolve, reject) => { - let resolveCb = resolve; - let rejectCb = reject; - let task: Task = { func, arg, resolveCb, rejectCb }; - - this.queue.push(task); - if (!this.inprog) { - this.inprog = true; - this._nextTask(); - } else { - log.trace(`${this.name} task has been queued for later`); - } - }); - } - - // Pick and dispatch next task from the queue. 
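// A short usage sketch for the Workq above (the task functions are
// hypothetical): although both pushes return promises immediately, the second
// task is dispatched only after the first one has fully completed, so the
// async steps of different tasks never interleave.
async function workqExample() {
  const q = new Workq('example');
  const results: string[] = [];

  const first = q.push('a', async (arg: string) => {
    await new Promise<void>((r) => setTimeout(r, 10)); // simulate async work
    results.push(arg);
    return arg.toUpperCase();
  });
  const second = q.push('b', async (arg: string) => {
    results.push(arg);
    return arg.toUpperCase();
  });

  console.log(await first, await second); // A B
  console.log(results);                   // [ 'a', 'b' ] – strictly in push order
}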
- _nextTask () { - var self = this; - - var task = this.queue.shift(); - if (!task) { - self.inprog = false; - return; - } - - log.trace(`Dispatching a new ${this.name} task`); - task - .func(task.arg) - .then((res: any) => task!.resolveCb(res)) - .catch((err: any) => task!.rejectCb(err)) - .finally(() => self._nextTask()); - } -} diff --git a/csi/moac/test/.gitignore b/csi/moac/test/.gitignore deleted file mode 100644 index 85f058f4e..000000000 --- a/csi/moac/test/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/*.js.map -/persistence_test.js diff --git a/csi/moac/test/csi_test.js b/csi/moac/test/csi_test.js deleted file mode 100644 index b3f869c28..000000000 --- a/csi/moac/test/csi_test.js +++ /dev/null @@ -1,1217 +0,0 @@ -// Unit tests for the CSI controller - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const fs = require('fs').promises; -const grpc = require('@grpc/grpc-js'); -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const EventEmitter = require('events'); -const { CsiServer, csi } = require('../dist/csi'); -const { GrpcError, grpcCode } = require('../dist/grpc_client'); -const { Registry } = require('../dist/registry'); -const { Volume } = require('../dist/volume'); -const { Volumes } = require('../dist/volumes'); -const { shouldFailWith } = require('./utils'); - -const SOCKPATH = '/tmp/csi_controller_test.sock'; -// uuid used whenever we need some uuid and don't care about which one -const UUID = 'd01b8bfb-0116-47b0-a03a-447fcbdc0e99'; -const UUID2 = 'a01b8bfb-0116-47b0-a03a-447fcbdc0e92'; -const YAML_TRUE_VALUE = [ - 'y', 'Y', 'yes', 'Yes', 'YES', - 'true', 'True', 'TRUE', - 'on', 'On', 'ON' -]; - -// Return gRPC CSI client for given csi service -function getCsiClient (svc) { - const client = new csi[svc]('unix://' + SOCKPATH, grpc.credentials.createInsecure()); - // promisifying wrapper for calling api methods - client.pcall = (method, args) => { - return new Promise((resolve, reject) => { - client[method](args, (err, res) => { - if (err) reject(err); - else resolve(res); - }); - }); - }; - return client; -} - -module.exports = function () { - it('should start even if there is stale socket file', async () => { - await fs.writeFile(SOCKPATH, 'blabla'); - const server = new CsiServer(SOCKPATH); - await server.start(); - await server.stop(); - try { - await fs.stat(SOCKPATH); - } catch (err) { - if (err.code === 'ENOENT') { - return; - } - throw err; - } - throw new Error('Server did not clean up the socket file'); - }); - - describe('identity', function () { - let server; - let client; - - // create csi server and client - before(async () => { - server = new CsiServer(SOCKPATH); - await server.start(); - client = getCsiClient('Identity'); - }); - - after(async () => { - if (server) { - await server.stop(); - } - if (client) { - client.close(); - } - }); - - it('get plugin info', async () => { - const res = await client.pcall('getPluginInfo', {}); - // If you need to change any value of properties below, you will - // need to change source code of csi node server too! - expect(res.name).to.equal('io.openebs.csi-mayastor'); - expect(res.vendorVersion).to.equal('0.1'); - expect(Object.keys(res.manifest)).to.have.lengthOf(0); - }); - - it('get plugin capabilities', async () => { - const res = await client.pcall('getPluginCapabilities', {}); - // If you need to change any capabilities below, you will - // need to change source code of csi node server too! 
- expect(res.capabilities).to.have.lengthOf(2); - expect(res.capabilities[0].service.type).to.equal('CONTROLLER_SERVICE'); - expect(res.capabilities[1].service.type).to.equal( - 'VOLUME_ACCESSIBILITY_CONSTRAINTS' - ); - }); - - it('probe not ready', async () => { - const res = await client.pcall('probe', {}); - expect(res.ready).to.have.property('value', false); - }); - - it('probe ready', async () => { - server.makeReady({}, {}); - const res = await client.pcall('probe', {}); - expect(res.ready).to.have.property('value', true); - }); - }); - - describe('controller', function () { - let client; - let registry, volumes; - let getCapacityStub, createVolumeStub, listVolumesStub, getVolumesStub, destroyVolumeStub; - const volumeArgs = { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 100, - limitBytes: 20, - protocol: 'nvmf' - }; - - async function mockedServer (pools, replicas, nexus) { - const server = new CsiServer(SOCKPATH); - await server.start(); - registry = new Registry({}); - volumes = new Volumes(registry); - server.makeReady(registry, volumes); - getCapacityStub = sinon.stub(registry, 'getCapacity'); - createVolumeStub = sinon.stub(volumes, 'createVolume'); - listVolumesStub = sinon.stub(volumes, 'list'); - getVolumesStub = sinon.stub(volumes, 'get'); - destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); - return server; - } - - // create csi server and client - before(() => { - client = getCsiClient('Controller'); - }); - - after(() => { - if (client) { - client.close(); - client = null; - } - }); - - describe('generic', function () { - let server; - - afterEach(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - it('should get controller capabilities', async () => { - server = await mockedServer(); - const res = await client.pcall('controllerGetCapabilities', {}); - const caps = res.capabilities; - expect(caps).to.have.lengthOf(4); - expect(caps[0].rpc.type).to.equal('CREATE_DELETE_VOLUME'); - expect(caps[1].rpc.type).to.equal('PUBLISH_UNPUBLISH_VOLUME'); - expect(caps[2].rpc.type).to.equal('LIST_VOLUMES'); - expect(caps[3].rpc.type).to.equal('GET_CAPACITY'); - }); - - it('should not get controller capabilities if not ready', async () => { - server = await mockedServer(); - server.undoReady(); - await shouldFailWith(grpcCode.UNAVAILABLE, () => - client.pcall('controllerGetCapabilities', {}) - ); - }); - - it('should return unimplemented error for CreateSnapshot', async () => { - server = await mockedServer(); - await shouldFailWith(grpcCode.UNIMPLEMENTED, () => - client.pcall('createSnapshot', { - sourceVolumeId: 'd01b8bfb-0116-47b0-a03a-447fcbdc0e99', - name: 'blabla2' - }) - ); - }); - - it('should return unimplemented error for DeleteSnapshot', async () => { - server = await mockedServer(); - await shouldFailWith(grpcCode.UNIMPLEMENTED, () => - client.pcall('deleteSnapshot', { snapshotId: 'blabla' }) - ); - }); - - it('should return unimplemented error for ListSnapshots', async () => { - server = await mockedServer(); - await shouldFailWith(grpcCode.UNIMPLEMENTED, () => - client.pcall('listSnapshots', {}) - ); - }); - - it('should return unimplemented error for ControllerExpandVolume', async () => { - server = await mockedServer(); - await shouldFailWith(grpcCode.UNIMPLEMENTED, () => - client.pcall('controllerExpandVolume', { - volumeId: UUID, - capacityRange: { - requiredBytes: 200, - limitBytes: 500 - } - }) - ); - }); - }); - - describe('CreateVolume', function () { - let server; - const 
defaultParams = { protocol: 'nvmf', repl: '1' }; - - // place-holder for return value from createVolume when we don't care - // if the input matches the output data (i.e. when testing error cases). - function returnedVolume (params) { - const vol = new Volume(UUID, registry, new EventEmitter(), { - replicaCount: parseInt(params.repl) || 1, - local: YAML_TRUE_VALUE.indexOf(params.local) >= 0, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 20, - protocol: params.protocol - }); - sinon.stub(vol, 'getSize').returns(20); - sinon.stub(vol, 'getNodeName').returns('some-node'); - sinon.stub(vol, 'getReplicas').callsFake(() => { - const replicas = []; - for (let i = 1; i <= vol.spec.replicaCount; i++) { - // poor approximation of replica object, but it works - replicas.push({ - pool: { node: { name: `node${i}` } } - }); - } - return replicas; - }); - return vol; - } - - beforeEach(async () => { - server = await mockedServer(); - }); - - afterEach(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - it('should create a volume and return parameters in volume context', async () => { - const parameters = { protocol: 'iscsi', repl: 3, local: 'true', blah: 'again' }; - createVolumeStub.resolves(returnedVolume(parameters)); - const result = await client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters - }); - // volume context is a of type map - const expected = {}; - for (const key in parameters) { - expected[key] = parameters[key].toString(); - } - expect(result.volume.volumeId).to.equal(UUID); - expect(result.volume.capacityBytes).to.equal(20); - expect(result.volume.volumeContext).to.eql(expected); - expect(result.volume.accessibleTopology).to.have.lengthOf(3); - sinon.assert.calledWith(createVolumeStub, UUID, { - replicaCount: 3, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 20, - protocol: 'iscsi' - }); - }); - - it('should fail if topology requirement other than hostname', async () => { - createVolumeStub.resolves(returnedVolume(defaultParams)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - accessibilityRequirements: { - requisite: [{ segments: { rack: 'some-rack-info' } }], - preferred: [] - }, - parameters: { protocol: 'nvmf' } - }) - ); - }); - - it('should fail if volume source', async () => { - createVolumeStub.resolves(returnedVolume(defaultParams)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - volumeContentSource: { volume: { volumeId: UUID } }, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: { protocol: 'nvmf' } - }) - ); - }); - - it('should fail if capability other than SINGLE_NODE_WRITER', async () => { - createVolumeStub.resolves(returnedVolume(defaultParams)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_READER_ONLY' 
}, - block: {} - } - ], - parameters: { protocol: 'nvmf' } - }) - ); - }); - - it('should fail if grpc exception is thrown', async () => { - createVolumeStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'Something went wrong') - ); - await shouldFailWith(grpcCode.INTERNAL, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - parameters: { protocol: 'nvmf' } - }) - ); - }); - - it('should fail if volume name is not in expected form', async () => { - createVolumeStub.resolves(returnedVolume(defaultParams)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: UUID, // missing pvc- prefix - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - parameters: { protocol: 'nvmf' } - }) - ); - }); - - it('should fail if ioTimeout is used with protocol other than nvmf', async () => { - const parameters = { protocol: 'iscsi', ioTimeout: '30' }; - createVolumeStub.resolves(returnedVolume(parameters)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - parameters: { - protocol: 'iscsi', - ioTimeout: 30 - } - }) - ); - }); - - it('should fail if ioTimeout has invalid value', async () => { - const parameters = { protocol: 'nvmf', ioTimeout: 'bla' }; - createVolumeStub.resolves(returnedVolume(parameters)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - parameters: { - protocol: 'nvmf', - ioTimeout: 'non-sense' - } - }) - ); - }); - - it('should fail if share protocol is not specified', async () => { - const params = { ioTimeout: '30', local: 'On' }; - createVolumeStub.resolves(returnedVolume(params)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 10, - limitBytes: 20 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - parameters: { ioTimeout: '60' } - }) - ); - }); - - it('should create volume on specified node', async () => { - const params = { protocol: 'nvmf', local: 'Y' }; - createVolumeStub.resolves(returnedVolume(params)); - const result = await client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 0 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - filesystem: {} - } - ], - accessibilityRequirements: { - requisite: [{ segments: { 'kubernetes.io/hostname': 'node' } }] - }, - parameters: params - }); - expect(result.volume.volumeId).to.equal(UUID); - expect(result.volume.accessibleTopology).to.have.lengthOf(1); - expect(result.volume.accessibleTopology[0].segments['kubernetes.io/hostname']).to.equal('node1'); - sinon.assert.calledWith(createVolumeStub, UUID, { - replicaCount: 1, - local: true, - preferredNodes: [], - requiredNodes: ['node'], - requiredBytes: 50, - limitBytes: 0, - protocol: 'nvmf' - 
}); - }); - - it('should create volume on preferred node', async () => { - const params = { protocol: 'nvmf', local: 'No' }; - createVolumeStub.resolves(returnedVolume(params)); - await client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 50 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - accessibilityRequirements: { - preferred: [ - { - segments: { - // should ignore unknown segment if preferred - rack: 'some-rack-info', - 'kubernetes.io/hostname': 'node' - } - } - ] - }, - parameters: params - }); - sinon.assert.calledWith(createVolumeStub, UUID, { - replicaCount: 1, - local: false, - preferredNodes: ['node'], - requiredNodes: [], - requiredBytes: 50, - limitBytes: 50, - protocol: 'nvmf' - }); - }); - - it('should create volume with specified number of replicas', async () => { - const params = { repl: '3', protocol: 'nvmf' }; - createVolumeStub.resolves(returnedVolume(params)); - await client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 70 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: params - }); - sinon.assert.calledWith(createVolumeStub, UUID, { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 50, - limitBytes: 70, - protocol: 'nvmf' - }); - }); - - it('should fail if number of replicas is not a number', async () => { - createVolumeStub.resolves(returnedVolume(defaultParams)); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 70 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: { repl: 'bla2', protocol: 'nvmf' } - }) - ); - }); - - it('should serialize all requests and detect duplicates', (done) => { - // We must sleep in the stub. Otherwise reply is sent before the second - // request comes in. 
- this.timeout(1000); - const delay = 50; - createVolumeStub.callsFake(async () => { - await sleep(delay); - return returnedVolume(defaultParams); - }); - const create1 = client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 70 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: { repl: '3', protocol: 'nvmf' } - }); - const create2 = client.pcall('createVolume', { - name: 'pvc-' + UUID2, - capacityRange: { - requiredBytes: 50, - limitBytes: 70 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: { repl: '3', protocol: 'nvmf' } - }); - const create3 = client.pcall('createVolume', { - name: 'pvc-' + UUID, - capacityRange: { - requiredBytes: 50, - limitBytes: 70 - }, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ], - parameters: { repl: '3', protocol: 'nvmf' } - }); - const start = new Date(); - Promise.all([create1, create2, create3]).then((results) => { - expect(results).to.have.lengthOf(3); - expect(results[0].volume.volumeId).to.equal(UUID); - expect(results[1].volume.volumeId).to.equal(UUID2); - expect(results[2].volume.volumeId).to.equal(UUID); - sinon.assert.calledTwice(createVolumeStub); - expect(new Date() - start).to.be.above(2 * delay - 1); - done(); - }); - }); - }); - - describe('DeleteVolume', function () { - let server; - - beforeEach(async () => { - server = await mockedServer(); - }); - - afterEach(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - it('should delete volume with multiple replicas', async () => { - destroyVolumeStub.resolves(); - - await client.pcall('deleteVolume', { volumeId: UUID }); - - sinon.assert.calledOnce(destroyVolumeStub); - sinon.assert.calledWith(destroyVolumeStub, UUID); - }); - - it('should fail if backend grpc call fails', async () => { - destroyVolumeStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'Something went wrong') - ); - - await shouldFailWith(grpcCode.INTERNAL, () => - client.pcall('deleteVolume', { volumeId: UUID }) - ); - - sinon.assert.calledOnce(destroyVolumeStub); - }); - - it('should detect duplicate delete volume request', (done) => { - // We must sleep in the stub. Otherwise reply is sent before the second - // request comes in. 
- destroyVolumeStub.callsFake(async () => { - await sleep(10); - }); - const delete1 = client.pcall('deleteVolume', { volumeId: UUID }); - const delete2 = client.pcall('deleteVolume', { volumeId: UUID }); - Promise.all([delete1, delete2]).then((results) => { - sinon.assert.calledOnce(destroyVolumeStub); - expect(results).to.have.lengthOf(2); - done(); - }); - }); - }); - - describe('ListVolumes', function () { - let server; - // uuid except the last two digits - const uuidBase = '4334cc8a-2fed-45ed-866f-3716639db5'; - - // Create army of volumes (100) - before(async () => { - const vols = []; - for (let i = 0; i < 10; i++) { - for (let j = 0; j < 10; j++) { - const vol = new Volume(uuidBase + i + j, registry, new EventEmitter(), { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 100, - limitBytes: 20, - protocol: 'nvmf' - }); - const getSizeStub = sinon.stub(vol, 'getSize'); - getSizeStub.returns(100); - const getNodeName = sinon.stub(vol, 'getNodeName'); - getNodeName.returns('node'); - vols.push(vol); - } - } - server = await mockedServer(); - listVolumesStub.returns(vols); - }); - - after(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - it('should list all volumes', async () => { - const resp = await client.pcall('listVolumes', {}); - expect(resp.nextToken).to.be.empty; - const vols = resp.entries.map((ent) => ent.volume); - expect(vols).to.have.lengthOf(100); - for (let i = 0; i < 10; i++) { - for (let j = 0; j < 10; j++) { - expect(vols[10 * i + j].volumeId).to.equal(uuidBase + i + j); - } - } - }); - - it('should list volumes page by page', async () => { - const pageSize = 17; - let next; - let allVols = []; - - do { - const resp = await client.pcall('listVolumes', { - maxEntries: pageSize, - startingToken: next - }); - const vols = resp.entries.map((ent) => ent.volume); - next = resp.nextToken; - if (next) { - expect(vols).to.have.lengthOf(pageSize); - } else { - expect(vols).to.have.lengthOf(100 % pageSize); - } - allVols = allVols.concat(vols); - } while (next); - - expect(allVols).to.have.lengthOf(100); - for (let i = 0; i < 10; i++) { - for (let j = 0; j < 10; j++) { - expect(allVols[10 * i + j].volumeId).to.equal(uuidBase + i + j); - } - } - }); - - it('should fail if starting token is unknown', async () => { - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('listVolumes', { startingToken: 'asdfquwer' }) - ); - }); - }); - - describe('ControllerPublishVolume', function () { - let server; - - before(async () => { - server = await mockedServer(); - }); - - after(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - afterEach(() => { - getVolumesStub.reset(); - }); - - it('should publish volume', async () => { - const nvmfUri = `nvmf://host/nqn-${UUID}`; - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const publishStub = sinon.stub(volume, 'publish'); - publishStub.resolves(nvmfUri); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - const reply = await client.pcall('controllerPublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node2', - readonly: false, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { - protocol: 'nvmf', - ioTimeout: 0 - } - }); - expect(reply.publishContext.uri).to.equal(nvmfUri); - 
expect(reply.publishContext.ioTimeout).to.equal('0'); - sinon.assert.calledOnce(getVolumesStub); - sinon.assert.calledWith(getVolumesStub, UUID); - sinon.assert.calledOnce(publishStub); - sinon.assert.calledWith(publishStub, 'node2'); - }); - - it('should serialize all requests and detect duplicates', (done) => { - const delay = 50; - const iscsiUri = `iscsi://host/iqn-${UUID}`; - const publishArgs = { - volumeId: UUID, - nodeId: 'mayastor://node2', - readonly: false, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { protocol: 'iscsi' } - }; - const publishArgs2 = _.clone(publishArgs); - publishArgs.volumeId = UUID2; - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const publishStub = sinon.stub(volume, 'publish'); - // We must sleep in the stub. Otherwise reply is sent before the second - // request comes in. - publishStub.callsFake(async () => { - await sleep(delay); - return iscsiUri; - }); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - const publish1 = client.pcall('controllerPublishVolume', publishArgs); - const publish2 = client.pcall('controllerPublishVolume', publishArgs2); - const publish3 = client.pcall('controllerPublishVolume', publishArgs); - const start = new Date(); - Promise.all([publish1, publish2, publish3]).then((results) => { - expect(results).to.have.lengthOf(3); - expect(results[0].publishContext.uri).to.equal(iscsiUri); - expect(results[1].publishContext.uri).to.equal(iscsiUri); - expect(results[2].publishContext.uri).to.equal(iscsiUri); - sinon.assert.calledTwice(publishStub); - expect(new Date() - start).to.be.above(2 * delay - 1); - done(); - }); - }); - - it('should not publish volume if it does not exist', async () => { - getVolumesStub.returns(); - - await shouldFailWith(grpcCode.NOT_FOUND, () => - client.pcall('controllerPublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node', - readonly: false, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { protocol: 'nvmf' } - }) - ); - sinon.assert.calledOnce(getVolumesStub); - sinon.assert.calledWith(getVolumesStub, UUID); - }); - - it('should not publish readonly volume', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const publishStub = sinon.stub(volume, 'publish'); - publishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('controllerPublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node', - readonly: true, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { protocol: 'nvmf' } - }) - ); - }); - - it('should not publish volume with unsupported capability', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const publishStub = sinon.stub(volume, 'publish'); - publishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('controllerPublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node', - 
readonly: false, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_READER_ONLY' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { protocol: 'nvmf' } - }) - ); - }); - - it('should not publish volume on node with invalid ID', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const publishStub = sinon.stub(volume, 'publish'); - publishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('controllerPublishVolume', { - volumeId: UUID, - nodeId: 'mayastor2://node/10.244.2.15:10124', - readonly: false, - volumeCapability: { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - mount: { - fsType: 'xfs', - mount_flags: 'ro' - } - }, - volumeContext: { protocol: 'nvmf' } - }) - ); - }); - }); - - describe('ControllerUnpublishVolume', function () { - let server; - - before(async () => { - server = await mockedServer(); - }); - - after(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - afterEach(() => { - getVolumesStub.reset(); - }); - - it('should not return an error on unpublish volume if it does not exist', async () => { - getVolumesStub.returns(null); - - const error = await client.pcall('controllerUnpublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node' - }); - - expect(error).is.empty; - }); - - it('should not unpublish volume on pool with invalid ID', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const unpublishStub = sinon.stub(volume, 'unpublish'); - unpublishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - client.pcall('controllerUnpublishVolume', { - volumeId: UUID, - nodeId: 'mayastor2://node/10.244.2.15:10124' - }) - ); - }); - - it('should unpublish volume', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const unpublishStub = sinon.stub(volume, 'unpublish'); - unpublishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await client.pcall('controllerUnpublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://node' - }); - - sinon.assert.calledOnce(getVolumesStub); - sinon.assert.calledWith(getVolumesStub, UUID); - sinon.assert.calledOnce(unpublishStub); - }); - - it('should unpublish volume even if on a different node', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const unpublishStub = sinon.stub(volume, 'unpublish'); - unpublishStub.resolves(); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - await client.pcall('controllerUnpublishVolume', { - volumeId: UUID, - nodeId: 'mayastor://another-node' - }); - - sinon.assert.calledOnce(getVolumesStub); - sinon.assert.calledWith(getVolumesStub, UUID); - sinon.assert.calledOnce(unpublishStub); - }); - - it('should detect duplicate unpublish volume request', (done) => { - const unpublishArgs = { - volumeId: UUID, - nodeId: 'mayastor://another-node' - }; - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - const unpublishStub = sinon.stub(volume, 'unpublish'); - // We 
must sleep in the stub. Otherwise reply is sent before the second - // request comes in. - unpublishStub.callsFake(async () => { - await sleep(10); - }); - const getNodeNameStub = sinon.stub(volume, 'getNodeName'); - getNodeNameStub.returns('node'); - getVolumesStub.returns(volume); - - const unpublish1 = client.pcall('controllerUnpublishVolume', unpublishArgs); - const unpublish2 = client.pcall('controllerUnpublishVolume', unpublishArgs); - Promise.all([unpublish1, unpublish2]).then((results) => { - sinon.assert.calledOnce(unpublishStub); - expect(results).to.have.lengthOf(2); - done(); - }); - }); - }); - - describe('ValidateVolumeCapabilities', function () { - let server; - - before(async () => { - server = await mockedServer(); - }); - - after(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - it('should report SINGLE_NODE_WRITER cap as valid', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - getVolumesStub.returns(volume); - const caps = [ - 'SINGLE_NODE_WRITER', - 'SINGLE_NODE_READER_ONLY', - 'MULTI_NODE_READER_ONLY', - 'MULTI_NODE_SINGLE_WRITER', - 'MULTI_NODE_MULTI_WRITER' - ]; - const resp = await client.pcall('validateVolumeCapabilities', { - volumeId: UUID, - volumeCapabilities: caps.map((c) => { - return { - accessMode: { mode: c }, - block: {} - }; - }) - }); - expect(resp.confirmed.volumeCapabilities).to.have.lengthOf(1); - expect(resp.confirmed.volumeCapabilities[0].accessMode.mode).to.equal( - 'SINGLE_NODE_WRITER' - ); - expect(resp.message).to.have.lengthOf(0); - }); - - it('should report other caps than SINGLE_NODE_WRITER as invalid', async () => { - const volume = new Volume(UUID, registry, new EventEmitter(), volumeArgs); - getVolumesStub.returns(volume); - const caps = [ - 'SINGLE_NODE_READER_ONLY', - 'MULTI_NODE_READER_ONLY', - 'MULTI_NODE_SINGLE_WRITER', - 'MULTI_NODE_MULTI_WRITER' - ]; - const resp = await client.pcall('validateVolumeCapabilities', { - volumeId: UUID, - volumeCapabilities: caps.map((c) => { - return { - accessMode: { mode: c }, - block: {} - }; - }) - }); - expect(resp.confirmed).to.be.null; - expect(resp.message).to.match(/SINGLE_NODE_WRITER/); - }); - - it('should return error if volume does not exist', async () => { - getVolumesStub.returns(null); - await shouldFailWith(grpcCode.NOT_FOUND, () => - client.pcall('validateVolumeCapabilities', { - volumeId: UUID, - volumeCapabilities: [ - { - accessMode: { mode: 'SINGLE_NODE_WRITER' }, - block: {} - } - ] - }) - ); - }); - }); - - describe('GetCapacity', function () { - let server; - - before(async () => { - server = await mockedServer(); - }); - - after(async () => { - if (server) { - await server.stop(); - server = null; - } - }); - - afterEach(() => { - getCapacityStub.reset(); - }); - - it('should get capacity of a single node with multiple pools', async () => { - getCapacityStub.returns(75); - const resp = await client.pcall('getCapacity', { - accessibleTopology: { - segments: { - 'kubernetes.io/hostname': 'node1' - } - } - }); - expect(resp.availableCapacity).to.equal(75); - sinon.assert.calledOnce(getCapacityStub); - sinon.assert.calledWith(getCapacityStub, 'node1'); - }); - - it('should get capacity of all pools on all nodes', async () => { - getCapacityStub.returns(80); - const resp = await client.pcall('getCapacity', {}); - expect(resp.availableCapacity).to.equal(80); - sinon.assert.calledOnce(getCapacityStub); - sinon.assert.calledWith(getCapacityStub, undefined); - }); - }); - }); -}; diff --git 
a/csi/moac/test/event_stream_test.js b/csi/moac/test/event_stream_test.js deleted file mode 100644 index ef6b05485..000000000 --- a/csi/moac/test/event_stream_test.js +++ /dev/null @@ -1,294 +0,0 @@ -// Unit tests for event stream - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const expect = require('chai').expect; -const EventEmitter = require('events'); -const sinon = require('sinon'); -const { Pool } = require('../dist/pool'); -const { Replica } = require('../dist/replica'); -const { Nexus } = require('../dist/nexus'); -const { Registry } = require('../dist/registry'); -const { Volume } = require('../dist/volume'); -const { Volumes } = require('../dist/volumes'); -const { EventStream } = require('../dist/event_stream'); -const parse = require('url-parse'); - -module.exports = function () { - // Easy generator of a test node with fake pools, replicas and nexus - // omitting all properties that are not necessary for the event stream. - class FakeNode { - constructor (name, pools, nexus) { - this.name = name; - this.pools = pools.map((obj) => { - const p = new Pool({ name: obj.name, disks: ['/dev/sda'] }); - p.node = new EventEmitter(); - let index = 0; - obj.replicas.forEach((uuid) => { - index++; - p.registerReplica(new Replica({ uuid, uri: `bdev:///${uuid}?uuid=${index}` })); - }); - return p; - }); - this.nexus = nexus.map((uuid) => new Nexus({ uuid, children: [] })); - } - } - - it('should read events from registry and volumes stream', (done) => { - const registry = new Registry({}); - const volumes = new Volumes(registry); - const getNodesStub = sinon.stub(registry, 'getNodes'); - const getVolumeStub = sinon.stub(volumes, 'list'); - // The initial state of the nodes. "new" event should be written to the - // stream for all these objects and one "sync" event for each node meaning - // that the reader has caught up with the initial state. 
- getNodesStub.returns([ - new FakeNode( - 'node1', - [ - { - name: 'pool1', - replicas: ['uuid1', 'uuid2'] - }, - { - name: 'pool2', - replicas: ['uuid3'] - } - ], - ['nexus1', 'nexus2'] - ), - new FakeNode( - 'node2', - [ - { - name: 'pool3', - replicas: ['uuid4', 'uuid5', 'uuid6'] - } - ], - [] - ) - ]); - getVolumeStub.returns([ - new Volume('volume1', registry, new EventEmitter(), { - replicaCount: 1, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 100, - limitBytes: 0, - protocol: 'nvmf' - }), - new Volume('volume2', registry, new EventEmitter(), { - replicaCount: 1, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 100, - limitBytes: 0, - protocol: 'nvmf' - }) - ]); - - // set low high water mark to test buffered reads - const stream = new EventStream( - { - registry, - volumes - }, - { - highWaterMark: 3, - lowWaterMark: 1 - } - ); - const events = []; - let realUuid = 1; - - stream.on('data', (ev) => { - events.push(ev); - }); - - setTimeout(() => { - registry.emit('pool', { - eventType: 'new', - object: { name: 'pool4' } - }); - registry.emit('pool', { - eventType: 'mod', - object: { name: 'pool3' } - }); - registry.emit('pool', { - eventType: 'del', - object: { name: 'pool4' } - }); - - setTimeout(() => { - // exhibit buffering - stream.pause(); - - registry.emit('node', { - eventType: 'sync', - object: { name: 'node3' } - }); - - registry.emit('replica', { - eventType: 'new', - object: { uuid: 'replica1', uri: `bdev:///replica1?uuid=${realUuid++}` } - }); - registry.emit('replica', { - eventType: 'mod', - object: { uuid: 'replica2', uri: `bdev:///replica2?uuid=${realUuid++}` } - }); - registry.emit('replica', { - eventType: 'del', - object: { uuid: 'replica3', uri: `bdev:///replica3?uuid=${realUuid++}` } - }); - - registry.emit('nexus', { - eventType: 'new', - object: { uuid: 'nexus1' } - }); - registry.emit('nexus', { - eventType: 'mod', - object: { uuid: 'nexus2' } - }); - registry.emit('nexus', { - eventType: 'del', - object: { uuid: 'nexus3' } - }); - - volumes.emit('volume', { - eventType: 'new', - object: { uuid: 'volume3' } - }); - volumes.emit('volume', { - eventType: 'mod', - object: { uuid: 'volume4' } - }); - volumes.emit('volume', { - eventType: 'del', - object: { uuid: 'volume5' } - }); - - registry.emit('unknown', { - eventType: 'new', - object: { name: 'something' } - }); - - stream.resume(); - - setTimeout(() => { - stream.destroy(); - }, 1); - }, 1); - }, 1); - - stream.once('end', () => { - let i = 0; - // A note about ordering of events that are part of the initial state: - // First go pools. Each pool is followed by its replicas. Nexus go last. - // Then follow volume "new" events. 
- expect(events).to.have.lengthOf.at.least(30); - expect(events[i].kind).to.equal('node'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('node1'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid1'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid2'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool1'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid3'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool2'); - expect(events[i].kind).to.equal('nexus'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('nexus1'); - expect(events[i].kind).to.equal('nexus'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('nexus2'); - expect(events[i].kind).to.equal('node'); - expect(events[i].eventType).to.equal('sync'); - expect(events[i++].object.name).to.equal('node1'); - expect(events[i].kind).to.equal('node'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('node2'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid4'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid5'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('uuid6'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool3'); - expect(events[i].kind).to.equal('node'); - expect(events[i].eventType).to.equal('sync'); - expect(events[i++].object.name).to.equal('node2'); - expect(events[i].kind).to.equal('volume'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('volume1'); - expect(events[i].kind).to.equal('volume'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('volume2'); - // these events happened after the stream was created - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool4'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('mod'); - expect(events[i++].object.name).to.equal('pool3'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('del'); - expect(events[i++].object.name).to.equal('pool4'); - expect(events[i].kind).to.equal('node'); - expect(events[i].eventType).to.equal('sync'); - expect(events[i++].object.name).to.equal('node3'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('replica1'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('mod'); - expect(events[i++].object.uuid).to.equal('replica2'); - expect(events[i].kind).to.equal('replica'); - expect(events[i].eventType).to.equal('del'); - 
expect(events[i++].object.uuid).to.equal('replica3'); - expect(events[i].kind).to.equal('nexus'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('nexus1'); - expect(events[i].kind).to.equal('nexus'); - expect(events[i].eventType).to.equal('mod'); - expect(events[i++].object.uuid).to.equal('nexus2'); - expect(events[i].kind).to.equal('nexus'); - expect(events[i].eventType).to.equal('del'); - expect(events[i++].object.uuid).to.equal('nexus3'); - expect(events[i].kind).to.equal('volume'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.uuid).to.equal('volume3'); - expect(events[i].kind).to.equal('volume'); - expect(events[i].eventType).to.equal('mod'); - expect(events[i++].object.uuid).to.equal('volume4'); - expect(events[i].kind).to.equal('volume'); - expect(events[i].eventType).to.equal('del'); - expect(events[i++].object.uuid).to.equal('volume5'); - expect(events).to.have.lengthOf(i); - events.forEach(element => { - if (element.kind === 'replica') { - const realUuid = parse(element.object.uri, true).query.uuid; - expect(realUuid).not.to.be.undefined; - } - }); - done(); - }); - }); -}; diff --git a/csi/moac/test/grpc_client_test.js b/csi/moac/test/grpc_client_test.js deleted file mode 100644 index 469c98195..000000000 --- a/csi/moac/test/grpc_client_test.js +++ /dev/null @@ -1,144 +0,0 @@ -// Unit tests for grpc utility functions. - -'use strict'; - -const expect = require('chai').expect; -const { MayastorServer } = require('./mayastor_mock'); -const { GrpcClient, grpcCode } = require('../dist/grpc_client'); -const { shouldFailWith } = require('./utils'); - -const MS_ENDPOINT = '127.0.0.1:12345'; -const UUID = '88dba542-d187-11ea-87d0-0242ac130003'; - -module.exports = function () { - let srv; - let client; - - function startServer (replyDelay, done) { - if (!srv) { - const pools = [ - { - name: 'pool', - disks: ['/dev/sdb'], - state: 0, - capacity: 100, - used: 4 - } - ]; - srv = new MayastorServer(MS_ENDPOINT, pools, [], [], replyDelay); - srv.start(done); - } else { - done(); - } - } - - function stopServer () { - if (srv) { - srv.stop(); - srv = null; - } - } - - function createClient (timeout) { - client = new GrpcClient(MS_ENDPOINT, timeout); - } - - function destroyClient () { - if (client) { - client.close(); - client = null; - } - } - - describe('server without delay', () => { - before((done) => { - createClient(); - startServer(undefined, done); - }); - - after(() => { - destroyClient(); - stopServer(); - }); - - it('should provide grpc status codes', () => { - expect(grpcCode.NOT_FOUND).to.equal(5); - expect(grpcCode.INTERNAL).to.equal(13); - }); - - it('should call a grpc method', async () => { - const res = await client.call('listPools', {}); - expect(res.pools).to.have.lengthOf(1); - expect(res.pools[0].name).to.equal('pool'); - }); - - it('should throw if grpc method fails', async () => { - await shouldFailWith( - grpcCode.NOT_FOUND, - () => client.call('removeChildNexus', { uuid: UUID, uri: 'bdev://bbb' }) - ); - }); - - // This must come after other tests using the server because it closes it. - it('should throw if the server with connected client shuts down', async () => { - stopServer(); - await shouldFailWith( - grpcCode.CANCELLED, - () => client.call('destroyPool', { name: 'unknown-pool' }) - ); - }); - - // This must be the last test here because it closes the client handle. 
-    it('should release the client after close', async () => {
-      client.close();
-      try {
-        await client.call('listPools', {});
-      } catch (err) {
-        return;
-      }
-      throw new Error('Expected to throw error');
-    });
-  });
-
-  describe('server with delayed replies', () => {
-    const delayMs = 20;
-
-    before((done) => {
-      startServer(delayMs, done);
-    });
-
-    after(() => {
-      stopServer();
-    });
-
-    afterEach(destroyClient);
-
-    it('should honor timeout set for the grpc call', async () => {
-      createClient();
-      await shouldFailWith(
-        grpcCode.DEADLINE_EXCEEDED,
-        () => client.call('listPools', {}, delayMs / 2)
-      );
-    });
-
-    it('should honor the default timeout set for the grpc client', async () => {
-      createClient(delayMs / 2);
-      await shouldFailWith(
-        grpcCode.DEADLINE_EXCEEDED,
-        () => client.call('listPools', {})
-      );
-    });
-  });
-
-  describe('no server', () => {
-    before(() => createClient());
-    after(destroyClient);
-
-    it('should throw if unable to connect to the server', async () => {
-      await shouldFailWith(
-        grpcCode.UNAVAILABLE,
-        () => client.call('destroyPool', { name: 'unknown-pool' })
-      );
-    });
-  });
-};
diff --git a/csi/moac/test/grpc_enums.js b/csi/moac/test/grpc_enums.js
deleted file mode 100644
index 4ca0291b2..000000000
--- a/csi/moac/test/grpc_enums.js
+++ /dev/null
@@ -1,29 +0,0 @@
-'use strict';
-
-const path = require('path');
-const protoLoader = require('@grpc/proto-loader');
-const grpc = require('@grpc/grpc-js');
-
-const constants = {};
-
-const defs = Object.values(
-  grpc.loadPackageDefinition(
-    protoLoader.loadSync(
-      path.join(__dirname, '..', 'proto', 'mayastor.proto'),
-      {
-        // this is to load google/descriptor.proto
-        includeDirs: ['./node_modules/protobufjs']
-      }
-    )
-  ).mayastor
-);
-
-defs.forEach((ent) => {
-  if (ent.format && ent.format.indexOf('EnumDescriptorProto') >= 0) {
-    ent.type.value.forEach((variant) => {
-      constants[variant.name] = variant.number;
-    });
-  }
-});
-
-module.exports = constants;
diff --git a/csi/moac/test/index.ts b/csi/moac/test/index.ts
deleted file mode 100644
index 62b122992..000000000
--- a/csi/moac/test/index.ts
+++ /dev/null
@@ -1,87 +0,0 @@
-// Unit tests for the moac components
-
-const path = require('path');
-const { spawn } = require('child_process');
-
-const logger = require('../dist/logger');
-const workqTest = require('./workq_test.js');
-const grpcTest = require('./grpc_client_test.js');
-const watcherTest = require('./watcher_test.js');
-const nodeObject = require('./node_test.js');
-const poolObject = require('./pool_test.js');
-const replicaObject = require('./replica_test.js');
-const nexusObject = require('./nexus_test.js');
-const nodeOperator = require('./node_operator_test.js');
-const natsTest = require('./nats_test.js');
-const registryTest = require('./registry_test.js');
-const eventStream = require('./event_stream_test.js');
-const poolOperator = require('./pool_operator_test.js');
-const volumeObject = require('./volume_test.js');
-const volumesTest = require('./volumes_test.js');
-const volumeOperator = require('./volume_operator_test.js');
-const restApi = require('./rest_api_test.js');
-const csiTest = require('./csi_test.js');
-const persistenceTest = require('./persistence_test.ts');
-
-require('source-map-support').install();
-logger.setLevel('silly');
-
-describe('moac', function () {
-  describe('workq', workqTest);
-  describe('grpc client', grpcTest);
-  describe('watcher', watcherTest);
-  describe('node object', nodeObject);
-  describe('pool object', poolObject);
-  describe('replica object',
replicaObject); - describe('nats message bus', natsTest); - describe('nexus object', nexusObject); - describe('node operator', nodeOperator); - describe('registry', registryTest); - describe('event stream', eventStream); - describe('pool operator', poolOperator); - describe('volume object', volumeObject); - describe('volumes', volumesTest); - describe('volume operator', volumeOperator); - describe('rest api', restApi); - describe('csi', csiTest); - describe('persistence', persistenceTest); - - // Start moac without k8s and NATS server just to test basic errors - it('start moac process', function (done) { - // Starting moac, which includes loading all NPM modules from disk, takes - // time when running in docker with FS mounted from non-linux host. - this.timeout(5000); - - const child = spawn(path.join(__dirname, '..', 'moac'), [ - '-s', - '--namespace=default', - // NATS does not run but just to verify that the option works - '--message-bus=127.0.0.1', - // ETCD does not run but just to verify that the option works - '--etcd-endpoint=127.0.0.1', - // shorten the warm up to make the test faster - '--heartbeat-interval=1', - // test various sync options - '--sync-period=10', - '--sync-retry=1', - '--sync-bad-limit=3' - ]); - let stderr = ''; - - child.stdout.on('data', (data: any) => { - if (data.toString().indexOf('🚀') >= 0) { - child.kill(); - } - }); - child.stderr.on('data', (data: any) => { - stderr += data.toString(); - }); - child.on('close', (code: any) => { - if (code === 0) { - done(); - } else { - done(new Error(stderr)); - } - }); - }); -}); diff --git a/csi/moac/test/mayastor_mock.js b/csi/moac/test/mayastor_mock.js deleted file mode 100644 index a192bc370..000000000 --- a/csi/moac/test/mayastor_mock.js +++ /dev/null @@ -1,338 +0,0 @@ -const _ = require('lodash'); -const assert = require('chai').assert; -const path = require('path'); -const protoLoader = require('@grpc/proto-loader'); -const grpc = require('@grpc/grpc-js'); -const enums = require('./grpc_enums'); -const parse = require('url-parse'); - -// each stat is incremented by this each time when stat method is called -const STAT_DELTA = 1000; - -// The problem is that the grpc server creates the keys from proto file -// even if they don't exist. So we have to test that the key is there -// but also that it has not a default value (empty string, zero, ...). -function assertHasKeys (obj, keys, empty) { - empty = empty || []; - for (const key in obj) { - if (keys.indexOf(key) < 0) { - assert( - false, - 'Extra parameter "' + key + '" in object ' + JSON.stringify(obj) - ); - } - } - for (let i = 0; i < keys.length; i++) { - const key = keys[i]; - const val = obj[key]; - if ( - val == null || - // no way to check boolean - (typeof val === 'string' && val.length === 0 && empty.indexOf(key) < 0) || - (typeof val === 'number' && val === 0 && empty.indexOf(key) < 0) - ) { - assert( - false, - 'Missing property ' + key + ' in object ' + JSON.stringify(obj) - ); - } - } -} - -// Create mayastor mock grpc server with preconfigured storage pool, replica -// and nexus objects. Pools can be added & deleted by means of grpc calls. -// The actual state (i.e. list of pools) can be retrieved by get*() method. 
-class MayastorServer { - constructor (endpoint, pools, replicas, nexus, replyDelay) { - const packageDefinition = protoLoader.loadSync( - path.join(__dirname, '..', 'proto', 'mayastor.proto'), - { - keepCase: false, - longs: Number, - enums: String, - defaults: true, - oneofs: true - } - ); - const mayastor = grpc.loadPackageDefinition(packageDefinition).mayastor; - const srv = new grpc.Server(); - - this.endpoint = endpoint; - this.pools = _.cloneDeep(pools || []); - this.replicas = _.cloneDeep(replicas || []); - this.nexus = _.cloneDeep(nexus || []); - this.statCounter = 0; - const randomUuidQp = () => { - return '?uuid=' + _.random(0, Number.MAX_SAFE_INTEGER); - }; - const uuidQp = (uuid) => { - return '?uuid=' + uuid; - }; - if (replyDelay == null) { - replyDelay = 0; - } - - const self = this; - srv.addService(mayastor.Mayastor.service, { - // When a pool is created we implicitly set state to POOL_ONLINE, - // capacity to 100 and used to 4. - createPool: (call, cb) => { - const args = call.request; - assertHasKeys( - args, - ['name', 'disks'], - [] - ); - let pool = self.pools.find((p) => p.name === args.name); - if (!pool) { - pool = { - name: args.name, - disks: args.disks.map((d) => `aio://${d}`), - state: enums.POOL_ONLINE, - capacity: 100, - used: 4 - }; - self.pools.push(pool); - } - setTimeout(() => cb(null, pool), replyDelay); - }, - destroyPool: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['name']); - const idx = self.pools.findIndex((p) => p.name === args.name); - if (idx >= 0) { - self.pools.splice(idx, 1); - } - setTimeout(() => cb(null, {}), replyDelay); - }, - listPools: (_unused, cb) => { - setTimeout(() => cb(null, { pools: self.pools }), replyDelay); - }, - createReplica: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'pool', 'size', 'thin', 'share']); - let r = self.replicas.find((r) => r.uuid === args.uuid); - if (r) { - return setTimeout(() => cb(null, r), replyDelay); - } - const pool = self.pools.find((p) => p.name === args.pool); - if (!pool) { - const err = new Error('pool not found'); - err.code = grpc.status.NOT_FOUND; - return setTimeout(() => cb(err), replyDelay); - } - if (!args.thin) { - pool.used += args.size; - } - let uri; - if (args.share === 'REPLICA_NONE') { - uri = 'bdev:///' + args.uuid + randomUuidQp(); - } else if (args.share === 'REPLICA_ISCSI') { - uri = 'iscsi://192.168.0.1:3800/' + args.uuid + randomUuidQp(); - } else { - uri = 'nvmf://192.168.0.1:4020/' + args.uuid + randomUuidQp(); - } - - r = { - uuid: args.uuid, - pool: args.pool, - size: args.size, - thin: args.thin, - share: args.share, - uri - }; - self.replicas.push(r); - setTimeout(() => cb(null, r), replyDelay); - }, - destroyReplica: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid']); - const idx = self.replicas.findIndex((r) => r.uuid === args.uuid); - if (idx >= 0) { - const r = self.replicas.splice(idx, 1)[0]; - if (!r.thin) { - const pool = self.pools.find((p) => p.name === r.pool); - pool.used -= r.size; - } - } - setTimeout(() => cb(null, {}), replyDelay); - }, - listReplicas: (_unused, cb) => { - setTimeout(() => cb(null, { replicas: self.replicas }), replyDelay); - }, - statReplicas: (_unused, cb) => { - self.statCounter += STAT_DELTA; - setTimeout(() => cb(null, { - replicas: self.replicas.map((r) => { - return { - uuid: r.uuid, - pool: r.pool, - stats: { - numReadOps: self.statCounter, - numWriteOps: self.statCounter, - bytesRead: self.statCounter, - bytesWritten: self.statCounter - } - }; - 
}) - }), replyDelay); - }, - shareReplica: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'share']); - const r = self.replicas.find((ent) => ent.uuid === args.uuid); - if (!r) { - const err = new Error('not found'); - err.code = grpc.status.NOT_FOUND; - return setTimeout(() => cb(err), replyDelay); - } - assertHasKeys(r, ['uri']); - const realUuid = parse(r.uri, true).query.uuid; - if (args.share === 'REPLICA_NONE') { - r.uri = 'bdev:///' + uuidQp(realUuid); - } else if (args.share === 'REPLICA_ISCSI') { - r.uri = 'iscsi://192.168.0.1:3800/' + r.uuid + uuidQp(realUuid); - } else if (args.share === 'REPLICA_NVMF') { - r.uri = 'nvmf://192.168.0.1:4020/' + r.uuid + uuidQp(realUuid); - } else { - assert(false, 'Invalid share protocol'); - } - r.share = args.share; - setTimeout(() => cb(null, { uri: r.uri }), replyDelay); - }, - createNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'size', 'children']); - let nexus = self.nexus.find((r) => r.uuid === args.uuid); - if (!nexus) { - nexus = { - uuid: args.uuid, - size: args.size, - state: enums.NEXUS_ONLINE, - children: args.children.map((r) => { - return { - uri: r, - state: enums.CHILD_ONLINE, - rebuildProgress: 0 - }; - }) - // device_path omitted - }; - self.nexus.push(nexus); - } - setTimeout(() => cb(null, nexus), replyDelay); - }, - destroyNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid']); - const idx = self.nexus.findIndex((n) => n.uuid === args.uuid); - if (idx >= 0) { - self.nexus.splice(idx, 1); - } - setTimeout(() => cb(null, {}), replyDelay); - }, - listNexus: (_unused, cb) => { - setTimeout(() => cb(null, { nexusList: self.nexus }), replyDelay); - }, - publishNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'share', 'key'], ['key']); - assert.equal(1, args.share); // Must be value of NEXUS_NVMF for now - const idx = self.nexus.findIndex((n) => n.uuid === args.uuid); - if (idx >= 0) { - self.nexus[idx].deviceUri = 'nvmf://host/nqn'; - setTimeout(() => cb(null, { - deviceUri: 'nvmf://host/nqn' - }), replyDelay); - } else { - const err = new Error('not found'); - err.code = grpc.status.NOT_FOUND; - setTimeout(() => cb(err), replyDelay); - } - }, - unpublishNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid']); - const idx = self.nexus.findIndex((n) => n.uuid === args.uuid); - if (idx >= 0) { - delete self.nexus[idx].deviceUri; - setTimeout(() => cb(null, {}), replyDelay); - } else { - const err = new Error('not found'); - err.code = grpc.status.NOT_FOUND; - setTimeout(() => cb(err), replyDelay); - } - }, - addChildNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'uri', 'norebuild']); - const n = self.nexus.find((n) => n.uuid === args.uuid); - if (!n) { - const err = new Error('not found'); - err.code = grpc.status.NOT_FOUND; - return setTimeout(() => cb(err), replyDelay); - } - if (!n.children.find((ch) => ch.uri === args.uri)) { - n.children.push({ - uri: args.uri, - state: enums.CHILD_DEGRADED - }); - } - setTimeout(() => cb(null, { - uri: args.uri, - state: enums.CHILD_DEGRADED, - rebuildProgress: 0 - }), replyDelay); - }, - removeChildNexus: (call, cb) => { - const args = call.request; - assertHasKeys(args, ['uuid', 'uri']); - const n = self.nexus.find((n) => n.uuid === args.uuid); - if (!n) { - const err = new Error('not found'); - err.code = grpc.status.NOT_FOUND; - return setTimeout(() => cb(err), replyDelay); - } - n.children = 
n.children.filter((ch) => ch.uri !== args.uri); - setTimeout(cb, replyDelay); - }, - // dummy impl to silence the warning about unimplemented method - childOperation: (_unused, cb) => { - setTimeout(cb, replyDelay); - } - }); - this.srv = srv; - } - - getPools () { - return this.pools; - } - - getReplicas () { - return this.replicas; - } - - getNexus () { - return this.nexus; - } - - start (done) { - this.srv.bindAsync( - this.endpoint, - grpc.ServerCredentials.createInsecure(), - (err) => { - if (err) return done(err); - this.srv.start(); - done(); - }); - } - - stop () { - this.srv.forceShutdown(); - } -} - -module.exports = { - MayastorServer, - STAT_DELTA -}; diff --git a/csi/moac/test/multi_reporter.js b/csi/moac/test/multi_reporter.js deleted file mode 100644 index a4084949f..000000000 --- a/csi/moac/test/multi_reporter.js +++ /dev/null @@ -1,31 +0,0 @@ -// Mocha does not support multiple reporters running at once. So we use this -// simple wrapper as suggested in: -// https://github.com/mochajs/mocha/pull/1360#issuecomment-407404831 - -const mocha = require('mocha'); - -function MultiReporter (runner, options) { - this.reports = []; - if (!options.reporterOptions.reporters) { - console.log('\nneeds --reporter-options reporters="SPACE_SEPARATED_MOCHA_REPORTS"'); - return; - } - const self = this; - options.reporterOptions.reporters.split(' ').forEach(function (report) { - const ReportClass = mocha.reporters[report]; - if (!ReportClass) { - console.log('\ninvalid report class available: ' + Object.keys(mocha.reporters).join(',')); - return; - } - const reportInstance = new ReportClass(runner, options); - self.reports.push(reportInstance); - }); -} - -MultiReporter.prototype.epilogue = function () { - this.reports.forEach(function (reportInstance) { - reportInstance.epilogue(); - }); -}; - -exports = module.exports = MultiReporter; diff --git a/csi/moac/test/nats_test.js b/csi/moac/test/nats_test.js deleted file mode 100644 index 6a4ab6b34..000000000 --- a/csi/moac/test/nats_test.js +++ /dev/null @@ -1,183 +0,0 @@ -// Unit tests for the nats message bus - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const expect = require('chai').expect; -const { spawn } = require('child_process'); -const nats = require('nats'); -const sleep = require('sleep-promise'); -const { Registry } = require('../dist/registry'); -const { MessageBus } = require('../dist/nats'); -const { waitUntil } = require('./utils'); -const NodeStub = require('./node_stub'); - -const NATS_PORT = '14222'; -const NATS_HOST = '127.0.0.1'; -const NATS_EP = `${NATS_HOST}:${NATS_PORT}`; -const RECONNECT_DELAY = 300; -const GRPC_ENDPOINT = '127.0.0.1:12345'; -const NODE_NAME = 'node-name'; - -let natsProc; - -// Starts nats server and call callback when the server is up and ready. -function startNats (done) { - natsProc = spawn('nats-server', ['-a', NATS_HOST, '-p', NATS_PORT]); - let doneCalled = false; - let stderr = ''; - - natsProc.stderr.on('data', (data) => { - stderr += data.toString(); - if (data.toString().match(/Server is ready/)) { - doneCalled = true; - done(); - } - }); - - natsProc.once('close', (code) => { - natsProc = null; - if (!doneCalled) { - if (code) { - done(new Error(`nats server exited with code ${code}: ${stderr}`)); - } else { - done(new Error('nats server exited prematurely')); - } - return; - } - if (code) { - console.log(`nats server exited with code ${code}: ${stderr}`); - } - }); -} - -// Kill nats server. Though it does not wait for it to exit! 
-function stopNats () { - if (natsProc) natsProc.kill(); -} - -module.exports = function () { - let eventBus; - let registry; - let nc; - const sc = nats.StringCodec(); - - function connectNats (done) { - nats.connect({ - servers: [`nats://${NATS_EP}`] - }) - .then((res) => { - nc = res; - done(); - }) - .catch(() => { - setTimeout(() => { - connectNats(done); - }, 200); - }); - } - - // Create registry, event bus object, nats client and start nat server - before((done) => { - registry = new Registry({}); - registry.Node = NodeStub; - eventBus = new MessageBus(registry, RECONNECT_DELAY); - startNats(err => { - if (err) return done(err); - connectNats(done); - }); - }); - - after(() => { - eventBus.stop(); - if (nc) { - nc.close(); - nc = null; - } - stopNats(); - registry.close(); - }); - - it('should connect to the nats server', async () => { - eventBus.start(NATS_EP); - - await waitUntil(async () => { - return eventBus.isConnected(); - }, 1000, 'connect to NATS'); - }); - - it('should register a node', async () => { - nc.publish('v0/registry', sc.encode(JSON.stringify({ - id: 'v0/register', - data: { id: NODE_NAME, grpcEndpoint: GRPC_ENDPOINT } - }))); - await waitUntil(async () => { - return registry.getNode(NODE_NAME); - }, 1000, 'new node'); - const node = registry.getNode(NODE_NAME); - expect(node.name).to.equal(NODE_NAME); - expect(node.endpoint).to.equal(GRPC_ENDPOINT); - }); - - it('should ignore register request with missing node name', async () => { - nc.publish('v0/registry', sc.encode(JSON.stringify({ - id: 'v0/register', - data: { grpcEndpoint: GRPC_ENDPOINT } - }))); - // small delay to wait for a possible crash of moac - await sleep(10); - }); - - it('should ignore register request with missing grpc endpoint', async () => { - nc.publish('v0/registry', sc.encode(JSON.stringify({ - id: 'v0/register', - data: { id: NODE_NAME } - }))); - // small delay to wait for a possible crash of moac - await sleep(10); - }); - - it('should not crash upon a request with invalid JSON', async () => { - nc.publish('v0/register', sc.encode('{"id": "NODE", "grpcEndpoint": "something"')); - // small delay to wait for a possible crash of moac - await sleep(10); - }); - - it('should deregister a node', async () => { - nc.publish('v0/registry', sc.encode(JSON.stringify({ - id: 'v0/deregister', - data: { id: NODE_NAME } - }))); - expect(registry.getNode(NODE_NAME).isSynced()); - await waitUntil(async () => { - return !registry.getNode(NODE_NAME).isSynced(); - }, 1000, 'node offline'); - }); - - it('should disconnect from the nats server', () => { - eventBus.stop(); - expect(eventBus.isConnected()).to.be.false; - }); - - it('should retry connect until successfull', async () => { - stopNats(); - await sleep(100); - eventBus.start(NATS_EP); - await sleep(500); - - let resolveCb, rejectCb; - const NatsStarted = new Promise((resolve, reject) => { - resolveCb = resolve; - rejectCb = reject; - }); - startNats((err) => { - if (err) rejectCb(err); - else resolveCb(); - }); - await NatsStarted; - await waitUntil(async () => { - return eventBus.isConnected(); - }, 1000, 'connect to NATS'); - }); -}; diff --git a/csi/moac/test/nexus_test.js b/csi/moac/test/nexus_test.js deleted file mode 100644 index f7eb4ac41..000000000 --- a/csi/moac/test/nexus_test.js +++ /dev/null @@ -1,451 +0,0 @@ -// Unit tests for the nexus object - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Node } = 
require('../dist/node'); -const { Replica } = require('../dist/replica'); -const { Nexus } = require('../dist/nexus'); -const { shouldFailWith } = require('./utils'); -const { grpcCode, GrpcError } = require('../dist/grpc_client'); - -const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; - -module.exports = function () { - const props = { - uuid: UUID, - size: 100, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'nvmf://' + UUID, - state: 'CHILD_ONLINE' - }, - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE' - } - ] - }; - - it('should bind the nexus to node and then unbind it', (done) => { - const node = new Node('node'); - const nexus = new Nexus(props); - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.equal(nexus); - expect(nexus.node).to.equal(node); - - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(nexus); - setTimeout(() => { - expect(nexus.node).to.be.undefined; - done(); - }, 0); - }); - nexus.unbind(); - }); - nexus.bind(node); - }); - - it('should offline the nexus', () => { - const node = new Node('node'); - const nexus = new Nexus(props); - node._registerNexus(nexus); - - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(nexus); - expect(nexus.state).to.equal('NEXUS_OFFLINE'); - }); - nexus.offline(); - }); - - describe('mod event', () => { - let node, eventSpy, nexus, newProps; - - beforeEach(() => { - node = new Node('node'); - eventSpy = sinon.spy(node, 'emit'); - nexus = new Nexus(props); - node._registerNexus(nexus); - newProps = _.clone(props); - }); - - it('should emit event upon change of size property', () => { - newProps.size = 1000; - nexus.merge(newProps); - - // First event is new nexus event - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.firstCall, 'nexus', { - eventType: 'new', - object: nexus - }); - sinon.assert.calledWith(eventSpy.secondCall, 'nexus', { - eventType: 'mod', - object: nexus - }); - expect(nexus.size).to.equal(1000); - }); - - it('should emit event upon change of deviceUri property', () => { - newProps.deviceUri = 'nvmf://host/nqn'; - nexus.merge(newProps); - - // First event is new nexus event - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.secondCall, 'nexus', { - eventType: 'mod', - object: nexus - }); - expect(nexus.deviceUri).to.equal('nvmf://host/nqn'); - }); - - it('should emit event upon change of state property', () => { - newProps.state = 'NEXUS_DEGRADED'; - nexus.merge(newProps); - - // First event is new nexus event - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.secondCall, 'nexus', { - eventType: 'mod', - object: nexus - }); - expect(nexus.state).to.equal('NEXUS_DEGRADED'); - }); - - it('should emit event upon change of children property', () => { - newProps.children = [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE' - } - ]; - nexus.merge(newProps); - - // First event is new nexus event - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.secondCall, 'nexus', { - eventType: 'mod', - object: nexus - }); - expect(nexus.children).to.have.lengthOf(1); - expect(nexus.children[0].uri).to.equal(`bdev:///${UUID}`); - expect(nexus.children[0].state).to.equal('CHILD_ONLINE'); - }); - - it('should not emit event when children are the same', () => { - newProps.children = [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE' - }, - { - uri: 'nvmf://' + UUID, - state: 
'CHILD_ONLINE' - } - ]; - nexus.merge(newProps); - - // First event is new nexus event - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'new', - object: nexus - }); - }); - }); - - describe('grpc', () => { - let node, nexus, eventSpy, callStub, isSyncedStub; - - // Create a sample nexus bound to a node - beforeEach((done) => { - node = new Node('node'); - nexus = new Nexus(props); - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('new'); - eventSpy = sinon.spy(node, 'emit'); - callStub = sinon.stub(node, 'call'); - isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(true); - done(); - }); - node._registerNexus(nexus); - }); - - afterEach(() => { - eventSpy.resetHistory(); - callStub.reset(); - isSyncedStub.reset(); - }); - - it('should not publish the nexus with whatever protocol', async () => { - callStub.resolves({ deviceUri: 'file:///dev/whatever0' }); - callStub.rejects(new GrpcError(grpcCode.NOT_FOUND, 'Test failure')); - - await shouldFailWith(grpcCode.NOT_FOUND, async () => { - await nexus.publish('whatever'); - }); - - sinon.assert.notCalled(callStub); - }); - - it('should publish the nexus with iscsi protocol', async () => { - callStub.resolves({ deviceUri: 'iscsi://host/dev/iscsi' }); - - await nexus.publish('iscsi'); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'publishNexus', { - uuid: UUID, - key: '', - share: 2 - }); - expect(nexus.deviceUri).to.equal('iscsi://host/dev/iscsi'); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should publish the nexus with nvmf protocol', async () => { - callStub.resolves({ deviceUri: 'nvmf://host/nvme0' }); - - await nexus.publish('nvmf'); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'publishNexus', { - uuid: UUID, - key: '', - share: 1 - }); - expect(nexus.deviceUri).to.equal('nvmf://host/nvme0'); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should publish the nexus with nvmf protocol', async () => { - callStub.resolves({ deviceUri: 'nvmf://host/nqn' }); - - await nexus.publish('nvmf'); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'publishNexus', { - uuid: UUID, - key: '', - share: 1 - }); - expect(nexus.deviceUri).to.equal('nvmf://host/nqn'); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should unpublish the nexus', async () => { - callStub.resolves({}); - - await nexus.unpublish(); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'unpublishNexus', { uuid: UUID }); - expect(nexus.deviceUri).to.equal(''); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should not fail to unpublish the nexus if it does not exist', async () => { - callStub.rejects(new GrpcError(grpcCode.NOT_FOUND, 'test not found')); - - await nexus.unpublish(); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'unpublishNexus', { uuid: UUID }); - expect(nexus.deviceUri).to.equal(''); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should fake the unpublish if the node is offline', async () => { - 
callStub.resolves({}); - isSyncedStub.returns(false); - - await nexus.unpublish(); - - sinon.assert.notCalled(callStub); - expect(nexus.deviceUri).to.equal(''); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should add replica to nexus', async () => { - const uri = 'iscsi://' + UUID; - const replica = new Replica({ - uuid: UUID, - uri - }); - callStub.resolves({ - uri, - state: 'CHILD_DEGRADED', - rebuildProgress: 0 - }); - - const res = await nexus.addReplica(replica); - - expect(res.uri).to.equal(uri); - expect(res.state).to.equal('CHILD_DEGRADED'); - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'addChildNexus', { - uuid: UUID, - uri: 'iscsi://' + UUID, - norebuild: false - }); - expect(nexus.children).to.have.lengthOf(3); - // should be sorted according to uri - expect(nexus.children[0].uri).to.equal('bdev:///' + UUID); - expect(nexus.children[1].uri).to.equal('iscsi://' + UUID); - expect(nexus.children[2].uri).to.equal('nvmf://' + UUID); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should not add replica to nexus if grpc fails', async () => { - const replica = new Replica({ - uuid: UUID, - uri: 'iscsi://' + UUID - }); - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - - await shouldFailWith(grpcCode.INTERNAL, async () => { - await nexus.addReplica(replica); - }); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'addChildNexus', { - uuid: UUID, - uri: 'iscsi://' + UUID, - norebuild: false - }); - expect(nexus.children).to.have.lengthOf(2); - expect(nexus.children[0].uri).to.equal('bdev:///' + UUID); - expect(nexus.children[1].uri).to.equal('nvmf://' + UUID); - sinon.assert.notCalled(eventSpy); - }); - - it('should remove replica from nexus', async () => { - const replica = new Replica({ - uuid: UUID, - uri: 'nvmf://' + UUID - }); - callStub.resolves({}); - - await nexus.removeReplica(replica.uri); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'removeChildNexus', { - uuid: UUID, - uri: 'nvmf://' + UUID - }); - expect(nexus.children).to.have.lengthOf(1); - expect(nexus.children[0].uri).to.equal('bdev:///' + UUID); - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'mod', - object: nexus - }); - }); - - it('should not remove replica from nexus if grpc fails', async () => { - const replica = new Replica({ - uuid: UUID, - uri: 'nvmf://' + UUID - }); - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - - await shouldFailWith(grpcCode.INTERNAL, async () => { - await nexus.removeReplica(replica.uri); - }); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'removeChildNexus', { - uuid: UUID, - uri: 'nvmf://' + UUID - }); - expect(nexus.children).to.have.lengthOf(2); - expect(nexus.children[0].uri).to.equal('bdev:///' + UUID); - expect(nexus.children[1].uri).to.equal('nvmf://' + UUID); - sinon.assert.notCalled(eventSpy); - }); - - it('should destroy the nexus', async () => { - callStub.resolves({}); - - await nexus.destroy(); - - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'del', - object: nexus - }); - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'destroyNexus', { uuid: UUID }); - expect(nexus.node).to.be.undefined; - 
expect(node.nexus).to.have.lengthOf(0); - }); - - it('should not remove the nexus if grpc fails', async () => { - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - - await shouldFailWith(grpcCode.INTERNAL, async () => { - await nexus.destroy(); - }); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'destroyNexus', { uuid: UUID }); - sinon.assert.notCalled(eventSpy); - expect(nexus.node).to.equal(node); - expect(node.nexus).to.have.lengthOf(1); - }); - - it('should fake the destroy if the node is offline', async () => { - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Not connected')); - isSyncedStub.returns(false); - - await nexus.destroy(); - - sinon.assert.calledOnce(eventSpy); - sinon.assert.calledWith(eventSpy, 'nexus', { - eventType: 'del', - object: nexus - }); - sinon.assert.notCalled(callStub); - expect(nexus.node).to.be.undefined; - expect(node.nexus).to.have.lengthOf(0); - }); - }); -}; diff --git a/csi/moac/test/node_operator_test.js b/csi/moac/test/node_operator_test.js deleted file mode 100644 index cca7bda1d..000000000 --- a/csi/moac/test/node_operator_test.js +++ /dev/null @@ -1,507 +0,0 @@ -// Unit tests for the node operator - -'use strict'; - -const expect = require('chai').expect; -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const { KubeConfig } = require('@kubernetes/client-node'); -const { Registry } = require('../dist/registry'); -const { NodeOperator, NodeResource } = require('../dist/node_operator'); -const { mockCache } = require('./watcher_stub'); -const Node = require('./node_stub'); - -const EVENT_PROPAGATION_DELAY = 10; -const NAME = 'node-name'; -const NAMESPACE = 'mayastor'; -const ENDPOINT = 'localhost:1234'; -const ENDPOINT2 = 'localhost:1235'; - -const fakeConfig = { - clusters: [ - { - name: 'cluster', - server: 'foo.company.com' - } - ], - contexts: [ - { - cluster: 'cluster', - user: 'user' - } - ], - users: [{ name: 'user' }] -}; - -function defaultMeta (name) { - return { - creationTimestamp: '2019-02-15T18:23:53Z', - generation: 1, - name: name, - namespace: NAMESPACE, - resourceVersion: '627981', - selfLink: `/apis/openebs.io/v1alpha1/namespaces/${NAMESPACE}/mayastornodes/${name}`, - uid: 'd99f06a9-314e-11e9-b086-589cfc0d76a7' - }; -} - -// Create k8s node resource object -function createK8sNodeResource (name, grpcEndpoint, status) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: defaultMeta(name), - spec: { grpcEndpoint } - }; - if (status) { - obj.status = status; - } - return obj; -} - -// Create k8s node resource object -function createNodeResource (name, grpcEndpoint, status) { - return new NodeResource(createK8sNodeResource(name, grpcEndpoint, status)); -} - -// Create a node operator object suitable for testing - with fake watcher -// and fake k8s api client.
-function createNodeOperator (registry) { - const kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - return new NodeOperator(NAMESPACE, kc, registry); -} - -module.exports = function () { - describe('NodeResource constructor', () => { - it('should create valid node resource with status', () => { - const obj = createNodeResource(NAME, ENDPOINT, 'online'); - expect(obj.metadata.name).to.equal(NAME); - expect(obj.spec.grpcEndpoint).to.equal(ENDPOINT); - expect(obj.status).to.equal('online'); - }); - - it('should create valid node resource without status', () => { - const obj = createNodeResource(NAME, ENDPOINT); - expect(obj.metadata.name).to.equal(NAME); - expect(obj.spec.grpcEndpoint).to.equal(ENDPOINT); - expect(obj.status).to.equal('unknown'); - }); - - // empty endpoint means that the node has unregistered itself - it('should create node resource with empty grpc endpoint', () => { - const obj = createNodeResource(NAME, '', 'offline'); - expect(obj.metadata.name).to.equal(NAME); - expect(obj.spec.grpcEndpoint).to.equal(''); - expect(obj.status).to.equal('offline'); - }); - }); - - describe('init method', () => { - let kc, oper, fakeApiStub; - - beforeEach(() => { - const registry = new Registry({}); - kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - oper = new NodeOperator(NAMESPACE, kc, registry); - const makeApiStub = sinon.stub(kc, 'makeApiClient'); - const fakeApi = { - createCustomResourceDefinition: () => null - }; - fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); - makeApiStub.returns(fakeApi); - }); - - afterEach(() => { - if (oper) { - oper.stop(); - oper = undefined; - } - }); - - it('should create CRD if it does not exist', async () => { - fakeApiStub.resolves(); - await oper.init(kc); - }); - - it('should ignore error if CRD already exists', async () => { - fakeApiStub.rejects({ - statusCode: 409 - }); - await oper.init(kc); - }); - - it('should throw if CRD creation fails', async () => { - fakeApiStub.rejects({ - statusCode: 404 - }); - try { - await oper.init(kc); - } catch (err) { - return; - } - throw new Error('Init did not fail'); - }); - }); - - describe('watcher events', () => { - let oper; // node operator - let stubs, registry, nodeResource; - - beforeEach(async () => { - registry = new Registry({}); - registry.Node = Node; - - oper = createNodeOperator(registry); - nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - }); - - afterEach(() => { - if (oper) { - oper.stop(); - oper = null; - } - }); - - it('should add node to registry upon "new" event', async () => { - const addNodeSpy = sinon.spy(registry, 'addNode'); - oper.watcher.emit('new', nodeResource); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.calledOnce(addNodeSpy); - sinon.assert.calledWith(addNodeSpy, NAME, ENDPOINT); - }); - - it('should not add node to registry if endpoint is empty', async () => { - const addNodeSpy = sinon.spy(registry, 'addNode'); - nodeResource.spec.grpcEndpoint = ''; - oper.watcher.emit('new', nodeResource); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(addNodeSpy); - }); - - it('should remove node from registry upon "del" event', async () => { - // create registry with a node - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - const removeNodeSpy = sinon.spy(registry, 
'removeNode'); - - // trigger "del" event - oper.watcher.emit('del', nodeResource); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.calledWith(removeNodeSpy, NAME); - }); - - it('should not do anything upon "mod" event', async () => { - // create registry with a node - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - const addNodeStub = sinon.stub(registry, 'addNode'); - addNodeStub.returns(); - const removeNodeStub = sinon.stub(registry, 'removeNode'); - removeNodeStub.returns(); - - // trigger "mod" event - oper.watcher.emit('mod', nodeResource); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(removeNodeStub); - sinon.assert.notCalled(addNodeStub); - }); - }); - - describe('registry events', () => { - let registry, oper; - - beforeEach(async () => { - registry = new Registry({}); - registry.Node = Node; - oper = createNodeOperator(registry); - }); - - afterEach(() => { - if (oper) { - oper.stop(); - oper = null; - } - }); - - it('should create a resource upon "new" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.onFirstCall().returns(); - stubs.get.onSecondCall().returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.create); - expect(stubs.create.args[0][4].metadata.name).to.equal(NAME); - expect(stubs.create.args[0][4].spec.grpcEndpoint).to.equal(ENDPOINT); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not crash if POST fails upon "new" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.onFirstCall().returns(); - stubs.get.onSecondCall().returns(nodeResource); - stubs.create.rejects(new Error('post failed')); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should update the resource upon "new" node event if it exists', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'offline'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT2); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.update.args[0][5].spec.grpcEndpoint).to.equal(ENDPOINT2); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.updateStatus.args[0][5].status).to.equal('online'); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not update the resource upon "new" node event if it is the same', async () => { - let stubs; - const nodeResource = 
createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should update the resource upon "mod" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT2); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.update.args[0][5].spec.grpcEndpoint).to.equal(ENDPOINT2); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should update status of the resource upon "mod" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT); - await sleep(EVENT_PROPAGATION_DELAY); - const node = registry.getNode(NAME); - const isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(false); - node._offline(); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.updateStatus.args[0][5].status).to.equal('offline'); - sinon.assert.notCalled(stubs.delete); - }); - - it('should update spec and status of the resource upon "mod" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT); - await sleep(EVENT_PROPAGATION_DELAY); - const node = registry.getNode(NAME); - const isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(false); - node.disconnect(); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.update.args[0][5].spec.grpcEndpoint).to.equal(''); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].metadata.name).to.equal(NAME); - expect(stubs.updateStatus.args[0][5].status).to.equal('offline'); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not crash if PUT fails upon "mod" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - stubs.update.rejects(new 
Error('put failed')); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - registry.addNode(NAME, ENDPOINT2); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.calledTwice(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not create the resource upon "mod" node event', async () => { - let stubs; - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // secretly inject node to registry (watcher does not know) - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - registry.addNode(NAME, ENDPOINT2); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - - it('should delete the resource upon "del" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // secretly inject node to registry (watcher does not know) - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - registry.removeNode(NAME); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(stubs.delete); - }); - - it('should not crash if DELETE fails upon "del" node event', async () => { - let stubs; - const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(nodeResource); - stubs.delete.rejects(new Error('delete failed')); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // secretly inject node to registry (watcher does not know) - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - registry.removeNode(NAME); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(stubs.delete); - }); - - it('should not crash if the resource does not exist upon "del" node event', async () => { - let stubs; - mockCache(oper.watcher, (arg) => { - stubs = arg; - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // secretly inject node to registry (watcher does not know) - const node = new Node(NAME); - node.connect(ENDPOINT); - registry.nodes[NAME] = node; - registry.removeNode(NAME); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(stubs.delete); - }); - }); -}; diff --git a/csi/moac/test/node_stub.js b/csi/moac/test/node_stub.js deleted file mode 100644 index 8e6f47ba6..000000000 --- a/csi/moac/test/node_stub.js +++ /dev/null @@ -1,55 +0,0 @@ -// Fake node object with altered 
connect/call/disconnect method to -// prevent any over the wire calls when testing. - -'use strict'; - -const { Node } = require('../dist/node'); - -// It can be used instead of real node object in tests of components that -// depend on the Node. -class NodeStub extends Node { - // Construct a node object. - // Compared to the real constructor it accepts additional "pools" arg, - // that is used to set pool list to initial value. - constructor (name, opts, pools, nexus) { - super(name, opts); - - if (pools) { - this.pools = pools.map((p) => { - p.node = this; - return p; - }); - } - if (nexus) { - this.nexus = nexus.map((n) => { - n.node = this; - return n; - }); - } - // keep existing behaviour and set the fake node to synced by default - this.syncFailed = 0; - } - - connect (endpoint) { - this.syncFailed = 0; - if (this.endpoint === endpoint) { - // nothing changed - return; - } else if (this.endpoint) { - this.emit('node', { - eventType: 'mod', - object: this - }); - } - this.endpoint = endpoint; - } - - disconnect () { - this.syncFailed = this.syncBadLimit + 1; - this.endpoint = null; - this.client = null; - this._offline(); - } -} - -module.exports = NodeStub; diff --git a/csi/moac/test/node_test.js b/csi/moac/test/node_test.js deleted file mode 100644 index a27a4c78f..000000000 --- a/csi/moac/test/node_test.js +++ /dev/null @@ -1,721 +0,0 @@ -// Unit tests for the node object - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; - -const { Node } = require('../dist/node'); -const { Nexus } = require('../dist/nexus'); -const { Pool } = require('../dist/pool'); -const { Replica } = require('../dist/replica'); -const { grpcCode } = require('../dist/grpc_client'); - -const { MayastorServer } = require('./mayastor_mock'); -const { shouldFailWith } = require('./utils'); -const enums = require('./grpc_enums'); - -const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; -const MS_ENDPOINT = '127.0.0.1:12345'; - -module.exports = function () { - let srv; - let node; - const pools = [ - { - name: 'pool', - disks: ['aio:///dev/sdb', 'aio:///dev/sdc'], - state: enums.POOL_ONLINE, - capacity: 100, - used: 14 - } - ]; - const replicas = [ - { - uuid: UUID, - pool: 'pool', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - } - ]; - const nexus = [ - { - uuid: UUID, - size: 10, - share: 0, // value of NEXUS_NBD for now. 
- state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - } - ] - } - ]; - - it('should stringify a node object', () => { - const node = new Node('node-name'); - expect(node.toString()).to.equal('node-name'); - }); - - describe('node events', function () { - this.timeout(500); - - // start a fake mayastor server - before((done) => { - srv = new MayastorServer(MS_ENDPOINT, pools, replicas, nexus); - srv.start(done); - }); - - after(() => { - if (srv) srv.stop(); - srv = null; - }); - - describe('initial sync', () => { - after(() => { - // properly shut down the node object - if (node) { - node.removeAllListeners(); - node.disconnect(); - node = null; - } - }); - - it('should sync the state with storage node and emit event', (done) => { - // the first sync takes sometimes >20ms so don't set the interval too low - const syncInterval = 100; - const nodeEvents = []; - const poolObjects = []; - const replicaObjects = []; - const nexusObjects = []; - - node = new Node('node', { - syncPeriod: syncInterval, - syncRetry: syncInterval, - syncBadLimit: 0 - }); - - node.on('pool', (ev) => { - expect(ev.eventType).to.equal('new'); - poolObjects.push(ev.object); - }); - node.on('replica', (ev) => { - expect(ev.eventType).to.equal('new'); - replicaObjects.push(ev.object); - }); - node.on('nexus', (ev) => { - expect(ev.eventType).to.equal('new'); - nexusObjects.push(ev.object); - }); - node.on('node', (ev) => { - nodeEvents.push(ev); - }); - node.connect(MS_ENDPOINT); - - setTimeout(() => { - expect(node.isSynced()).to.be.true; - expect(nodeEvents).to.have.lengthOf(1); - expect(nodeEvents[0].eventType).to.equal('mod'); - expect(nodeEvents[0].object).to.equal(node); - - expect(poolObjects).to.have.lengthOf(1); - expect(poolObjects[0].name).to.equal('pool'); - expect(poolObjects[0].disks).to.have.lengthOf(2); - expect(poolObjects[0].disks[0]).to.equal('aio:///dev/sdb'); - expect(poolObjects[0].disks[1]).to.equal('aio:///dev/sdc'); - expect(poolObjects[0].state).to.equal('POOL_ONLINE'); - expect(poolObjects[0].capacity).to.equal(100); - expect(poolObjects[0].used).to.equal(14); - - expect(replicaObjects).to.have.lengthOf(1); - expect(replicaObjects[0].uuid).to.equal(UUID); - expect(replicaObjects[0].pool.name).to.equal('pool'); - expect(replicaObjects[0].size).to.equal(10); - expect(replicaObjects[0].share).to.equal('REPLICA_NONE'); - expect(replicaObjects[0].uri).to.equal(`bdev:///${UUID}?uuid=1`); - - expect(nexusObjects).to.have.lengthOf(1); - expect(nexusObjects[0].uuid).to.equal(UUID); - expect(nexusObjects[0].size).to.equal(10); - expect(nexusObjects[0].state).to.equal('NEXUS_ONLINE'); - expect(nexusObjects[0].children).to.have.lengthOf(1); - expect(nexusObjects[0].children[0].uri).to.equal(`bdev:///${UUID}?uuid=1`); - expect(nexusObjects[0].children[0].state).to.equal('CHILD_ONLINE'); - - done(); - }, syncInterval * 3); - }); - }); - - describe('new/mod/del events', () => { - const syncInterval = 10; - - before(() => { - // we make a deep copy of srv objects because the tests modify them - srv.pools = _.cloneDeep(pools); - srv.replicas = _.cloneDeep(replicas); - srv.nexus = _.cloneDeep(nexus); - }); - - // wait for the initial sync - beforeEach((done) => { - node = new Node('node', { - syncPeriod: syncInterval, - syncRetry: syncInterval, - syncBadLimit: 0 - }); - - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - done(); - }); - node.connect(MS_ENDPOINT); - }); - - afterEach(() => { - if (node) { - node.removeAllListeners(); - 
node.disconnect(); - node = null; - } - srv.pools = _.cloneDeep(pools); - srv.replicas = _.cloneDeep(replicas); - srv.nexus = _.cloneDeep(nexus); - }); - - it('should emit event when a replica is changed', (done) => { - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.be.an.instanceof(Replica); - expect(ev.object.share).to.equal('REPLICA_NVMF'); - expect(ev.object.uri).to.equal('nvmf://blabla'); - done(); - }); - // modify replica property - const newReplicas = _.cloneDeep(replicas); - newReplicas[0].share = 'REPLICA_NVMF'; - newReplicas[0].uri = 'nvmf://blabla'; - srv.replicas = newReplicas; - }); - - it('should emit event when a replica is deleted', (done) => { - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.be.an.instanceof(Replica); - expect(ev.object.uuid).to.equal(UUID); - done(); - }); - // empty the replica list - srv.replicas = []; - }); - - it('should emit event when a replica is created', (done) => { - const newUuid = 'f04015e1-3689-4e34-9bed-e2dbba1e4a27'; - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.be.an.instanceof(Replica); - expect(ev.object.uuid).to.equal(newUuid); - done(); - }); - // add a new replica - srv.replicas.push({ - uuid: newUuid, - pool: 'pool', - size: 20, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${newUuid}?uuid=1234` - }); - }); - - it('should not emit event when a replica that does not belong to any pool is created', (done) => { - const newUuid = 'f04015e1-3689-4e34-9bed-e2dbba1e4a28'; - let emitted = false; - - node.once('replica', (ev) => { - emitted = true; - done(new Error('Event emitted')); - }); - setTimeout(() => { - if (!emitted) done(); - }, 2 * syncInterval); - // add a new replica - srv.replicas.push({ - uuid: newUuid, - pool: 'unknown-pool', - size: 20, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${newUuid}?uuid=1234` - }); - }); - - it('should emit event when a pool is changed', (done) => { - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.be.an.instanceof(Pool); - expect(ev.object.state).to.equal('POOL_DEGRADED'); - done(); - }); - // modify pool property - const newPools = _.cloneDeep(pools); - newPools[0].state = enums.POOL_DEGRADED; - srv.pools = newPools; - }); - - it('should emit event when a pool is deleted', (done) => { - let replicaRemoved = false; - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.be.an.instanceof(Replica); - expect(ev.object.uuid).to.equal(UUID); - replicaRemoved = true; - }); - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.be.an.instanceof(Pool); - expect(ev.object.name).to.equal('pool'); - expect(replicaRemoved).to.be.true; - done(); - }); - // empty the pool list - srv.pools = []; - }); - - it('should emit event when a pool with replica is created', (done) => { - const newUuid = 'f04015e1-3689-4e34-9bed-e2dbba1e4a29'; - let poolAdded = false; - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.be.an.instanceof(Pool); - expect(ev.object.name).to.equal('new-pool'); - poolAdded = true; - }); - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.be.an.instanceof(Replica); - expect(ev.object.uuid).to.equal(newUuid); - expect(poolAdded).to.be.true; - done(); - }); - // add a new pool with a replica - srv.pools.push({ - name: 'new-pool', 
- disks: ['/dev/sda'], - state: enums.POOL_ONLINE, - capacity: 100, - used: 14 - }); - srv.replicas.push({ - uuid: newUuid, - pool: 'new-pool', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${newUuid}?uuid=1234` - }); - }); - - it('should emit event when a nexus is changed', (done) => { - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.be.an.instanceof(Nexus); - expect(ev.object.uuid).to.equal(UUID); - expect(ev.object.children).to.have.lengthOf(2); - done(); - }); - // modify nexus property - const newNexus = _.cloneDeep(nexus); - newNexus[0].children = [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - }, - { - uri: 'nvmf:///something', - state: 'CHILD_ONLINE' - } - ]; - srv.nexus = newNexus; - }); - - it('should emit event when a nexus is deleted', (done) => { - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.be.an.instanceof(Nexus); - expect(ev.object.uuid).to.equal(UUID); - done(); - }); - // empty the nexus list - srv.nexus = []; - }); - - it('should emit event when a nexus is created', (done) => { - const newUuid = 'f04015e1-3689-4e34-9bed-e2dbba1e4a27'; - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.be.an.instanceof(Nexus); - expect(ev.object.uuid).to.equal(newUuid); - done(); - }); - // add a new nexus - srv.nexus.push({ - uuid: newUuid, - size: 10, - state: 'NEXUS_ONLINE', - children: [] - }); - }); - }); - }); - - describe('sync failures', () => { - // start a fake mayastor server - beforeEach((done) => { - srv = new MayastorServer(MS_ENDPOINT, pools, replicas, nexus); - srv.start(done); - }); - - afterEach(() => { - if (node) { - node.removeAllListeners(); - node.disconnect(); - node = null; - } - if (srv) srv.stop(); - srv = null; - }); - - it('should emit event for all objects when the node goes out of sync', (done) => { - const syncInterval = 100; - let offlineCount = 0; - - node = new Node('node', { - syncPeriod: syncInterval, - syncRetry: syncInterval, - syncBadLimit: 0 - }); - - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(node); - const firstSync = Date.now(); - srv.stop(); - srv = null; - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object.name).to.equal('pool'); - expect(ev.object.state).to.equal('POOL_OFFLINE'); - offline(); - }); - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object.uuid).to.equal(UUID); - expect(ev.object.isOffline()).to.be.true; - offline(); - }); - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object.uuid).to.equal(UUID); - expect(ev.object.state).to.equal('NEXUS_OFFLINE'); - offline(); - }); - - function offline () { - if (++offlineCount === 3) { - expect(node.isSynced()).to.be.false; - expect(Date.now() - firstSync).to.be.below(syncInterval * 1.5); - done(); - } - } - }); - node.connect(MS_ENDPOINT); - }); - - it('should tolerate n sync failures when configured so', (done) => { - const syncPeriod = 200; - const syncRetry = 40; - - node = new Node('node', { - syncPeriod: syncPeriod, - syncRetry: syncRetry, - syncBadLimit: 2 - }); - - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(node); - const firstSync = Date.now(); - srv.stop(); - srv = null; - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object.name).to.equal('pool'); -
expect(ev.object.state).to.equal('POOL_OFFLINE'); - expect(node.isSynced()).to.be.false; - expect(Date.now() - firstSync).to.be.above( - syncPeriod + syncRetry * 2 - 1 - ); - expect(Date.now() - firstSync).to.be.below( - syncPeriod + syncRetry * 4 + 1 - ); - done(); - }); - }); - node.connect(MS_ENDPOINT); - }); - - it('should emit event when the node is synced after being disconnected', (done) => { - const syncPeriod = 20; - - node = new Node('node', { - syncPeriod: syncPeriod, - syncRetry: syncPeriod, - syncBadLimit: 0 - }); - - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(node); - expect(node.isSynced()).to.be.true; - - srv.stop(); - srv = null; - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object.name).to.equal('pool'); - expect(ev.object.state).to.equal('POOL_OFFLINE'); - expect(node.isSynced()).to.be.false; - - srv = new MayastorServer( - MS_ENDPOINT, - pools, - replicas, - nexus - ); - srv.start((err) => { - if (err) return done(err); - - // pool/replica/nexus event should be emitted before node event and - // node should be online when emitting those events. - let poolEvent; - node.once('pool', (ev) => { - expect(node.isSynced()).to.be.true; - poolEvent = ev; - }); - node.once('node', (ev) => { - expect(poolEvent).not.to.be.undefined; - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(node); - expect(node.isSynced()).to.be.true; - done(); - }); - }); - }); - }); - node.connect(MS_ENDPOINT); - }); - }); - - describe('object create', function () { - const DELAY_MS = 100; - let replica; - let pool; - let nexus; - - this.timeout(500); - - // start a fake mayastor server - before((done) => { - srv = new MayastorServer(MS_ENDPOINT, [], [], [], DELAY_MS); - srv.start((err) => { - if (err) return done(err); - // wait for the initial sync - node = new Node('node'); - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - done(); - }); - node.connect(MS_ENDPOINT); - }); - }); - - after(() => { - if (node) { - node.removeAllListeners(); - node.disconnect(); - node = null; - } - if (srv) srv.stop(); - srv = null; - }); - - it('should create a pool on the node', async () => { - let emitted = false; - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object.name).to.equal('pool'); - expect(ev.object.disks).to.have.lengthOf(1); - expect(ev.object.disks[0]).to.equal('aio:///dev/sda'); - expect(node.pools).to.have.lengthOf(1); - emitted = true; - }); - - pool = await node.createPool('pool', ['/dev/sda']); - expect(pool).to.be.an.instanceof(Pool); - expect(emitted).to.be.true; - }); - - it('should create a replica on the pool', async () => { - let emitted = false; - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object.uuid).to.equal(UUID); - expect(ev.object.size).to.equal(100); - expect(pool.replicas).to.have.lengthOf(1); - emitted = true; - }); - replica = await pool.createReplica(UUID, 100); - expect(replica).to.be.an.instanceof(Replica); - expect(emitted).to.be.true; - }); - - it('should create a nexus on the node', async () => { - let emitted = false; - - node.once('nexus', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object.uuid).to.equal(UUID); - expect(ev.object.size).to.equal(100); - expect(ev.object.children).to.have.lengthOf(1); - expect(ev.object.children[0].uri).to.match(/^bdev:\/\/\//); - expect(ev.object.children[0].state).to.equal('CHILD_ONLINE'); - 
expect(node.nexus).to.have.lengthOf(1); - emitted = true; - }); - - nexus = await node.createNexus(UUID, 100, [replica]); - expect(nexus).to.be.an.instanceof(Nexus); - expect(emitted).to.be.true; - }); - - it('should timeout on a call that takes too long', async () => { - const UUID2 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb2'; - await shouldFailWith( - grpcCode.DEADLINE_EXCEEDED, - () => node.call( - 'createNexus', - { - uuid: UUID2, - size: 100, - children: [replica.uri] - }, - DELAY_MS / 2 - ) - ); - }); - }); - - describe('object list', function () { - const UUID1 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb1'; - const UUID2 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb2'; - const UUID3 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb3'; - const UUID4 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb4'; - - // start a fake mayastor server - before((done) => { - const pools = [ - { - name: 'pool1', - disks: ['/dev/sdb', '/dev/sdc'], - state: enums.POOL_ONLINE, - capacity: 100, - used: 14 - }, - { - name: 'pool2', - disks: ['/dev/sda'], - state: enums.POOL_ONLINE, - capacity: 100, - used: 14 - } - ]; - const replicas = [ - { - uuid: UUID1, - pool: 'pool1', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID1}?uuid=1` - }, - { - uuid: UUID2, - pool: 'pool1', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID2}?uuid=2` - }, - { - uuid: UUID3, - pool: 'pool2', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID3}?uuid=3` - }, - // this replica does not belong to any pool so should be ignored - { - uuid: UUID4, - pool: 'unknown-pool', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID4}?uuid=4` - } - ]; - srv = new MayastorServer(MS_ENDPOINT, pools, replicas, []); - srv.start((err) => { - if (err) return done(err); - // wait for the initial sync - node = new Node('node'); - node.once('node', (ev) => { - expect(ev.eventType).to.equal('mod'); - done(); - }); - node.connect(MS_ENDPOINT); - }); - }); - - after(() => { - if (node) { - node.removeAllListeners(); - node.disconnect(); - node = null; - } - if (srv) srv.stop(); - srv = null; - }); - - it('should get a list of replicas on the node', () => { - const replicas = node.getReplicas(); - expect(replicas).to.have.lengthOf(3); - expect(replicas[0].uuid).to.equal(UUID1); - expect(replicas[1].uuid).to.equal(UUID2); - expect(replicas[2].uuid).to.equal(UUID3); - }); - }); -}; diff --git a/csi/moac/test/persistence_test.ts b/csi/moac/test/persistence_test.ts deleted file mode 100644 index b14c016df..000000000 --- a/csi/moac/test/persistence_test.ts +++ /dev/null @@ -1,307 +0,0 @@ -// Unit tests for the persistent store and its etcd client - -import { Etcd3, IOptions, isRecoverableError } from 'etcd3'; -import { defaults } from 'lodash'; -import { Done } from 'mocha'; -import { spawn, ChildProcessWithoutNullStreams } from 'child_process'; -import { expect } from 'chai'; -import { Replica } from '../src/replica'; -import { Policy, ConsecutiveBreaker } from 'cockatiel'; -import * as sinon from 'ts-sinon'; -import { PersistentStore, NexusInfo, ChildInfo } from '../src/persistent_store'; - -const fs = require('fs'); - -const ETCD_STORE = "/tmp/moac-etcd-test"; -const ETCD_PORT = '2379'; -const ETCD_HOST = '127.0.0.1'; -const ETCD_EP = `${ETCD_HOST}:${ETCD_PORT}`; - -let etcdProc: ChildProcessWithoutNullStreams | null; -// Starts etcd server and calls the callback when the server is up and ready. 
-function startEtcd (done: Done) { - if (etcdProc != null) { - done(); - return; - } - fs.rm(ETCD_STORE, { recursive: true }, (err: NodeJS.ErrnoException) => { - if (err && err.code !== 'ENOENT') return done(err); - - etcdProc = spawn('etcd', ['--data-dir', ETCD_STORE]); - let doneCalled = false; - let stderr = ''; - - etcdProc?.stderr.on('data', (data: any) => { - stderr += data.toString(); - if (data.toString().match(/ready to serve client requests/)) { - doneCalled = true; - done(); - } - }); - - etcdProc?.once('close', (code: any) => { - etcdProc = null; - if (!doneCalled) { - if (code) { - done(new Error(`etcd server exited with code ${code}: ${stderr}`)); - } else { - done(new Error('etcd server exited prematurely')); - } - return; - } - if (code) { - console.log(`etcd server exited with code ${code}: ${stderr}`); - } - }); - }); -} - -// Kill etcd server. Though it does not wait for it to exit! -async function stopEtcd () { - etcdProc?.kill(); - await fs.promises.rm(ETCD_STORE, { recursive: true }); -} - -module.exports = function () { - // adds all possible combinations of child info to the NexusInfo - // currently only healthy or otherwise - function addChildrenCombinations(nexusInfo: NexusInfo) { - const healthyChild = new ChildInfo({ - uuid: nexusInfo.children.length.toString(), - healthy: true, - }); - nexusInfo.children.push(healthyChild); - const unhealthyChild = new ChildInfo({ - uuid: nexusInfo.children.length.toString(), - healthy: false, - }); - nexusInfo.children.push(unhealthyChild); - } - - // returns a NexusInfo with the given clean_shutdown flag and twice all possible children combinations - function getNexusInfo(cleanShutdown: boolean): NexusInfo { - let nexusInfo = new NexusInfo({ - clean_shutdown: cleanShutdown, - children: [] - }); - - // add 2 of each - addChildrenCombinations(nexusInfo); - addChildrenCombinations(nexusInfo); - - return nexusInfo; - } - - describe('with real etcd server', () => { - let client = new Etcd3(getOptions()); - - function getOptions(): IOptions { - return { - hosts: ETCD_EP, - faultHandling: { - host: () => - // make sure the circuit breaker does not kick in right away for most tests - Policy.handleWhen(isRecoverableError).circuitBreaker(1_000, new ConsecutiveBreaker(10)), - global: Policy.handleWhen(isRecoverableError).retry().attempts(3), - }, - ...defaults - }; - } - - beforeEach((done) => { - startEtcd(async (err: any) => { - if (err) return done(err); - // clear up etcd - await client.delete().all(); - done(); - }); - }); - - after(stopEtcd); - - it('should read NexusInfo from the persistent store', async () => { - let uuid = "1"; - - let nexusNotThere = await client.get(uuid); - expect(nexusNotThere).to.be.null; - - // now put it there - // todo: use number format for the enums - await client.put(uuid).value(JSON.stringify(getNexusInfo(true))); - // and read it back - let nexus = await client.get(uuid).json() as NexusInfo; - - expect(nexus).not.to.be.null; - // inner values should match up - expect(nexus.children.values).equals(getNexusInfo(true).children.values); - }); - - it('should throw if etcd is not reachable', async () => { - const persistentStore = new PersistentStore([], 1000, () => client); - - await stopEtcd(); - let hasThrown = false; - try { - await persistentStore.filterReplicas("1", []); - } catch (error: any) { - hasThrown = true; - } - expect(hasThrown).to.be.true; - - // start etcd again - await new Promise((resolve: (res: void) => void) => { - startEtcd(() => { - resolve(); - }); - }); - - hasThrown = false; - 
try { - await persistentStore.filterReplicas("1", []); - } catch (error: any) { - console.log(`Caught unexpected exception, error: ${error}`); - hasThrown = true; - } - expect(hasThrown).to.be.false; - }); - - it('should delete NexusInfo from the persistent store', async () => { - let uuid = "1"; - - let nexusNotThere = await client.get(uuid); - expect(nexusNotThere).to.be.null; - - // now put it there - await client.put(uuid).value(JSON.stringify(getNexusInfo(true))); - // and read it back - let nexus = await client.get(uuid).json() as NexusInfo; - expect(nexus).not.to.be.null; - - const persistentStore = new PersistentStore([], 1000, () => client); - await persistentStore.destroyNexus(uuid); - - nexusNotThere = await client.get(uuid); - expect(nexusNotThere).to.be.null; - }); - }); - - describe('with mock etcd client', () => { - const client = new Etcd3(); - const persistentStore = new PersistentStore([], 1000, () => client); - - it('should mock the persistent store', async () => { - // hint: remove 'as any' cast to figure out which calls exec - const mock = client.mock({ exec: sinon.default.stub() as any}); - mock.exec.callsFake((_serviceName:any, method:string, payload:any) => { - if (method === 'range' && payload.key == 'foo') { - return { - kvs: [{ value: 'bar' }] - }; - } else - return { - kvs: [{ value: 'bar_not_foo' }] - }; - }); - let output = await client.get('foo'); - expect(output).to.equal('bar'); - output = await client.get('foos'); - expect(output).to.equal('bar_not_foo'); - client.unmock(); - }); - - it('should throw if the persistent store has invalid data', async () => { - const mock = client.mock({ exec: sinon.default.stub() as any}); - let replicas = [new Replica({ uri: 'bdev:///1?uuid=1' }), new Replica({ uri: 'bdev:///1?uuid=2' })]; - - // not a valid json - mock.exec.resolves({ kvs: [{ value: 'not json' }] }); - let hasThrown = false; - try { - await persistentStore.filterReplicas("1", replicas); - } catch (error: any) { - hasThrown = true; - } - expect(hasThrown).to.be.true; - - // valid json but not in the right format - mock.exec.resolves({ kvs: [{ value: '{ "clean_shutdowns": true, "children": [] }' }] }); - hasThrown = false; - try { - await persistentStore.filterReplicas("1", replicas); - } catch (error: any) { - hasThrown = true; - } - expect(hasThrown).to.be.true; - mock.exec.resolves({ kvs: [{ value: '{ "clean_shutdown": true, "childrens": [] }' }] }); - hasThrown = false; - try { - await persistentStore.filterReplicas("1", replicas); - } catch (error: any) { - hasThrown = true; - } - expect(hasThrown).to.be.true; - - // valid json and in the right format, so we should not throw now - mock.exec.resolves({ kvs: [{ value: '{ "clean_shutdown": true, "children": [] }' }] }); - await persistentStore.filterReplicas("1", replicas); - }); - - it('should not filter out replicas on the first nexus creation', async () => { - const mock = client.mock({ exec: sinon.default.stub() as any}); - mock.exec.resolves({ kvs: [] }); - let replicas = [new Replica({ uri: 'bdev:///1?uuid=1' }), new Replica({ uri: 'bdev:///1?uuid=2' })]; - let replicas_filtered = await persistentStore.filterReplicas("1", replicas); - expect(replicas_filtered).equals(replicas); - }); - - it('should return a single healthy child on an unclean shutdown of the nexus', async () => { - const mock = client.mock({ exec: sinon.default.stub() as any}); - let replicas = [new Replica({ uri: 'bdev:///1?uuid=1' }), new Replica({ uri: 'bdev:///1?uuid=2' })]; - - // no children at all in the nexus, which is 
strange, but nonetheless, means we cannot create the nexus - mock.exec.resolves({ kvs: [{ value: '{ "clean_shutdown": false, "children": [] }' }] }); - let replicasFiltered = await persistentStore.filterReplicas("1", replicas); - expect(replicasFiltered.length).equals(0); - - let nexus = getNexusInfo(false); - mock.exec.resolves({ kvs: [{ value: JSON.stringify(nexus) }] }); - let openChildren = nexus.children.filter((c) => { - return c.healthy === true; - }); - - replicas = openChildren.map((c) => { - return new Replica({ uri: `bdev:///1?uuid=${c.uuid}` }); - }); - expect(replicas.length).greaterThan(1); - - replicasFiltered = await persistentStore.filterReplicas("1", replicas); - expect(replicasFiltered.length).equals(1); - let child = openChildren.find((c) => replicasFiltered[0].realUuid === c.uuid); - expect(child).not.to.be.undefined; - expect(child?.healthy).to.be.true; - }); - - it('should return only healthy children on a clean shutdown of the nexus', async () => { - const mock = client.mock({ exec: sinon.default.stub() as any}); - - let nexus = getNexusInfo(true); - mock.exec.resolves({ kvs: [{ value: JSON.stringify(nexus) }] }); - let openChildren = nexus.children.filter((c) => { - return c.healthy === true; - }); - let replicas = openChildren.map((c) => { - return new Replica({ uri: `bdev:///1?uuid=${c.uuid}` }); - }); - expect(replicas.length).greaterThan(1); - - let replicasFiltered = await persistentStore.filterReplicas("1", replicas); - expect(replicasFiltered.length).equals(replicas.length); - - replicasFiltered.forEach((r) => { - let child = openChildren.find((c) => c.uuid === r.realUuid); - expect(child).not.to.be.undefined; - expect(child?.healthy).to.be.true; - }); - }); - }); -}; diff --git a/csi/moac/test/pool_operator_test.js b/csi/moac/test/pool_operator_test.js deleted file mode 100644 index a7eda6d00..000000000 --- a/csi/moac/test/pool_operator_test.js +++ /dev/null @@ -1,1454 +0,0 @@ -// Unit tests for the pool operator -// -// Pool operator depends on a couple of modules: -// * registry (real) -// * node object (fake) -// * pool object (fake) -// * watcher (mocked) -// -// As you can see most of them must be fake in order to do detailed testing -// of pool operator. That makes the code more complicated and less readable. 
- -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const expect = require('chai').expect; -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const { KubeConfig } = require('@kubernetes/client-node'); -const { Registry } = require('../dist/registry'); -const { GrpcError, grpcCode } = require('../dist/grpc_client'); -const { PoolOperator, PoolResource } = require('../dist/pool_operator'); -const { Pool } = require('../dist/pool'); -const { Replica } = require('../dist/replica'); -const { mockCache } = require('./watcher_stub'); -const Node = require('./node_stub'); - -const NAMESPACE = 'mayastor'; -const EVENT_PROPAGATION_DELAY = 10; - -const fakeConfig = { - clusters: [ - { - name: 'cluster', - server: 'foo.company.com' - } - ], - contexts: [ - { - cluster: 'cluster', - user: 'user' - } - ], - users: [{ name: 'user' }] -}; - -// Create k8s pool resource object -function createK8sPoolResource ( - name, - node, - disks, - finalizers, - state, - reason, - capacity, - used -) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorPool', - metadata: { - creationTimestamp: '2019-02-15T18:23:53Z', - generation: 1, - name: name, - namespace: NAMESPACE, - finalizers: finalizers, - resourceVersion: '627981', - selfLink: `/apis/openebs.io/v1alpha1/namespaces/${NAMESPACE}/mayastorpools/${name}`, - uid: 'd99f06a9-314e-11e9-b086-589cfc0d76a7' - }, - spec: { - node: node, - disks: disks - } - }; - if (state) { - const status = { state }; - status.disks = disks.map((d) => `aio://${d}`); - if (reason != null) status.reason = reason; - if (capacity != null) status.capacity = capacity; - if (used != null) status.used = used; - if (state != null) { - status.spec = { - node: node, - disks: disks - }; - } - obj.status = status; - } - return obj; -} - -function createPoolResource ( - name, - node, - disks, - finalizers, - state, - reason, - capacity, - used, - statusSpec -) { - return new PoolResource(createK8sPoolResource( - name, - node, - disks, - finalizers, - state, - reason, - capacity, - used, - statusSpec - )); -} - -// Create a pool operator object suitable for testing - with mocked watcher etc. 
-function createPoolOperator (nodes) { - const registry = new Registry({}); - registry.Node = Node; - nodes = nodes || []; - nodes.forEach((n) => (registry.nodes[n.name] = n)); - const kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - return new PoolOperator(NAMESPACE, kc, registry); -} - -module.exports = function () { - describe('PoolResource constructor', () => { - it('should create valid mayastor pool with status', () => { - const obj = createPoolResource( - 'pool', - 'node', - ['/dev/sdc', '/dev/sdb'], - ['some.finalizer.com'], - 'offline', - 'The node is down' - ); - expect(obj.metadata.name).to.equal('pool'); - expect(obj.spec.node).to.equal('node'); - // the filter should sort the disks - expect(JSON.stringify(obj.spec.disks)).to.equal( - JSON.stringify(['/dev/sdb', '/dev/sdc']) - ); - expect(obj.status.state).to.equal('offline'); - expect(obj.status.reason).to.equal('The node is down'); - expect(obj.status.disks).to.deep.equal(['aio:///dev/sdc', 'aio:///dev/sdb']); - expect(obj.status.capacity).to.be.undefined; - expect(obj.status.used).to.be.undefined; - }); - - it('should create valid mayastor pool without status', () => { - const obj = createPoolResource('pool', 'node', ['/dev/sdc', '/dev/sdb']); - expect(obj.metadata.name).to.equal('pool'); - expect(obj.spec.node).to.equal('node'); - expect(obj.status.state).to.equal('unknown'); - }); - - it('should not create mayastor pool without node specification', () => { - expect(() => createPoolResource( - 'pool', - undefined, - ['/dev/sdc', '/dev/sdb'] - )).to.throw(); - }); - }); - - describe('init method', () => { - let kc, oper, fakeApiStub; - - beforeEach(() => { - const registry = new Registry({}); - kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - oper = new PoolOperator(NAMESPACE, kc, registry); - const makeApiStub = sinon.stub(kc, 'makeApiClient'); - const fakeApi = { - createCustomResourceDefinition: () => null - }; - fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); - makeApiStub.returns(fakeApi); - }); - - afterEach(() => { - if (oper) { - oper.stop(); - oper = undefined; - } - }); - - it('should create CRD if it does not exist', async () => { - fakeApiStub.resolves(); - await oper.init(kc); - }); - - it('should ignore error if CRD already exists', async () => { - fakeApiStub.rejects({ - statusCode: 409 - }); - await oper.init(kc); - }); - - it('should throw if CRD creation fails', async () => { - fakeApiStub.rejects({ - statusCode: 404 - }); - try { - await oper.init(kc); - } catch (err) { - return; - } - throw new Error('Init did not fail'); - }); - }); - - describe('watcher events', () => { - let oper; // pool operator - - afterEach(() => { - if (oper) { - oper.stop(); - oper = null; - } - }); - - describe('new event', () => { - it('should process resources that existed before the operator was started', async () => { - let stubs; - oper = createPoolOperator([]); - const poolResource1 = createPoolResource('pool', 'node', ['/dev/sdb']); - const poolResource2 = createPoolResource('pool', 'node', ['/dev/sdb']); - poolResource2.status.spec = { node: 'node', disks: ['/dev/sdb'] }; - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.onCall(0).returns(poolResource1); - stubs.get.onCall(1).returns(poolResource2); - stubs.list.onCall(0).returns([poolResource1]); - stubs.list.onCall(1).returns([poolResource2]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - 
sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - // twice because we update the status to match the spec - sinon.assert.calledTwice(stubs.updateStatus); - expect(stubs.updateStatus.args[1][5].metadata.name).to.equal('pool'); - expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor does not run on node "node"', - disks: undefined, - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - }); - - it('should set "state" to PENDING when creating a pool', async () => { - let stubs; - const node = new Node('node'); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves( - new Pool({ - name: 'pool', - node: node, - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }) - ); - oper = createPoolOperator([node]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].metadata.name).to.equal('pool'); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'pending', - reason: 'Creating the pool', - disks: undefined, - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - }); - - it('should not try to create a pool if the node has not been synced', async () => { - let stubs; - const node = new Node('node'); - sinon.stub(node, 'isSynced').returns(false); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves( - new Pool({ - name: 'pool', - node: node, - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }) - ); - oper = createPoolOperator([node]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(createPoolStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - }); - - it('should not try to create a pool when pool with the same name already exists', async () => { - let stubs; - const node = new Node('node', {}, []); - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves(pool); - - oper = createPoolOperator([node]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb', '/dev/sdc']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install 
its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // this creates the inconsistency between real and k8s state which we are testing - node.pools.push(pool); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(createPoolStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'degraded', - reason: '', - disks: ['aio:///dev/sdb'], - capacity: 100, - used: 10, - spec: { node: 'node', disks: ['/dev/sdb', '/dev/sdc'] } - }); - }); - - // important test as moving the pool between nodes would destroy data - it('should leave the pool untouched when pool exists and is on a different node', async () => { - let stubs; - const node1 = new Node('node1', {}, []); - const node2 = new Node('node2'); - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const createPoolStub1 = sinon.stub(node1, 'createPool'); - const createPoolStub2 = sinon.stub(node2, 'createPool'); - createPoolStub1.resolves(pool); - createPoolStub2.resolves(pool); - - oper = createPoolOperator([node1, node2]); - const poolResource = createPoolResource('pool', 'node2', ['/dev/sdb', '/dev/sdc']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // we assign the pool to node1 but later in the event it will be on node2 - node1.pools.push(pool); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(createPoolStub1); - sinon.assert.notCalled(createPoolStub2); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'degraded', - reason: '', - disks: ['aio:///dev/sdb'], - capacity: 100, - used: 10, - spec: { node: 'node2', disks: ['/dev/sdb', '/dev/sdc'] } - }); - }); - - it('should set "reason" to error message when create pool fails', async () => { - let stubs; - const node = new Node('node'); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'create failed') - ); - oper = createPoolOperator([node]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledTwice(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'pending', - reason: 'Creating the pool', - disks: 
undefined, - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ - state: 'error', - reason: 'Error: create failed', - disks: undefined, - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - }); - - it('should ignore failure to update the resource state', async () => { - let stubs; - const node = new Node('node'); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'create failed') - ); - oper = createPoolOperator([node]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.updateStatus.resolves(new Error('http put error')); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledTwice(stubs.updateStatus); - }); - - it('should not create a pool if node does not exist', async () => { - let stubs; - oper = createPoolOperator([]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "new" event - oper.watcher.emit('new', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor does not run on node "node"', - disks: undefined, - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - }); - - it('should create a pool once the node arrives and is synced', async () => { - let stubs; - oper = createPoolOperator([]); - const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); - const poolResource2 = createPoolResource('pool', 'node', ['/dev/sdb']); - poolResource2.status.spec = { node: 'node', disks: ['/dev/sdb'] }; - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.list.returns([poolResource]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledTwice(stubs.updateStatus); - expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor does not run on node "node"', - disks: undefined, - spec: undefined - }); - - const node = new Node('node'); - const syncedStub = sinon.stub(node, 'isSynced'); - syncedStub.returns(false); - oper.registry._registerNode(node); - oper.registry.emit('node', { - eventType: 'mod', - object: node - }); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // node is not yet synced - 
sinon.assert.callCount(stubs.updateStatus, 4); - expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor does not run on node "node"', - disks: undefined, - spec: undefined - }); - expect(stubs.updateStatus.args[3][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor on node "node" is offline', - disks: undefined, - spec: undefined - }); - - syncedStub.returns(true); - oper.registry.emit('node', { - eventType: 'mod', - object: node - }); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // tried to create the pool but the node is a fake - sinon.assert.callCount(stubs.updateStatus, 7); - expect(stubs.updateStatus.args[5][5].status).to.deep.equal({ - state: 'pending', - reason: 'Creating the pool', - disks: undefined, - spec: undefined - }); - expect(stubs.updateStatus.args[6][5].status).to.deep.equal({ - state: 'error', - reason: 'Error: Broken connection to mayastor on node "node"', - disks: undefined, - spec: undefined - }); - }); - }); - - describe('del event', () => { - it('should destroy a pool', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const destroyStub = sinon.stub(pool, 'destroy'); - destroyStub.resolves(); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'degraded', - '', - 100, - 10, - { disks: ['/dev/sdb'], node: 'node' } - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "del" event - oper.watcher.emit('del', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(destroyStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not fail if pool does not exist', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const destroyStub = sinon.stub(pool, 'destroy'); - destroyStub.resolves(); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'offline', - '' - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // we create the inconsistency between k8s and real state - node.pools = []; - // trigger "del" event - oper.watcher.emit('del', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(destroyStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - - it('should destroy the pool even if it is on a different node', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - 
state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const destroyStub = sinon.stub(pool, 'destroy'); - destroyStub.resolves(); - const node1 = new Node('node1', {}, []); - const node2 = new Node('node2', {}, [pool]); - oper = createPoolOperator([node1, node2]); - const poolResource = createPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - [], - 'degraded', - '', - 100, - 10 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "del" event - oper.watcher.emit('del', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(destroyStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not crash if the destroy fails', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const destroyStub = sinon.stub(pool, 'destroy'); - destroyStub.rejects(new GrpcError(grpcCode.INTERNAL, 'destroy failed')); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'degraded', - '', - 100, - 10 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "del" event - oper.watcher.emit('del', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(destroyStub); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - }); - - describe('mod event', () => { - it('should not do anything if pool object has not changed', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb', 'aio:///dev/sdc'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb', '/dev/sdc'], - [], - 'degraded', - '' - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "mod" event - oper.watcher.emit('mod', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - - it('should not do anything if disks change', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - const poolResource = 
createPoolResource( - 'pool', - 'node', - ['/dev/sdc'], - [], - 'degraded', - '' - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "mod" event - oper.watcher.emit('mod', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - // the real state - expect(node.pools[0].disks[0]).to.equal('aio:///dev/sdb'); - }); - - it('should not do anything if node changes', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node1 = new Node('node1', {}, [pool]); - const node2 = new Node('node2', {}, []); - oper = createPoolOperator([node1, node2]); - const poolResource = createPoolResource( - 'pool', - 'node2', - ['/dev/sdb'], - [], - 'degraded', - '' - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - // trigger "mod" event - oper.watcher.emit('mod', poolResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - // called in response to registry new event - sinon.assert.calledOnce(stubs.updateStatus); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - }); - }); - }); - - describe('node events', () => { - let oper; // pool operator - - afterEach(async () => { - if (oper) { - await oper.stop(); - oper = null; - } - }); - - it('should create pool upon node sync event if it does not exist', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node = new Node('node', {}, []); - const createPoolStub = sinon.stub(node, 'createPool'); - const isSyncedStub = sinon.stub(node, 'isSynced'); - createPoolStub.resolves(pool); - isSyncedStub.onCall(0).returns(false); - isSyncedStub.onCall(1).returns(true); - oper = createPoolOperator([node]); - const poolResource1 = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'degraded', - '' - ); - const poolResource2 = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'pending', - 'mayastor on node "node" is offline' - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.onCall(0).returns(poolResource1); - stubs.get.onCall(1).returns(poolResource2); - stubs.list.returns([poolResource1]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - oper.registry.emit('node', { - eventType: 'sync', - object: node - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledTwice(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'pending', - reason: 'mayastor on node "node" is offline', - disks: ['aio:///dev/sdb'], - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - 
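// A note on the assertions in this block: sinon records every call's arguments
// in `stub.args`, an array of per-call argument arrays, so
// `stubs.updateStatus.args[n][5]` is the sixth argument of the n-th call,
// which in these tests is evidently the resource object whose status is being
// written. A tiny standalone illustration of that shape (names made up):
function sinonArgsShapeSketch () {
  const recorder = sinon.stub();
  recorder('a', 'b');
  recorder('c', 'd');
  return recorder.args; // => [['a', 'b'], ['c', 'd']]
}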
expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ - state: 'pending', - reason: 'Creating the pool', - disks: ['aio:///dev/sdb'], - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['aio:///dev/sdb']); - }); - - it('should add finalizer for new pool resource', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - // replica will trigger finalizer - const replica1 = new Replica({ uuid: 'UUID1' }); - const replica2 = new Replica({ uuid: 'UUID2' }); - replica1.pool = pool; - pool.replicas = [replica1]; - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - - const poolResource = createK8sPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - [], - 'online', - '', - 100, - 4 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.update.resolves(); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].metadata.finalizers).to.deep.equal([ - 'finalizer.mayastor.openebs.io' - ]); - - // add a second replica - should not change anything - pool.replicas.push(replica2); - oper.registry.emit('replica', { - eventType: 'new', - object: replica2 - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.update); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should remove finalizer when last replica is removed', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const replica1 = new Replica({ uuid: 'UUID1' }); - const replica2 = new Replica({ uuid: 'UUID2' }); - pool.replicas = [replica1, replica2]; - replica1.pool = pool; - replica2.pool = pool; - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - - const poolResource = createK8sPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - ['finalizer.mayastor.openebs.io'], - 'online', - '', - 100, - 4 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.update.resolves(); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.update); - pool.replicas.splice(1, 1); - oper.registry.emit('replica', { - eventType: 'del', - object: replica2 - }); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(stubs.update); - pool.replicas = []; - oper.registry.emit('replica', { - eventType: 'del', - object: replica1 - }); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].metadata.finalizers).to.have.lengthOf(0); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should not create pool upon node sync event if it exists', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node = new Node('node', {}, [pool]); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves(pool); - 
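// The node stubs above rely on sinon's promise-aware helpers: .resolves(v)
// makes the stubbed method return a promise fulfilled with v, .rejects(err)
// a rejected one, and .onCall(n) scopes the behaviour to a single call, which
// is how isSynced/createPool are given different answers per invocation in
// this file. A small self-contained example (the service object is made up):
function promiseStubSketch () {
  const svc = { fetchState: async () => 'real' };
  const fetchStub = sinon.stub(svc, 'fetchState');
  fetchStub.onCall(0).rejects(new Error('transient failure'));
  fetchStub.onCall(1).resolves('stubbed');
  return svc; // first call rejects, second resolves with 'stubbed'
}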
oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'degraded', - '', - 100, - 10 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.list.returns([poolResource]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(createPoolStub); - }); - - it('should not create pool upon node sync event if it exists on another node', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const node1 = new Node('node1', {}, []); - const node2 = new Node('node2', {}, [pool]); - const createPoolStub1 = sinon.stub(node1, 'createPool'); - const createPoolStub2 = sinon.stub(node2, 'createPool'); - createPoolStub1.resolves(pool); - createPoolStub2.resolves(pool); - oper = createPoolOperator([node1, node2]); - const poolResource = createPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - [], - 'degraded', - '', - 100, - 10 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.list.returns([poolResource]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.notCalled(createPoolStub1); - sinon.assert.notCalled(createPoolStub2); - }); - - it('should create pool with original spec if the resource spec changes', async () => { - let stubs; - const node = new Node('node'); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'create failed') - ); - const nodeNew = new Node('node_new'); - const createPoolStubNew = sinon.stub(nodeNew, 'createPool'); - createPoolStubNew.rejects( - new GrpcError(grpcCode.INTERNAL, 'create failed') - ); - - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - // modified spec with new node and new disk - 'node_new', - ['/dev/sdb_new'] - ); - // this is the original spec cached in the status - poolResource.status.spec = { node: 'node', disks: ['/dev/sdb'] }; - poolResource.status.disks = undefined; - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.list.returns([poolResource]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.calledTwice(stubs.updateStatus); - // new SPEC points to node_new, but MOAC knows better - sinon.assert.notCalled(createPoolStubNew); - // instead, it tries to recreate the pool based on the original SPEC - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); - }); - - it('should recreate pool with original disk URI', async () => { - let stubs; - const node = new Node('node'); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'create 
failed') - ); - - oper = createPoolOperator([node]); - // note this sets the disk URI - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - '', - 'pending' - ); - // this is the original spec cached in the status - poolResource.status.spec = { node: 'node', disks: ['/dev/sdb'] }; - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - stubs.list.returns([poolResource]); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.callCount(stubs.updateStatus, 4); - sinon.assert.calledTwice(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['aio:///dev/sdb']); - }); - - it('should remove pool upon pool new event if there is no pool resource', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const destroyStub = sinon.stub(pool, 'destroy'); - destroyStub.resolves(); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - - mockCache(oper.watcher, (arg) => { - stubs = arg; - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - sinon.assert.calledOnce(destroyStub); - }); - - it('should update resource properties upon pool mod event', async () => { - let stubs; - const offlineReason = 'mayastor does not run on the node "node"'; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - - const poolResource = createPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - [], - 'online', - '', - 100, - 4 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - // simulate pool mod event - pool.state = 'POOL_OFFLINE'; - oper.registry.emit('pool', { - eventType: 'mod', - object: pool - }); - // Give event time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'offline', - reason: offlineReason, - capacity: 100, - disks: ['aio:///dev/sdb'], - used: 4, - spec: { node: 'node1', disks: ['/dev/sdb'] } - }); - }); - - it('should ignore pool mod event if pool resource does not exist', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const node = new Node('node', {}, [pool]); - oper = createPoolOperator([node]); - - mockCache(oper.watcher, (arg) => { - stubs = arg; - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - // simulate pool mod event - pool.state = 'POOL_OFFLINE'; - oper.registry.emit('pool', { - eventType: 'mod', - object: pool - }); - // Give 
event time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should create pool upon pool del event if pool resource exist', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const node = new Node('node', {}, [pool]); - const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves(pool); - oper = createPoolOperator([node]); - const poolResource = createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - [], - 'online', - '', - 100, - 4 - ); - mockCache(oper.watcher, (arg) => { - stubs = arg; - stubs.get.returns(poolResource); - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(createPoolStub); - - node.pools = []; - oper.registry.emit('pool', { - eventType: 'del', - object: pool - }); - // Give event time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(createPoolStub); - sinon.assert.calledWith(createPoolStub, 'pool', ['aio:///dev/sdb']); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - state: 'pending', - reason: 'Creating the pool', - disks: ['aio:///dev/sdb'], - spec: { node: 'node', disks: ['/dev/sdb'] } - }); - }); - - it('should ignore pool del event if pool resource does not exist', async () => { - let stubs; - const pool = new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }); - const node = new Node('node', {}, []); - oper = createPoolOperator([node]); - mockCache(oper.watcher, (arg) => { - stubs = arg; - }); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - - node.pools = []; - oper.registry.emit('pool', { - eventType: 'del', - object: pool - }); - // Give event time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.updateStatus); - }); - }); -}; diff --git a/csi/moac/test/pool_test.js b/csi/moac/test/pool_test.js deleted file mode 100644 index ba95f1801..000000000 --- a/csi/moac/test/pool_test.js +++ /dev/null @@ -1,316 +0,0 @@ -// Unit tests for the pool object - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Node } = require('../dist/node'); -const { Pool } = require('../dist/pool'); -const { Replica } = require('../dist/replica'); -const { shouldFailWith } = require('./utils'); -const { grpcCode, GrpcError } = require('../dist/grpc_client'); - -module.exports = function () { - const props = { - name: 'pool', - disks: ['io_uring:///dev/sda'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }; - - describe('should emit event upon change of volatile property', () => { - let node, eventSpy, pool, newProps; - - beforeEach(() => { - node = new Node('node'); - eventSpy = sinon.spy(node, 'emit'); - pool = new Pool(props); - node._registerPool(pool); - newProps = 
_.clone(props); - }); - - it('state', () => { - newProps.state = 'POOL_DEGRADED'; - pool.merge(newProps, []); - - // First call is new-pool event upon registering the pool - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'pool', { - eventType: 'mod', - object: pool - }); - expect(pool.state).to.equal('POOL_DEGRADED'); - }); - - it('capacity', () => { - newProps.capacity = 101; - pool.merge(newProps, []); - - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'pool', { - eventType: 'mod', - object: pool - }); - expect(pool.capacity).to.equal(101); - }); - - it('used', () => { - newProps.used = 99; - pool.merge(newProps, []); - - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'pool', { - eventType: 'mod', - object: pool - }); - expect(pool.used).to.equal(99); - }); - - it('disk protocol', () => { - newProps.disks = ['aio:///dev/sda']; - pool.merge(newProps, []); - - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'pool', { - eventType: 'mod', - object: pool - }); - expect(pool.disks[0]).to.equal('aio:///dev/sda'); - }); - - it('disk device', () => { - newProps.disks = ['aio:///dev/sdb']; - pool.merge(newProps, []); - - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'pool', { - eventType: 'mod', - object: pool - }); - expect(pool.disks[0]).to.equal('aio:///dev/sdb'); - }); - }); - - it('should not emit event if nothing changed', () => { - const node = new Node('node'); - const spy = sinon.spy(node, 'emit'); - const pool = new Pool(props); - node._registerPool(pool); - const newProps = _.clone(props); - - pool.merge(newProps, []); - - // Create pool event is expected, but no other. - sinon.assert.calledOnce(spy); - sinon.assert.calledWithMatch(spy, 'pool', { eventType: 'new' }); - }); - - it('should properly merge replicas from the pool', () => { - const node = new Node('node'); - const spy = sinon.spy(node, 'emit'); - const pool = new Pool(props); - const modReplica = new Replica({ uuid: 'to-modify', uri: 'bdev:///to-modify?uuid=1' }); - const delReplica = new Replica({ uuid: 'to-delete', uri: 'bdev:///to-delete?uuid=2' }); - node._registerPool(pool); - pool.registerReplica(modReplica); - pool.registerReplica(delReplica); - - pool.merge(props, [{ uuid: 'to-create', uri: 'bdev:///to-create?uuid=3' }, { uuid: 'to-modify', uri: 'bdev:///to-modify?uuid=1', size: 10 }]); - - expect(pool.replicas).to.have.lengthOf(2); - // first 3 events are for pool create and initial two replicas - expect(spy.callCount).to.equal(6); - sinon.assert.calledWithMatch(spy.getCall(0), 'pool', { eventType: 'new' }); - sinon.assert.calledWith(spy.getCall(1), 'replica', { - eventType: 'new', - object: modReplica - }); - sinon.assert.calledWith(spy.getCall(2), 'replica', { - eventType: 'new', - object: delReplica - }); - // now come the events we want to test - sinon.assert.calledWithMatch(spy.getCall(3), 'replica', { - eventType: 'new', - object: { uuid: 'to-create' } - }); - sinon.assert.calledWith(spy.getCall(4), 'replica', { - eventType: 'mod', - object: modReplica - }); - sinon.assert.calledWith(spy.getCall(5), 'replica', { - eventType: 'del', - object: delReplica - }); - }); - - it('should print the pool name with a node name', () => { - const node = new Node('node'); - const pool = new Pool(props); - node._registerPool(pool); - expect(pool.toString()).to.equal('pool@node'); - }); - - it('should print the pool name without node name if not bound', () => { 
- const pool = new Pool(props); - expect(pool.toString()).to.equal('pool@nowhere'); - }); - - it('should bind the pool to node and then unbind it', (done) => { - const node = new Node('node'); - const pool = new Pool(props); - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.equal(pool); - expect(pool.node).to.equal(node); - - node.once('pool', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(pool); - setTimeout(() => { - expect(pool.node).to.be.undefined; - done(); - }, 0); - }); - pool.unbind(); - }); - pool.bind(node); - }); - - it('should unregister replica from the pool', () => { - const node = new Node('node'); - const pool = new Pool(props); - const replica = new Replica({ uuid: 'uuid', uri: 'bdev:///uuid?uuid=1' }); - node._registerPool(pool); - pool.registerReplica(replica); - expect(pool.replicas).to.have.lengthOf(1); - pool.unregisterReplica(replica); - expect(pool.replicas).to.have.lengthOf(0); - }); - - it('should destroy the pool with replica', async () => { - const node = new Node('node'); - const eventSpy = sinon.spy(node, 'emit'); - const stub = sinon.stub(node, 'call'); - stub.resolves({}); - const pool = new Pool(props); - node._registerPool(pool); - const replica = new Replica({ uuid: 'uuid', uri: 'bdev:///uuid?uuid=1' }); - pool.registerReplica(replica); - - await pool.destroy(); - - sinon.assert.calledOnce(stub); - sinon.assert.calledWithMatch(stub, 'destroyPool', { name: 'pool' }); - expect(node.pools).to.be.empty; - // first two events are for the new pool and new replica - expect(eventSpy.callCount).to.equal(4); - sinon.assert.calledWith(eventSpy.getCall(2), 'replica', { - eventType: 'del', - object: replica - }); - sinon.assert.calledWith(eventSpy.getCall(3), 'pool', { - eventType: 'del', - object: pool - }); - }); - - it('should offline the pool with replica', () => { - const node = new Node('node'); - const eventSpy = sinon.spy(node, 'emit'); - const pool = new Pool(props); - node._registerPool(pool); - const replica = new Replica({ uuid: 'uuid', uri: 'bdev:///uuid?uuid=1' }); - pool.registerReplica(replica); - - pool.offline(); - - expect(pool.state).to.equal('POOL_OFFLINE'); - expect(replica.isOffline()).to.be.true; - - // first two events are for the new pool and new replica - expect(eventSpy.callCount).to.equal(4); - sinon.assert.calledWith(eventSpy.getCall(2), 'replica', { - eventType: 'mod', - object: replica - }); - sinon.assert.calledWith(eventSpy.getCall(3), 'pool', { - eventType: 'mod', - object: pool - }); - }); - - it('should create replica on the pool', async () => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.resolves({ - uuid: 'uuid', - pool: 'pool', - size: 100, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev://blabla?uuid=blabla' - }); - const pool = new Pool(props); - node._registerPool(pool); - - const repl = await pool.createReplica('uuid', 100); - - sinon.assert.calledOnce(stub); - sinon.assert.calledWithMatch(stub, 'createReplica', { - uuid: 'uuid', - pool: 'pool', - size: 100, - thin: false, - share: 'REPLICA_NONE' - }); - expect(pool.replicas).to.have.lengthOf(1); - expect(repl.uuid).to.equal('uuid'); - }); - - it('should throw internal error if createReplica grpc fails', async () => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - const pool = new Pool(props); - node._registerPool(pool); - - await shouldFailWith(grpcCode.INTERNAL, 
async () => { - await pool.createReplica('uuid', 100); - }); - - expect(pool.replicas).to.have.lengthOf(0); - sinon.assert.calledOnce(stub); - sinon.assert.calledWithMatch(stub, 'createReplica', { - uuid: 'uuid', - pool: 'pool', - size: 100, - thin: false, - share: 'REPLICA_NONE' - }); - }); - - it('should correctly indicate if pool is accessible or not', () => { - const poolProps = _.clone(props); - poolProps.state = 'POOL_ONLINE'; - let pool = new Pool(poolProps); - expect(pool.isAccessible()).to.be.true; - - poolProps.state = 'POOL_FAULTED'; - pool = new Pool(poolProps); - expect(pool.isAccessible()).to.be.false; - - poolProps.state = 'POOL_DEGRADED'; - pool = new Pool(poolProps); - expect(pool.isAccessible()).to.be.true; - }); - - it('should return free space in the pool', () => { - const pool = new Pool(props); - expect(pool.freeBytes()).to.equal(96); - }); -}; diff --git a/csi/moac/test/registry_test.js b/csi/moac/test/registry_test.js deleted file mode 100644 index cc1a8f174..000000000 --- a/csi/moac/test/registry_test.js +++ /dev/null @@ -1,497 +0,0 @@ -// Unit tests for the registry class. - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Registry } = require('../dist/registry'); -const { Replica } = require('../dist/replica'); -const { Pool } = require('../dist/pool'); -const { Nexus } = require('../dist/nexus'); -const Node = require('./node_stub'); - -module.exports = function () { - it('should add a node to the registry and look up the node', () => { - const registry = new Registry({}); - registry.Node = Node; - let nodeEvent; - - registry.once('node', (ev) => { - nodeEvent = ev; - }); - registry.addNode('node', '127.0.0.1:123'); - expect(nodeEvent.eventType).to.equal('new'); - expect(nodeEvent.object.name).to.equal('node'); - expect(nodeEvent.object.endpoint).to.equal('127.0.0.1:123'); - - const node = registry.getNode('node'); - expect(node.name).to.equal('node'); - expect(node.endpoint).to.equal('127.0.0.1:123'); - - // ensure the events from the node are relayed by the registry - const events = ['node', 'pool', 'replica', 'nexus']; - events.forEach((ev) => { - registry.on(ev, () => { - const idx = events.findIndex((ent) => ent === ev); - expect(idx).to.not.equal(-1); - events.splice(idx, 1); - }); - }); - _.clone(events).forEach((ev) => node.emit(ev, {})); - expect(events).to.be.empty; - }); - - it('should not do anything if the same node already exists in the registry', () => { - const registry = new Registry({}); - registry.Node = Node; - - const nodeEvents = []; - registry.on('node', (ev) => { - nodeEvents.push(ev); - }); - - registry.addNode('node', '127.0.0.1:123'); - expect(nodeEvents).to.have.lengthOf(1); - expect(nodeEvents[0].eventType).to.equal('new'); - - registry.addNode('node', '127.0.0.1:123'); - expect(nodeEvents).to.have.lengthOf(1); - }); - - it('should reconnect node if it exists but grpc endpoint has changed', () => { - const registry = new Registry({}); - registry.Node = Node; - - const nodeEvents = []; - registry.on('node', (ev) => { - nodeEvents.push(ev); - }); - - registry.addNode('node', '127.0.0.1:123'); - registry.addNode('node', '127.0.0.1:124'); - expect(nodeEvents).to.have.lengthOf(2); - expect(nodeEvents[0].eventType).to.equal('new'); - expect(nodeEvents[1].eventType).to.equal('mod'); - }); - - it('should get a list of nodes from registry', () => { - const registry = new Registry({}); - registry.nodes.node1 = new 
Node('node1'); - registry.nodes.node2 = new Node('node2'); - registry.nodes.node3 = new Node('node3'); - const list = registry.getNodes(); - expect(list).to.have.lengthOf(3); - }); - - it('should remove a node from the registry', () => { - const registry = new Registry({}); - const node = new Node('node'); - registry.nodes.node = node; - let nodeEvent; - registry.once('node', (ev) => { - nodeEvent = ev; - }); - registry.removeNode('node'); - expect(registry.nodes).to.not.have.keys('node'); - expect(nodeEvent.eventType).to.equal('del'); - expect(nodeEvent.object.name).to.equal('node'); - - // ensure the events from the node are not relayed - const events = ['node', 'pool', 'replica', 'nexus']; - events.forEach((ev) => { - registry.on(ev, () => { - throw new Error('Received event after the node was removed'); - }); - }); - events.forEach((ev) => node.emit(ev, {})); - }); - - it('should not do anything if removed node does not exist', () => { - const registry = new Registry({}); - let nodeEvent; - registry.once('node', (ev) => { - nodeEvent = ev; - }); - registry.removeNode('node'); - expect(nodeEvent).to.be.undefined; - }); - - it('should get a list of pools from registry', () => { - const registry = new Registry({}); - const node1 = new Node('node1', {}, [ - new Pool({ name: 'pool1', disks: [] }) - ]); - const node2 = new Node('node2', {}, [ - new Pool({ name: 'pool2a', disks: [] }), - new Pool({ name: 'pool2b', disks: [] }) - ]); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - const pools = registry.getPools(); - pools.sort(); - expect(pools).to.have.lengthOf(3); - expect(pools[0].name).to.equal('pool1'); - expect(pools[1].name).to.equal('pool2a'); - expect(pools[2].name).to.equal('pool2b'); - const pool = registry.getPool('pool2a'); - expect(pool.name).to.equal('pool2a'); - }); - - it('should get a list of nexus from registry', () => { - const UUID1 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb1'; - const UUID2 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb2'; - const UUID3 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb3'; - const registry = new Registry({}); - const node1 = new Node('node1', {}, [], [new Nexus({ uuid: UUID1 })]); - const node2 = new Node( - 'node2', - {}, - [], - [new Nexus({ uuid: UUID2 }), new Nexus({ uuid: UUID3 })] - ); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - const nexuses = registry.getNexuses(); - nexuses.sort(); - expect(nexuses).to.have.lengthOf(3); - expect(nexuses[0].uuid).to.equal(UUID1); - expect(nexuses[1].uuid).to.equal(UUID2); - expect(nexuses[2].uuid).to.equal(UUID3); - const nexus = registry.getNexus(UUID2); - expect(nexus.uuid).to.equal(UUID2); - }); - - it('should get a list of replicas from registry', () => { - const UUID1 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb1'; - const UUID2 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb2'; - const UUID3 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb3'; - const pool1 = new Pool({ name: 'pool1', disks: [] }); - const pool2a = new Pool({ name: 'pool2a', disks: [] }); - const pool2b = new Pool({ name: 'pool2b', disks: [] }); - const node1 = new Node('node1'); - node1.pools = [pool1]; - const node2 = new Node('node2'); - node2.pools = [pool2a, pool2b]; - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - pool1.replicas = [ - new Replica({ uuid: UUID1 }), - new Replica({ uuid: UUID2 }) - ]; - pool2b.replicas = [new Replica({ uuid: UUID3 })]; - - let replicas = registry.getReplicas(); - replicas.sort(); - expect(replicas).to.have.lengthOf(3); - 
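// getReplicas() is exercised here as a flatten over every node's pools.
// Conceptually it amounts to something like the sketch below; this is an
// assumption based on the expectations in this test, not the actual registry
// implementation:
const getReplicasSketch = (nodesByName) =>
  Object.values(nodesByName)
    .flatMap((n) => n.pools)
    .flatMap((p) => p.replicas);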
expect(replicas[0].uuid).to.equal(UUID1); - expect(replicas[1].uuid).to.equal(UUID2); - expect(replicas[2].uuid).to.equal(UUID3); - replicas = registry.getReplicaSet(UUID1); - expect(replicas).to.have.lengthOf(1); - expect(replicas[0].uuid).to.equal(UUID1); - }); - - it('should close the registry', () => { - const registry = new Registry({}); - const node = new Node('node'); - const connectStub = sinon.stub(node, 'connect'); - const disconnectStub = sinon.stub(node, 'disconnect'); - registry.nodes.node = node; - registry.close(); - - sinon.assert.notCalled(connectStub); - sinon.assert.calledOnce(disconnectStub); - expect(registry.nodes).to.not.have.keys('node'); - }); - - it('should get capacity of pools on all or specified nodes', () => { - // should count - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 10 - }); - // should count - const pool2a = new Pool({ - name: 'pool2a', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 25 - }); - // should not count - const pool2b = new Pool({ - name: 'pool2b', - disks: [], - state: 'POOL_FAULTED', - capacity: 100, - used: 55 - }); - // should not count - const pool2c = new Pool({ - name: 'pool2c', - disks: [], - state: 'POOL_OFFLINE', - capacity: 100, - used: 99 - }); - const node1 = new Node('node1'); - node1.pools = [pool1]; - pool1.bind(node1); - const node2 = new Node('node2'); - node2.pools = [pool2a, pool2b, pool2c]; - pool2a.bind(node2); - pool2b.bind(node2); - pool2c.bind(node2); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - let cap = registry.getCapacity(); - expect(cap).to.equal(90 + 75); - cap = registry.getCapacity('node2'); - expect(cap).to.equal(75); - }); - - describe('pool selection', function () { - it('should prefer ONLINE pool', () => { - // has more free space but is degraded - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 25 - }); - const pool3 = new Pool({ - name: 'pool3', - disks: [], - state: 'POOL_OFFLINE', - capacity: 100, - used: 0 - }); - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const node3 = new Node('node3', {}, [pool3]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - registry.nodes.node3 = node3; - - let pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool2'); - expect(pools[0].state).to.equal('POOL_ONLINE'); - expect(pools[1].name).to.equal('pool1'); - pool1.state = 'POOL_ONLINE'; - pool2.state = 'POOL_DEGRADED'; - pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool1'); - expect(pools[1].name).to.equal('pool2'); - }); - - it('should prefer pool with fewer volumes', () => { - const UUID1 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb1'; - const UUID2 = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cb2'; - // has more free space but has more replicas - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 10 - }); - pool1.replicas = [ - new Replica({ uuid: UUID1 }), - new Replica({ uuid: UUID2 }) - ]; - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 25 - }); - pool2.replicas = [new Replica({ uuid: 
UUID1 })]; - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - let pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool2'); - expect(pools[1].name).to.equal('pool1'); - pool1.replicas = []; - pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool1'); - expect(pools[1].name).to.equal('pool2'); - }); - - it('should prefer pool with more free space', () => { - // has more free space - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 10 - }); - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 20 - }); - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - let pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool1'); - expect(pools[1].name).to.equal('pool2'); - pool1.used = 25; - pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool2'); - expect(pools[1].name).to.equal('pool1'); - }); - - it('should not return any pool if no suitable pool was found', () => { - // this one is corrupted - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_FAULTED', - capacity: 100, - used: 10 - }); - // this one is too small - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 26 - }); - // is not in must list - const pool3 = new Pool({ - name: 'pool3', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 10 - }); - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const node3 = new Node('node3', {}, [pool3]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - registry.nodes.node3 = node3; - - const pools = registry.choosePools(75, ['node1', 'node2'], []); - expect(pools).to.have.lengthOf(0); - }); - - it('should not return two pools on the same node', () => { - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 11 - }); - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 10 - }); - const node1 = new Node('node1', {}, [pool1, pool2]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - - const pools = registry.choosePools(75, [], []); - expect(pools).to.have.lengthOf(1); - }); - - it('should choose a pool on node requested by user', () => { - // this one would be normally preferred - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 0 - }); - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 25 - }); - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - const pools = registry.choosePools(75, ['node2'], []); - expect(pools).to.have.lengthOf(1); - expect(pools[0].name).to.equal('pool2'); - }); - - it('should 
prefer pool on node preferred by user', () => { - // this one would be normally preferred - const pool1 = new Pool({ - name: 'pool1', - disks: [], - state: 'POOL_ONLINE', - capacity: 100, - used: 0 - }); - const pool2 = new Pool({ - name: 'pool2', - disks: [], - state: 'POOL_DEGRADED', - capacity: 100, - used: 25 - }); - const node1 = new Node('node1', {}, [pool1]); - const node2 = new Node('node2', {}, [pool2]); - const registry = new Registry({}); - registry.nodes.node1 = node1; - registry.nodes.node2 = node2; - - const pools = registry.choosePools(75, [], ['node2']); - expect(pools).to.have.lengthOf(2); - expect(pools[0].name).to.equal('pool2'); - expect(pools[1].name).to.equal('pool1'); - }); - }); -}; diff --git a/csi/moac/test/replica_test.js b/csi/moac/test/replica_test.js deleted file mode 100644 index 8aaa82036..000000000 --- a/csi/moac/test/replica_test.js +++ /dev/null @@ -1,250 +0,0 @@ -// Unit tests for the replica object - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Node } = require('../dist/node'); -const { Pool } = require('../dist/pool'); -const { Replica } = require('../dist/replica'); -const { shouldFailWith } = require('./utils'); -const { grpcCode, GrpcError } = require('../dist/grpc_client'); - -const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; - -module.exports = function () { - const poolProps = { - name: 'pool', - disks: ['/dev/sda'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }; - const props = { - uuid: UUID, - pool: 'pool', - size: 100, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=1' - }; - - describe('mod event', () => { - let node, eventSpy, replica, pool, newProps; - - beforeEach(() => { - node = new Node('node'); - eventSpy = sinon.spy(node, 'emit'); - pool = new Pool(poolProps); - node._registerPool(pool); - replica = new Replica(props); - pool.registerReplica(replica); - newProps = _.clone(props); - }); - - it('should ignore change of pool property', () => { - newProps.pool = 'some-other-pool'; - replica.merge(newProps); - - // First two events are new pool and new replica events - sinon.assert.calledTwice(eventSpy); - sinon.assert.calledWith(eventSpy.firstCall, 'pool', { - eventType: 'new', - object: pool - }); - sinon.assert.calledWith(eventSpy.secondCall, 'replica', { - eventType: 'new', - object: replica - }); - expect(replica.pool).to.equal(pool); - expect(replica.pool.name).to.equal('pool'); - }); - - it('should emit event upon change of size property', () => { - newProps.size = 1000; - replica.merge(newProps); - - // First two events are new pool and new replica events - sinon.assert.calledThrice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'replica', { - eventType: 'mod', - object: replica - }); - expect(replica.size).to.equal(1000); - }); - - it('should emit event upon change of share and uri property', () => { - newProps.share = 'REPLICA_NVMF'; - newProps.uri = 'nvmf://blabla'; - replica.merge(newProps); - - // First two events are new pool and new replica events - sinon.assert.calledThrice(eventSpy); - sinon.assert.calledWith(eventSpy.lastCall, 'replica', { - eventType: 'mod', - object: replica - }); - expect(replica.share).to.equal('REPLICA_NVMF'); - expect(replica.uri).to.equal('nvmf://blabla'); - }); - }); - - it('should bind the replica to pool and then unbind it', (done) => { - const node = new Node('node'); - const pool = new Pool(poolProps); - node._registerPool(pool); - 
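// The bind/unbind tests in this file use mocha's callback style: the test
// receives `done` and only calls it from inside nested once('replica', ...)
// handlers, so it can only pass if the 'new' and then 'del' events really fire
// in that order. Skeleton of the pattern with a plain EventEmitter (standalone
// illustration, not taken from the moac sources):
function orderedEventsSketch (done) {
  const { EventEmitter } = require('events');
  const src = new EventEmitter();
  src.once('evt', (first) => {
    expect(first).to.equal('new');
    src.once('evt', (second) => {
      expect(second).to.equal('del');
      done(); // reached only after both events arrived in order
    });
    src.emit('evt', 'del');
  });
  src.emit('evt', 'new');
}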
const replica = new Replica(props); - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('new'); - expect(ev.object).to.equal(replica); - expect(replica.pool).to.equal(pool); - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(replica); - setTimeout(() => { - expect(replica.pool).to.be.undefined; - done(); - }, 0); - }); - replica.unbind(); - }); - replica.bind(pool); - }); - - it('should offline the replica', (done) => { - const node = new Node('node'); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('mod'); - expect(ev.object).to.equal(replica); - expect(replica.isOffline()).to.be.true; - done(); - }); - replica.offline(); - }); - - it('should share the replica', async () => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.resolves({ uri: 'nvmf://blabla' }); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - const uri = await replica.setShare('REPLICA_NVMF'); - - sinon.assert.calledOnce(stub); - sinon.assert.calledWith(stub, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); - expect(uri).to.equal('nvmf://blabla'); - expect(replica.share).to.equal('REPLICA_NVMF'); - expect(replica.uri).to.equal('nvmf://blabla'); - }); - - it('should throw if grpc fails during sharing', async () => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - await shouldFailWith(grpcCode.INTERNAL, async () => { - await replica.setShare('REPLICA_NVMF'); - }); - expect(replica.share).to.equal('REPLICA_NONE'); - }); - - it('should destroy the replica', (done) => { - const node = new Node('node'); - const callStub = sinon.stub(node, 'call'); - callStub.resolves({}); - const isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(true); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(replica); - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'destroyReplica', { uuid: UUID }); - setTimeout(() => { - expect(replica.pool).to.be.undefined; - expect(pool.replicas).to.have.lengthOf(0); - done(); - }, 0); - }); - replica.destroy(); - }); - - it('should not remove the replica if grpc fails', async () => { - const node = new Node('node'); - const callStub = sinon.stub(node, 'call'); - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - const isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(true); - const eventSpy = sinon.spy(node, 'emit'); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - await shouldFailWith(grpcCode.INTERNAL, async () => { - await replica.destroy(); - }); - - sinon.assert.calledOnce(callStub); - sinon.assert.calledWith(callStub, 'destroyReplica', { uuid: UUID }); - // it is called when creating the pool and replica - sinon.assert.calledTwice(eventSpy); - 
sinon.assert.calledWith(eventSpy.firstCall, 'pool', { - eventType: 'new', - object: pool - }); - sinon.assert.calledWith(eventSpy.secondCall, 'replica', { - eventType: 'new', - object: replica - }); - expect(replica.pool).to.equal(pool); - expect(pool.replicas).to.have.lengthOf(1); - }); - - it('should fake the destroy of the replica if the node is offline', (done) => { - const node = new Node('node'); - const callStub = sinon.stub(node, 'call'); - callStub.rejects(new GrpcError(grpcCode.INTERNAL, 'Node is offline')); - const isSyncedStub = sinon.stub(node, 'isSynced'); - isSyncedStub.returns(false); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(replica); - sinon.assert.notCalled(callStub); - setTimeout(() => { - expect(replica.pool).to.be.undefined; - expect(pool.replicas).to.have.lengthOf(0); - done(); - }, 0); - }); - replica.destroy(); - }); -}; diff --git a/csi/moac/test/rest_api_test.js b/csi/moac/test/rest_api_test.js deleted file mode 100644 index 037ba19fa..000000000 --- a/csi/moac/test/rest_api_test.js +++ /dev/null @@ -1,169 +0,0 @@ -// Unit tests for the REST API - -'use strict'; - -const expect = require('chai').expect; -const http = require('http'); -const sinon = require('sinon'); -const { Registry } = require('../dist/registry'); -const { Node } = require('../dist/node'); -const { GrpcError, grpcCode } = require('../dist/grpc_client'); -const { ApiServer } = require('../dist/rest_api'); - -const PORT = 12312; -const STAT_COUNTER = 1000000; // feels good! -const UUID1 = '02de3df9-ce18-4164-89e1-b1cbf7a88e51'; -const UUID2 = '02de3df9-ce18-4164-89e1-b1cbf7a88e52'; -const UUID3 = '02de3df9-ce18-4164-89e1-b1cbf7a88e53'; - -module.exports = function () { - let apiServer; - let call1, call2, call3, call4; - - before(() => { - const node1 = new Node('node1'); - const node2 = new Node('node2'); - const node3 = new Node('node3'); - const node4 = new Node('node4'); - const registry = new Registry({}); - registry.nodes = { - node1, - node2, - node3, - node4 - }; - call1 = sinon.stub(node1, 'call'); - call2 = sinon.stub(node2, 'call'); - call3 = sinon.stub(node3, 'call'); - call4 = sinon.stub(node4, 'call'); - call1.resolves({ - replicas: [ - { - uuid: UUID1, - pool: 'pool1', - stats: { - numReadOps: STAT_COUNTER, - numWriteOps: STAT_COUNTER, - bytesRead: STAT_COUNTER, - bytesWritten: STAT_COUNTER - } - }, - { - uuid: UUID2, - pool: 'pool2', - stats: { - numReadOps: STAT_COUNTER, - numWriteOps: STAT_COUNTER, - bytesRead: STAT_COUNTER, - bytesWritten: STAT_COUNTER - } - } - ] - }); - call2.rejects(new GrpcError(grpcCode.INTERNAL, 'test failure')); - call3.resolves({ - replicas: [ - { - uuid: UUID3, - pool: 'pool3', - stats: { - numReadOps: STAT_COUNTER, - numWriteOps: STAT_COUNTER, - bytesRead: STAT_COUNTER, - bytesWritten: STAT_COUNTER - } - } - ] - }); - call4.resolves({ - replicas: [] - }); - - apiServer = new ApiServer(registry); - apiServer.start(PORT); - }); - - after(() => { - apiServer.stop(); - }); - - it('should get ok for root url', (done) => { - // TODO: Use user-friendly "request" lib when we have more tests - http - .get('http://127.0.0.1:' + PORT + '/', (resp) => { - expect(resp.statusCode).to.equal(200); - - let data = ''; - resp.on('data', (chunk) => { - data += chunk; - }); - resp.on('end', () => { - const obj = JSON.parse(data); - expect(obj).to.deep.equal({}); - done(); - }); - 
}) - .on('error', done); - }); - - it('should get volume stats', (done) => { - http - .get('http://127.0.0.1:' + PORT + '/stats', (resp) => { - expect(resp.statusCode).to.equal(200); - - let data = ''; - resp.on('data', (chunk) => { - data += chunk; - }); - resp.on('end', () => { - const vols = JSON.parse(data); - sinon.assert.calledOnce(call1); - sinon.assert.calledWith(call1, 'statReplicas', {}); - sinon.assert.calledOnce(call2); - sinon.assert.calledWith(call2, 'statReplicas', {}); - sinon.assert.calledOnce(call3); - sinon.assert.calledWith(call3, 'statReplicas', {}); - sinon.assert.calledOnce(call4); - sinon.assert.calledWith(call4, 'statReplicas', {}); - - expect(vols).to.have.lengthOf(3); - - expect(vols[0].uuid).equal(UUID1); - expect(vols[0].pool).equal('pool1'); - expect(vols[0].node).equal('node1'); - expect(vols[0].timestamp).to.be.a('string'); - // time delta between now and then is unlikely to be > 1s - expect(new Date() - new Date(vols[0].timestamp)).to.be.below(1000); - expect(vols[0].num_read_ops).equal(STAT_COUNTER); - expect(vols[0].num_write_ops).equal(STAT_COUNTER); - expect(vols[0].bytes_read).equal(STAT_COUNTER); - expect(vols[0].bytes_written).equal(STAT_COUNTER); - - expect(vols[1].uuid).equal(UUID2); - expect(vols[1].pool).equal('pool2'); - expect(vols[1].node).equal('node1'); - expect(vols[1].timestamp).to.be.a('string'); - // time delta between now and then is unlikely to be > 1s - expect(new Date() - new Date(vols[1].timestamp)).to.be.below(1000); - expect(vols[1].num_read_ops).equal(STAT_COUNTER); - expect(vols[1].num_write_ops).equal(STAT_COUNTER); - expect(vols[1].bytes_read).equal(STAT_COUNTER); - expect(vols[1].bytes_written).equal(STAT_COUNTER); - - expect(vols[2].uuid).equal(UUID3); - expect(vols[2].pool).equal('pool3'); - expect(vols[2].node).equal('node3'); - expect(vols[2].timestamp).to.be.a('string'); - // time delta between now and then is unlikely to be > 1s - expect(new Date() - new Date(vols[2].timestamp)).to.be.below(1000); - expect(vols[2].num_read_ops).equal(STAT_COUNTER); - expect(vols[2].num_write_ops).equal(STAT_COUNTER); - expect(vols[2].bytes_read).equal(STAT_COUNTER); - expect(vols[2].bytes_written).equal(STAT_COUNTER); - - done(); - }); - }) - .on('error', done); - }); -}; diff --git a/csi/moac/test/utils.js b/csi/moac/test/utils.js deleted file mode 100644 index 3fec63980..000000000 --- a/csi/moac/test/utils.js +++ /dev/null @@ -1,56 +0,0 @@ -// Common utility functions used throughout the tests - -'use strict'; - -const sleep = require('sleep-promise'); - -// Wait until the test function yields true, calling it in exponential -// backoff intervals. -async function waitUntil (test, timeout, reason) { - let delay = 1; - if (typeof timeout === 'string') { - reason = timeout; - timeout = undefined; - } - timeout = timeout || 1024; - reason = reason || 'something'; - - while (true) { - const done = await test(); - if (done) { - return; - } - if (timeout <= 0) { - throw new Error(`Timed out waiting for ${reason}`); - } - await sleep(delay); - timeout -= delay; - delay *= 2; - if (delay > 100) { - delay = 100; - } - } -} - -// Check that the test callback which should return a future fails with -// given grpc error code. -// -// TODO: Combination of external function that is awaited here and wrapped -// inside try-catch, results in freaky behaviour sometimes like printing -// error to output. 
-async function shouldFailWith (code, test) { - try { - await test(); - } catch (err) { - if (err.code !== code) { - throw new Error(`Expected error code ${code} but got: ${err}`); - } - return; - } - throw new Error('Expected error'); -} - -module.exports = { - shouldFailWith, - waitUntil -}; diff --git a/csi/moac/test/volume_operator_test.js b/csi/moac/test/volume_operator_test.js deleted file mode 100644 index b48529e41..000000000 --- a/csi/moac/test/volume_operator_test.js +++ /dev/null @@ -1,973 +0,0 @@ -// Unit tests for the volume operator - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const EventEmitter = require('events'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const { KubeConfig } = require('@kubernetes/client-node'); -const { Registry } = require('../dist/registry'); -const { Volume } = require('../dist/volume'); -const { Volumes } = require('../dist/volumes'); -const { VolumeOperator, VolumeResource } = require('../dist/volume_operator'); -const { GrpcError, grpcCode } = require('../dist/grpc_client'); -const { mockCache } = require('./watcher_stub'); -const Node = require('./node_stub'); -const { Nexus } = require('../dist/nexus'); -const { Replica } = require('../dist/replica'); -const { Pool } = require('../dist/pool'); - -const UUID = 'd01b8bfb-0116-47b0-a03a-447fcbdc0e99'; -const NAMESPACE = 'mayastor'; -const EVENT_PROPAGATION_DELAY = 10; - -const fakeConfig = { - clusters: [ - { - name: 'cluster', - server: 'foo.company.com' - } - ], - contexts: [ - { - cluster: 'cluster', - user: 'user' - } - ], - users: [{ name: 'user' }] -}; - -function defaultMeta (uuid) { - return { - creationTimestamp: '2019-02-15T18:23:53Z', - generation: 1, - name: uuid, - namespace: NAMESPACE, - resourceVersion: '627981', - selfLink: `/apis/openebs.io/v1alpha1/namespaces/${NAMESPACE}/mayastorvolumes/${uuid}`, - uid: 'd99f06a9-314e-11e9-b086-589cfc0d76a7' - }; -} - -const defaultSpec = { - replicaCount: 2, - local: true, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node3', 'node2', 'node1'], - requiredBytes: 100, - limitBytes: 120, - protocol: 'nvmf' -}; - -const defaultStatus = { - size: 110, - targetNodes: ['node2'], - state: 'healthy', - nexus: { - deviceUri: 'nvmf://host/nqn', - state: 'NEXUS_ONLINE', - node: 'node2', - children: [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE' - }, - { - uri: 'nvmf://node1/' + UUID, - state: 'CHILD_ONLINE' - } - ] - }, - replicas: [ - { - uri: 'bdev:///' + UUID, - node: 'node2', - pool: 'pool2', - offline: false - }, - { - uri: 'nvmf://node1/' + UUID, - node: 'node1', - pool: 'pool1', - offline: false - } - ] -}; - -// Function that creates a volume object corresponding to default spec and -// status defined above. 
-function createDefaultVolume (registry) { - const node1 = new Node('node1'); - const node2 = new Node('node2'); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volume.state = defaultStatus.state; - volume.size = defaultStatus.size; - volume.publishedOn = defaultStatus.targetNodes[0]; - volume.nexus = new Nexus({ - uuid: UUID, - size: defaultStatus.size, - deviceUri: defaultStatus.nexus.deviceUri, - state: defaultStatus.nexus.state, - children: defaultStatus.nexus.children - }); - volume.nexus.node = node2; - - volume.replicas.node1 = new Replica({ - uuid: UUID, - size: defaultStatus.size, - share: 'NVMF', - uri: defaultStatus.replicas[1].uri - }); - volume.replicas.node1.pool = new Pool({ - name: 'pool1', - disks: ['/dev/sda'], - state: 'POOL_ONLINE', - capacity: 1000, - used: 100 - }); - volume.replicas.node1.pool.node = node1; - - volume.replicas.node2 = new Replica({ - uuid: UUID, - size: defaultStatus.size, - share: 'NONE', - uri: defaultStatus.replicas[0].uri - }); - volume.replicas.node2.pool = new Pool({ - name: 'pool2', - disks: ['/dev/sda'], - state: 'POOL_ONLINE', - capacity: 1000, - used: 100 - }); - volume.replicas.node2.pool.node = node2; - - return volume; -} - -// Create k8s volume resource object -function createK8sVolumeResource (uuid, spec, status) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: defaultMeta(uuid), - spec: spec - }; - if (status) { - obj.status = status; - } - return obj; -} - -// Create volume resource object -function createVolumeResource (uuid, spec, status) { - return new VolumeResource(createK8sVolumeResource(uuid, spec, status)); -} - -// Create a pool operator object suitable for testing - with fake watcher -// and fake k8s api client. 
-async function createVolumeOperator (volumes, stubsCb) { - const kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - const oper = new VolumeOperator(NAMESPACE, kc, volumes); - mockCache(oper.watcher, stubsCb); - await oper.start(); - // give time to registry to install its callbacks - await sleep(EVENT_PROPAGATION_DELAY); - return oper; -} - -module.exports = function () { - describe('VolumeResource constructor', () => { - it('should create mayastor volume with status', () => { - const res = createVolumeResource(UUID, defaultSpec, defaultStatus); - expect(res.metadata.name).to.equal(UUID); - expect(res.spec.replicaCount).to.equal(2); - expect(res.spec.local).to.be.true; - expect(res.spec.preferredNodes).to.have.lengthOf(2); - expect(res.spec.preferredNodes[0]).to.equal('node1'); - expect(res.spec.preferredNodes[1]).to.equal('node2'); - expect(res.spec.requiredNodes).to.have.lengthOf(3); - expect(res.spec.requiredNodes[0]).to.equal('node3'); - expect(res.spec.requiredNodes[1]).to.equal('node2'); - expect(res.spec.requiredNodes[2]).to.equal('node1'); - expect(res.spec.requiredBytes).to.equal(100); - expect(res.spec.limitBytes).to.equal(120); - expect(res.status.size).to.equal(110); - expect(res.status.state).to.equal('healthy'); - expect(res.status.nexus.deviceUri).to.equal('nvmf://host/nqn'); - expect(res.status.nexus.state).to.equal('NEXUS_ONLINE'); - expect(res.status.nexus.node).to.equal('node2'); - expect(res.status.nexus.children).to.have.length(2); - expect(res.status.nexus.children[0].uri).to.equal('bdev:///' + UUID); - expect(res.status.nexus.children[0].state).to.equal('CHILD_ONLINE'); - expect(res.status.nexus.children[1].uri).to.equal('nvmf://node1/' + UUID); - expect(res.status.nexus.children[1].state).to.equal('CHILD_ONLINE'); - expect(res.status.replicas).to.have.lengthOf(2); - // replicas should be sorted by node name - expect(res.status.replicas[0].uri).to.equal('nvmf://node1/' + UUID); - expect(res.status.replicas[0].node).to.equal('node1'); - expect(res.status.replicas[0].pool).to.equal('pool1'); - expect(res.status.replicas[0].offline).to.equal(false); - expect(res.status.replicas[1].uri).to.equal('bdev:///' + UUID); - expect(res.status.replicas[1].node).to.equal('node2'); - expect(res.status.replicas[1].pool).to.equal('pool2'); - expect(res.status.replicas[1].offline).to.equal(false); - }); - - it('should create mayastor volume with unknown state', () => { - const res = createVolumeResource( - UUID, - { - replicaCount: 1, - requiredBytes: 100 - }, - { - size: 100, - targetNodes: ['node2'], - state: 'online' // "online" is not a valid volume state - } - ); - expect(res.metadata.name).to.equal(UUID); - expect(res.spec.replicaCount).to.equal(1); - expect(res.status.size).to.equal(100); - expect(res.status.targetNodes).to.deep.equal(['node2']); - expect(res.status.state).to.equal('unknown'); - }); - - it('should create mayastor volume with status without nexus', () => { - const res = createVolumeResource( - UUID, - { - replicaCount: 3, - local: false, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node2'], - requiredBytes: 100, - limitBytes: 120 - }, - { - size: 110, - targetNodes: ['node2'], - state: 'healthy', - replicas: [] - } - ); - - expect(res.metadata.name).to.equal(UUID); - expect(res.spec.replicaCount).to.equal(3); - expect(res.spec.local).to.be.false; - expect(res.spec.preferredNodes).to.have.lengthOf(2); - expect(res.spec.preferredNodes[0]).to.equal('node1'); - expect(res.spec.preferredNodes[1]).to.equal('node2'); - 
expect(res.spec.requiredNodes).to.have.lengthOf(1); - expect(res.spec.requiredNodes[0]).to.equal('node2'); - expect(res.spec.requiredBytes).to.equal(100); - expect(res.spec.limitBytes).to.equal(120); - expect(res.status.size).to.equal(110); - expect(res.status.targetNodes).to.deep.equal(['node2']); - expect(res.status.state).to.equal('healthy'); - expect(res.status.nexus).is.undefined; - expect(res.status.replicas).to.have.lengthOf(0); - }); - - it('should create mayastor volume without status', () => { - const res = createVolumeResource(UUID, { - replicaCount: 3, - local: true, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node2'], - requiredBytes: 100, - limitBytes: 120 - }); - expect(res.metadata.name).to.equal(UUID); - expect(res.spec.replicaCount).to.equal(3); - expect(res.status).to.be.undefined; - }); - - it('should create mayastor volume without optional parameters', () => { - const res = createVolumeResource(UUID, { - requiredBytes: 100 - }); - expect(res.metadata.name).to.equal(UUID); - expect(res.spec.replicaCount).to.equal(1); - expect(res.spec.local).to.be.false; - expect(res.spec.preferredNodes).to.have.lengthOf(0); - expect(res.spec.requiredNodes).to.have.lengthOf(0); - expect(res.spec.requiredBytes).to.equal(100); - expect(res.spec.limitBytes).to.equal(0); - expect(res.status).to.be.undefined; - }); - - it('should throw if requiredSize is missing', () => { - expect(() => createVolumeResource(UUID, { - replicaCount: 3, - local: true, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node2'], - limitBytes: 120 - })).to.throw(); - }); - - it('should throw if UUID is invalid', () => { - expect(() => createVolumeResource('blabla', { - replicaCount: 3, - local: true, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node2'], - requiredBytes: 100, - limitBytes: 120 - })).to.throw(); - }); - }); - - describe('init method', () => { - let kc, oper, fakeApiStub; - - beforeEach(() => { - const registry = new Registry({}); - kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - oper = new VolumeOperator(NAMESPACE, kc, registry); - const makeApiStub = sinon.stub(kc, 'makeApiClient'); - const fakeApi = { - createCustomResourceDefinition: () => null - }; - fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); - makeApiStub.returns(fakeApi); - }); - - afterEach(() => { - if (oper) { - oper.stop(); - oper = undefined; - } - }); - - it('should create CRD if it does not exist', async () => { - fakeApiStub.resolves(); - await oper.init(kc); - }); - - it('should ignore error if CRD already exists', async () => { - fakeApiStub.rejects({ - statusCode: 409 - }); - await oper.init(kc); - }); - - it('should throw if CRD creation fails', async () => { - fakeApiStub.rejects({ - statusCode: 404 - }); - try { - await oper.init(kc); - } catch (err) { - return; - } - throw new Error('Init did not fail'); - }); - }); - - describe('watcher events', () => { - let oper; // volume operator - - afterEach(async () => { - if (oper) { - await oper.stop(); - oper = null; - } - }); - - it('should call import volume for existing resources when starting the operator', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const importVolumeStub = sinon.stub(volumes, 'importVolume'); - // return value is not used so just return something - importVolumeStub.returns({ uuid: UUID }); - - const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await createVolumeOperator(volumes, (arg) => { - stubs 
= arg; - stubs.get.returns(volumeResource); - }); - // trigger "new" event - oper.watcher.emit('new', volumeResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(importVolumeStub); - sinon.assert.calledWith(importVolumeStub, UUID, defaultSpec); - }); - - it('should set reason in resource if volume import fails upon "new" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const importVolumeStub = sinon.stub(volumes, 'importVolume'); - importVolumeStub.throws( - new GrpcError(grpcCode.INTERNAL, 'create failed') - ); - - const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - }); - // trigger "new" event - oper.watcher.emit('new', volumeResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(importVolumeStub); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status.state).to.equal('error'); - expect(stubs.updateStatus.args[0][5].status.reason).to.equal('Error: create failed'); - }); - - it('should destroy the volume upon "del" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); - destroyVolumeStub.resolves(); - const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - }); - const getVolumeStub = sinon.stub(volumes, 'get'); - getVolumeStub.returns({ uuid: UUID }); - // trigger "del" event - oper.watcher.emit('del', volumeResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(destroyVolumeStub); - sinon.assert.calledWith(destroyVolumeStub, UUID); - }); - - it('should handle gracefully if destroy of a volume fails upon "del" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); - destroyVolumeStub.rejects( - new GrpcError(grpcCode.INTERNAL, 'destroy failed') - ); - const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - }); - const getVolumeStub = sinon.stub(volumes, 'get'); - getVolumeStub.returns({ uuid: UUID }); - // trigger "del" event - oper.watcher.emit('del', volumeResource); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(destroyVolumeStub); - sinon.assert.calledWith(destroyVolumeStub, UUID); - }); - - it('should modify the volume upon "mod" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volume.size = 110; - const fsaStub = sinon.stub(volume, 'fsa'); - fsaStub.returns(); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - .withArgs() - .returns([volume]); - const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); - // new changed specification of the object - const newObj = 
createVolumeResource( - UUID, - { - replicaCount: 3, - local: true, - preferredNodes: ['node1'], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 130, - protocol: 'nvmf' - }, - defaultStatus - ); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(oldObj); - }); - // trigger "mod" event - oper.watcher.emit('mod', newObj); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(fsaStub); - expect(volume.spec.replicaCount).to.equal(3); - expect(volume.spec.local).to.be.true; - expect(volume.spec.preferredNodes).to.have.lengthOf(1); - expect(volume.spec.requiredNodes).to.have.lengthOf(0); - expect(volume.spec.requiredBytes).to.equal(90); - expect(volume.spec.limitBytes).to.equal(130); - }); - - it('should not crash if update volume fails upon "mod" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volume.size = 110; - const fsaStub = sinon.stub(volume, 'fsa'); - fsaStub.resolves(); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - .withArgs() - .returns([volume]); - const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); - // new changed specification of the object - const newObj = createVolumeResource( - UUID, - { - replicaCount: 3, - local: true, - preferredNodes: ['node1'], - requiredNodes: [], - requiredBytes: 111, - limitBytes: 130, - protocol: 'nvmf' - }, - defaultStatus - ); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(oldObj); - }); - // trigger "mod" event - oper.watcher.emit('mod', newObj); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(fsaStub); - expect(volume.spec.replicaCount).to.equal(2); - expect(volume.spec.requiredBytes).to.equal(100); - expect(volume.spec.limitBytes).to.equal(120); - }); - - it('should not do anything if volume params stay the same upon "mod" event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volume.size = 110; - const fsaStub = sinon.stub(volume, 'fsa'); - fsaStub.returns(); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - .withArgs() - .returns([]); - const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); - // new specification of the object that is the same - const newObj = createVolumeResource(UUID, defaultSpec, defaultStatus); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(oldObj); - }); - // trigger "mod" event - oper.watcher.emit('mod', newObj); - // give event callbacks time to propagate - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(fsaStub); - }); - }); - - describe('volume events', () => { - let oper; // volume operator - - afterEach(async () => { - if (oper) { - await oper.stop(); - oper = null; - } - }); - - it('should create a resource upon "new" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - const volumes = new Volumes(registry); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - 
.withArgs() - .returns([volume]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.onFirstCall().returns(); - stubs.get.onSecondCall().returns(volumeResource); - stubs.create.resolves(); - stubs.updateStatus.resolves(); - }); - - sinon.assert.calledOnce(stubs.create); - expect(stubs.create.args[0][4].metadata.name).to.equal(UUID); - expect(stubs.create.args[0][4].metadata.namespace).to.equal(NAMESPACE); - expect(stubs.create.args[0][4].spec).to.deep.equal(defaultSpec); - sinon.assert.calledOnce(stubs.updateStatus); - expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - replicas: [], - size: 0, - state: 'pending' - }); - expect(stubs.updateStatus.args[0][5].status.targetNodes).to.be.undefined; - }); - - it('should not crash if POST fails upon "new" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - sinon.stub(volumes, 'get').returns([]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.onFirstCall().returns(); - stubs.get.onSecondCall().returns(volumeResource); - stubs.create.rejects(new Error('POST failed')); - stubs.updateStatus.resolves(); - }); - - volumes.emit('volume', { - eventType: 'new', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.calledOnce(stubs.create); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should update the resource upon "new" volume event if it exists', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const newSpec = _.cloneDeep(defaultSpec); - newSpec.replicaCount += 1; - const volume = new Volume(UUID, registry, new EventEmitter(), newSpec); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - .withArgs() - .returns([volume]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - }); - - sinon.assert.notCalled(stubs.create); - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].spec).to.deep.equal(newSpec); - sinon.assert.calledOnce(stubs.updateStatus); - }); - - it('should not update the resource upon "new" volume event if it is the same', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec, 'pending', 100, 'node2'); - sinon - .stub(volumes, 'get') - .withArgs(UUID) - .returns(volume); - sinon - .stub(volumes, 'list') - .withArgs() - .returns([volume]); - - const volumeResource = createVolumeResource(UUID, defaultSpec, { - size: 100, - targetNodes: ['node2'], - state: 'pending', - replicas: [] - }); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - }); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should update the resource upon "mod" volume event', async () => { - let stubs; - const registry = new Registry({}); - 
const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - }); - - const newSpec = { - replicaCount: 3, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 130, - protocol: 'nvmf' - }; - const volume = new Volume(UUID, registry, new EventEmitter(), newSpec); - volumes.emit('volume', { - eventType: 'mod', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.update); - expect(stubs.update.args[0][5].spec).to.deep.equal(newSpec); - sinon.assert.calledOnce(stubs.updateStatus); - }); - - it('should update just the status if spec has not changed upon "mod" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - }); - - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volumes.emit('volume', { - eventType: 'mod', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - }); - - it('should not update the status if only the order of entries in arrays differ', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - }); - - const volume = createDefaultVolume(registry); - volumeResource.status.replicas.reverse(); - sinon.stub(volume, 'getReplicas').returns( - [].concat(Object.values(volume.replicas)) - // reverse the order of replicas - .sort((a, b) => { - return (-1) * a.pool.node.name.localeCompare(b.pool.node.name); - }) - ); - volumes.emit('volume', { - eventType: 'mod', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should not crash if PUT fails upon "mod" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - const volumeResource = createVolumeResource(UUID, defaultSpec); - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.update.rejects(new Error('PUT failed')); - stubs.updateStatus.resolves(); - }); - - const newSpec = { - replicaCount: 3, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 130, - protocol: 'nvmf' - }; - const volume = new Volume(UUID, registry, new EventEmitter(), newSpec); - volumes.emit('volume', { - eventType: 'mod', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledTwice(stubs.update); - sinon.assert.calledOnce(stubs.updateStatus); - }); - - it('should not crash if the 
resource does not exist upon "mod" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(); - }); - - const newSpec = { - replicaCount: 3, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 130, - protocol: 'nvmf' - }; - const volume = new Volume(UUID, registry, new EventEmitter(), newSpec); - volumes.emit('volume', { - eventType: 'mod', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - sinon.assert.notCalled(stubs.updateStatus); - }); - - it('should delete the resource upon "del" volume event', async () => { - let stubs; - const volumeResource = createVolumeResource(UUID, defaultSpec); - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.delete.resolves(); - }); - - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volumes.emit('volume', { - eventType: 'del', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.delete); - }); - - it('should not crash if DELETE fails upon "del" volume event', async () => { - let stubs; - const volumeResource = createVolumeResource(UUID, defaultSpec); - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(volumeResource); - stubs.delete.rejects(new Error('delete failed')); - }); - - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volumes.emit('volume', { - eventType: 'del', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.calledOnce(stubs.delete); - }); - - it('should not crash if the resource does not exist upon "del" volume event', async () => { - let stubs; - const registry = new Registry({}); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - - oper = await createVolumeOperator(volumes, (arg) => { - stubs = arg; - stubs.get.returns(); - stubs.delete.resolves(); - }); - - const volume = new Volume(UUID, registry, new EventEmitter(), defaultSpec); - volumes.emit('volume', { - eventType: 'del', - object: volume - }); - await sleep(EVENT_PROPAGATION_DELAY); - - sinon.assert.notCalled(stubs.delete); - sinon.assert.notCalled(stubs.create); - sinon.assert.notCalled(stubs.update); - }); - }); -}; diff --git a/csi/moac/test/volume_test.js b/csi/moac/test/volume_test.js deleted file mode 100644 index 99f6ae281..000000000 --- a/csi/moac/test/volume_test.js +++ /dev/null @@ -1,85 +0,0 @@ -// Unit tests for the volume object -// -// The tests for more complex volume methods are in volumes_test.js mainly -// because volumes.js takes care of routing registry events to the volume -// and it makes sense to test this together. 
- -'use strict'; - -const EventEmitter = require('events'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Node } = require('../dist/node'); -const { Registry } = require('../dist/registry'); -const { Volume } = require('../dist/volume'); -const { shouldFailWith } = require('./utils'); -const { grpcCode } = require('../dist/grpc_client'); - -const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; - -const defaultOpts = { - replicaCount: 1, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 100, - limitBytes: 100 -}; - -module.exports = function () { - it('should stringify volume name', () => { - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultOpts); - expect(volume.toString()).to.equal(UUID); - }); - - it('should get name of the node where the volume has been published', () => { - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultOpts, 'degraded', 100, 'node'); - expect(volume.getNodeName()).to.equal('node'); - expect(volume.state).to.equal('degraded'); - }); - - it('should get zero size of a volume that has not been created yet', () => { - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultOpts); - expect(volume.getSize()).to.equal(0); - }); - - it('should get the right size of a volume that has been imported', () => { - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultOpts, 'healthy', 100); - expect(volume.getSize()).to.equal(100); - expect(volume.state).to.equal('healthy'); - }); - - it('should set the preferred nodes for the volume', () => { - let modified = false; - const registry = new Registry({}); - const emitter = new EventEmitter(); - emitter.on('volume', (ev) => { - if (ev.eventType === 'mod') { - modified = true; - } - }); - const volume = new Volume(UUID, registry, emitter, defaultOpts); - expect(volume.spec.preferredNodes).to.have.lengthOf(0); - volume.update({ preferredNodes: ['node1', 'node2'] }); - expect(modified).to.equal(true); - expect(volume.spec.preferredNodes).to.have.lengthOf(2); - }); - - it('should not publish volume that is known to be broken', async () => { - const registry = new Registry({}); - const volume = new Volume(UUID, registry, new EventEmitter(), defaultOpts, 'faulted', 100); - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.onCall(0).resolves({}); - stub.onCall(1).resolves({ deviceUri: 'nvmf://host/nqn' }); - - shouldFailWith(grpcCode.INTERNAL, async () => { - await volume.publish('nvmf'); - }); - sinon.assert.notCalled(stub); - }); -}; diff --git a/csi/moac/test/volumes_test.js b/csi/moac/test/volumes_test.js deleted file mode 100644 index 8678ba7d0..000000000 --- a/csi/moac/test/volumes_test.js +++ /dev/null @@ -1,2122 +0,0 @@ -// Unit tests for the volume manager and volume object. -// -// Volume ensure method is tested here rather than in volume tests because -// it's easier to test with volume manager, which routes events from registry -// to volumes. 
- -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const EventEmitter = require('events'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const { Nexus } = require('../dist/nexus'); -const { Node } = require('../dist/node'); -const { Pool } = require('../dist/pool'); -const { Registry } = require('../dist/registry'); -const { Replica } = require('../dist/replica'); -const { Volume } = require('../dist/volume'); -const { Volumes } = require('../dist/volumes'); -const { grpcCode, GrpcError } = require('../dist/grpc_client'); -const { shouldFailWith, waitUntil } = require('./utils'); -const enums = require('./grpc_enums'); -const sleep = require('sleep-promise'); -const Etcd3 = require('etcd3'); -const { PersistentStore } = require('../dist/persistent_store'); - -const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; -const UUID2 = 'aa5e39e9-0c0e-4973-8a3a-0dccada09cbc'; -const EYE_BLINK_MS = 30; - -module.exports = function () { - let client; - let mock; - - before(() => { - client = new Etcd3.Etcd3(); - mock = client.mock({ exec: sinon.stub() }); - mock.exec.resolves({ - kvs: [] - }); - }); - - after(() => { - client.unmock(); - }); - - let registry, volumes; - let pool1, pool2, pool3; - let node1, node2, node3; - let stub1, stub2, stub3; - let nexus, replica1, replica2; - let volume; - let volEvents; - let isSynced1, isSynced2, isSynced3; - let persistentStore; - - // Create pristine test env with 3 pools on 3 nodes - function createTestEnv () { - persistentStore = new PersistentStore([], 1000, () => client); - registry = new Registry({}, persistentStore); - - volumes = new Volumes(registry); - node1 = new Node('node1'); - node2 = new Node('node2'); - node3 = new Node('node3'); - isSynced1 = sinon.stub(node1, 'isSynced'); - isSynced1.returns(true); - isSynced2 = sinon.stub(node2, 'isSynced'); - isSynced2.returns(true); - isSynced3 = sinon.stub(node3, 'isSynced'); - isSynced3.returns(true); - - // pools sorted from the most to the least preferred - pool1 = new Pool({ - name: 'pool1', - disks: [], - capacity: 100, - used: 0, - state: 'POOL_ONLINE' - }); - pool2 = new Pool({ - name: 'pool2', - disks: [], - capacity: 100, - used: 4, - state: 'POOL_ONLINE' - }); - pool3 = new Pool({ - name: 'pool3', - disks: [], - capacity: 100, - used: 4, - state: 'POOL_DEGRADED' - }); - // we don't want connect and disconnect to do anything - sinon.spy(node1, 'connect'); - sinon.spy(node2, 'connect'); - sinon.spy(node3, 'connect'); - sinon.spy(node1, 'disconnect'); - sinon.spy(node2, 'disconnect'); - sinon.spy(node3, 'disconnect'); - stub1 = sinon.stub(node1, 'call'); - stub2 = sinon.stub(node2, 'call'); - stub3 = sinon.stub(node3, 'call'); - - registry._registerNode(node1); - registry._registerNode(node2); - registry._registerNode(node3); - node1._registerPool(pool1); - node2._registerPool(pool2); - node3._registerPool(pool3); - - volEvents = []; - volumes.on('volume', (ev) => { - volEvents.push(_.cloneDeep(ev)); - }); - } - - // Create a setup with standard env (from createTestEnv()) and on top of that - // a volume with two replicas on node1 and node2 and nexus on node1 if the - // volume should be created in published state. 
- async function setUpReferenceEnv (published) { - createTestEnv(); - // set up clean etcd by deleting all entries - await client.delete().all(); - - replica1 = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - pool1.registerReplica(replica1); - - replica2 = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NVMF', - uri: `nvmf://node2/${UUID}?uuid=2` - }); - pool2.registerReplica(replica2); - - if (published) { - nexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: 'nvmf://node1/nqn', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - node1._registerNexus(nexus); - } - - // Fake the volume - volume = new Volume(UUID, registry, volumes, { - replicaCount: 2, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }, 'pending', 95, published ? 'node1' : undefined); - volumes.volumes[UUID] = volume; - - volumes.start(); - await waitUntil(() => { - return volEvents.length >= (published ? 3 : 2); - }, 'volume events'); - volume.state = 'healthy'; - } - - function tearDownReferenceEnv () { - volumes.stop(); - } - - // Each test creates a volume so the setup needs to run for each case. - describe('create volume', function () { - // this creates an env with 3 pools on 3 nodes without any replica and nexus - beforeEach(createTestEnv); - - afterEach(() => { - volumes.stop(); - }); - - it('should return error when there is no suitable pool', async () => { - volumes.start(); - await shouldFailWith(grpcCode.RESOURCE_EXHAUSTED, () => - // node2 and node3 are too small - volumes.createVolume(UUID, { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: ['node2', 'node3'], - requiredBytes: 100, - limitBytes: 110, - protocol: 'nvmf' - }) - ); - expect(volEvents).to.have.lengthOf(2); - expect(volEvents[0].eventType).to.equal('new'); - expect(volEvents[1].eventType).to.equal('mod'); - // 'del' event as well, but it is emitted just after running the test - // expect(volEvents[2].eventType).to.equal('del'); - // expect(volEvents[2].object.uuid).to.equal(UUID); - // expect(volEvents[2].object.state).to.equal('destroyed'); - }); - - it('should create local volume', async () => { - stub2.onCall(0).resolves({ - uuid: UUID, - pool: 'pool2', - size: 90, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=2' - }); - stub2.onCall(1).resolves({ - uuid: UUID, - size: 90, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID + '?uuid=2', - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - stub1.onCall(2).resolves({}); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: true, - preferredNodes: ['node2', 'node1', 'node3'], - requiredNodes: ['node1', 'node2', 'node3'], - requiredBytes: 90, - limitBytes: 0, - protocol: 'nvmf' - }); - expect(volume.state).to.equal('healthy'); - expect(volume.nexus).to.be.null; - expect(volume.spec.local).to.be.true; - sinon.assert.calledWithMatch(stub2.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool2', - size: 90, - thin: false, - share: 'REPLICA_NONE' - }); - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub3); - }); - - it('should create non-local volume', async () => { - stub1.onCall(0).resolves({ - uuid: UUID, - pool: 'pool1', - size: 90, - thin: false, - 
share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=1' - }); - stub1.onCall(1).resolves({ - uuid: UUID, - size: 90, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID + '?uuid=1', - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - stub1.onCall(2).resolves({}); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: ['node2', 'node1', 'node3'], - requiredNodes: ['node2', 'node1', 'node3'], - requiredBytes: 90, - limitBytes: 0, - protocol: 'nvmf' - }); - expect(volume.state).to.equal('healthy'); - expect(volume.nexus).to.be.null; - expect(volume.spec.local).to.be.false; - sinon.assert.calledWithMatch(stub1.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool1', - size: 90, - thin: false, - share: 'REPLICA_NONE' - }); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - }); - - it('should set the size of the volume to required minimum if limit is not set', async () => { - // on node 1 is created replica and nexus - stub1.onCall(0).resolves({ - uuid: UUID, - pool: 'pool1', - size: 90, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=1' - }); - stub1.onCall(1).resolves({ - uuid: UUID, - size: 90, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID + '?uuid=1', - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 0, - protocol: 'nvmf' - }); - expect(volume.state).to.equal('healthy'); - expect(volume.size).to.equal(90); - sinon.assert.calledWithMatch(stub1.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool1', - size: 90, - thin: false, - share: 'REPLICA_NONE' - }); - expect(volEvents).to.have.lengthOf(3); - }); - - it('should limit the size of created volume', async () => { - // on node 1 is created replica and nexus - stub1.onCall(0).resolves({ - uuid: UUID, - pool: 'pool1', - size: 50, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=1' - }); - stub1.onCall(1).resolves({ - uuid: UUID, - size: 50, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID + '?uuid=1', - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }); - expect(volume.state).to.equal('healthy'); - expect(volume.size).to.equal(50); - sinon.assert.calledWithMatch(stub1.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool1', - size: 50, - thin: false, - share: 'REPLICA_NONE' - }); - expect(volEvents).to.have.lengthOf(3); - }); - - it('should fail if the size is zero', async () => { - volumes.start(); - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 0, - limitBytes: 0, - protocol: 'nvmf' - }) - ); - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - expect(volEvents).to.have.lengthOf(0); - }); - - it('should create the volume and include pre-existing replicas', async () => { - const replica = new Replica({ - uuid: UUID, - size: 10, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const getReplicaSetStub = 
sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }); - expect(volume.state).to.equal('healthy'); - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - expect(Object.keys(volume.replicas)).to.have.lengthOf(1); - expect(Object.values(volume.replicas)[0]).to.equal(replica); - expect(volEvents).to.have.lengthOf(3); - expect(volEvents[0].eventType).to.equal('new'); - expect(volEvents[1].eventType).to.equal('mod'); - }); - - it('should distribute nexuses evenly over available nodes', async () => { - const replica1 = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - const replica2 = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=2` - }); - const replica3 = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=3` - }); - const replica4 = new Replica({ - uuid: UUID2, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID2}?uuid=4` - }); - const replica5 = new Replica({ - uuid: UUID2, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID2}?uuid=5` - }); - const replica6 = new Replica({ - uuid: UUID2, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID2}?uuid=6` - }); - replica1.pool = pool1; - replica2.pool = pool2; - replica3.pool = pool3; - replica4.pool = pool1; - replica5.pool = pool2; - replica6.pool = pool3; - - // Fake the volume - volume = new Volume(UUID, registry, new EventEmitter(), { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - volume.newReplica(replica1); - volume.newReplica(replica2); - volume.newReplica(replica3); - - const volume2 = new Volume(UUID2, registry, new EventEmitter(), { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - volume2.newReplica(replica4); - volume2.newReplica(replica5); - volume2.newReplica(replica6); - volumes.volumes[UUID] = volume; - volumes.volumes[UUID2] = volume2; - volume.state = 'healthy'; - volume2.state = 'healthy'; - - volumes.start(); - - // set share pcols for replicas of the first volume - stub2.onCall(0).resolves({ uri: `nvmf://node2/${UUID}?uuid=2` }); - stub3.onCall(0).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - // create first nexus reply - stub1.onCall(0).resolves({ - uuid: UUID, - deviceUri: '', - size: 95, - state: 'NEXUS_ONLINE', - children: [{ - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - }, { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE' - }, { - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_ONLINE' - }] - }); - // nexus publish reply - stub1.onCall(1).resolves({ - deviceUri: `nvmf://node1/${UUID}` - }); - - // publish the first volume (with app scheduled to a node that does - // not run mayastor so other criteria will kick in than simply assigning - // the nexus to given application node). 
- let uri = await volume.publish('node4'); - expect(uri).to.equal(`nvmf://node1/${UUID}`); - expect(volume.publishedOn).to.equal('node1'); - - // set share pcols for replicas of the second volume - stub1.onCall(2).resolves({ uri: `nvmf://node1/${UUID2}?uuid=4` }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID2}?uuid=6` }); - // create second nexus reply - stub2.onCall(1).resolves({ - uuid: UUID2, - deviceUri: '', - size: 95, - state: 'NEXUS_ONLINE', - children: [{ - uri: `bdev:///${UUID2}?uuid=5`, - state: 'CHILD_ONLINE' - }, { - uri: `nvmf://node1/${UUID2}?uuid=4`, - state: 'CHILD_ONLINE' - }, { - uri: `nvmf://node3/${UUID2}?uuid=6`, - state: 'CHILD_ONLINE' - }] - }); - // nexus publish reply - stub2.onCall(2).resolves({ - deviceUri: `nvmf://node2/${UUID2}` - }); - - // Publish the second volume - should be on a different node than the - // first one. The same note about non-existing mayastor node applies here - uri = await volume2.publish('node5'); - expect(uri).to.equal(`nvmf://node2/${UUID2}`); - expect(volume2.publishedOn).to.equal('node2'); - }); - - it('should return error if the volume is destroyed before it is created', (done) => { - // on node 1 is created a replica then it is interrupted by the destroy - stub1.onCall(0).resolves({ - uuid: UUID, - pool: 'pool1', - size: 10, - thin: false, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}` - }); - // now comes the destroy interrupting the create - stub1.onCall(2).resolves({}); - - volumes.start(); - - // Create & dispatch promises for both - const create = volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }); - const destroy = volumes.destroyVolume(UUID); - - // the create should have failed because it was interrupted - create - .then(() => { - done(new Error('Expected an error from create')); - }) - .catch((err) => { - expect(err.code).to.equal(grpcCode.INTERNAL); - }); - // the destroy should pass - destroy - .then(done) - .catch(done); - }); - }); - - describe('import volume', function () { - // this creates an env with 3 pools on 3 nodes without any replica and nexus - beforeEach(createTestEnv); - - afterEach(() => { - volumes.stop(); - }); - - const volumeSpec = { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }; - - it('should import a volume and fault it if there are no replicas', async () => { - volumes.start(); - volume = volumes.importVolume(UUID, volumeSpec, { size: 40 }); - // give FSA a chance to run - await sleep(EYE_BLINK_MS); - expect(volume.state).to.equal('faulted'); - expect(Object.keys(volume.replicas)).to.have.lengthOf(0); - }); - - it('should import a volume without nexus', async () => { - // we use two replicas in this test because it uncovers some corner cases - const customVolumeSpec = { - replicaCount: 2, - local: true, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }; - const replica1 = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica1.pool = pool1; - const replica2 = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NVMF', - uri: `nvmf://node2/${UUID}` - }); - replica2.pool = pool2; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica1, replica2]); - - // import creates a nexus - stub1.onCall(0).resolves({ - uuid: 
UUID, - deviceUri: '', - size: 95, - state: 'NEXUS_ONLINE', - children: [{ - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - }, { - uri: `nvmf://node2/${UUID}`, - state: 'CHILD_ONLINE' - }] - }); - // and then it is destroyed again - stub1.onCall(1).resolves({}); - - volumes.start(); - volume = volumes.importVolume(UUID, customVolumeSpec, { size: 40 }); - expect(volume.state).to.equal('unknown'); - expect(Object.keys(volume.replicas)).to.have.lengthOf(2); - // give FSA a chance to run - await sleep(EYE_BLINK_MS); - expect(volume.nexus).to.be.null; - expect(volume.state).to.equal('healthy'); - expect(volume.size).to.equal(40); - expect(volEvents).to.have.lengthOf(3); - }); - - it('should import unpublished volume with nexus', async () => { - const replica = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const nexus = new Nexus({ - uuid: UUID, - size: 20, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - } - ] - }); - nexus.node = node1; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - - volumes.start(); - volume = volumes.importVolume(UUID, volumeSpec, { size: 40 }); - // give FSA a chance to run - await sleep(EYE_BLINK_MS); - expect(volume.nexus).to.be.null; - expect(Object.keys(volume.replicas)).to.have.lengthOf(1); - expect(Object.values(volume.replicas)[0]).to.equal(replica); - expect(volume.state).to.equal('healthy'); - expect(volEvents).to.have.lengthOf(4); - }); - - it('should import published volume with nexus', async () => { - const deviceUri = 'nvmf://node1/nqn'; - const replica = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const nexus = new Nexus({ - uuid: UUID, - size: 20, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE' - } - ] - }); - nexus.node = node1; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - - stub1.onCall(0).resolves({ deviceUri }); - volumes.start(); - volume = volumes.importVolume(UUID, volumeSpec, { - size: 40, - targetNodes: ['node1'] - }); - await waitUntil(() => volume.nexus.deviceUri === deviceUri, 'published nexus'); - expect(Object.keys(volume.replicas)).to.have.lengthOf(1); - expect(Object.values(volume.replicas)[0]).to.equal(replica); - expect(volume.state).to.equal('healthy'); - expect(volEvents).to.have.lengthOf(4); - }); - - it('should import local volume and create missing local replica', async () => { - const volumeSpec = { - replicaCount: 2, - local: true, - preferredNodes: ['node3', 'node2', 'node1'], - requiredNodes: ['node1', 'node2', 'node3'], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nvmf' - }; - const replica = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NVMF', - uri: `nvmf://node2/${UUID}?uuid=2` - }); - replica.pool = pool2; - const nexus = new Nexus({ - uuid: UUID, - size: 40, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE' - } - ] - }); - nexus.node = node3; - - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - 
getReplicaSetStub.returns([replica]); - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 40, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ - uri: `bdev:///${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - - volumes.start(); - volume = volumes.importVolume(UUID, volumeSpec, { size: 40 }); - await waitUntil( - () => volume.state === 'degraded' && volume.nexus.children.length === 2, - 'degraded volume with two replicas' - ); - expect(Object.keys(volume.replicas)).to.have.lengthOf(2); - // expect the new replica on the "local" node - expect(Object.values(volume.replicas)[0]).to.equal(replica); - expect(Object.values(volume.replicas)[1].pool.name).to.equal('pool3'); - - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.calledTwice(stub3); - sinon.assert.calledWithMatch(stub3.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool3', - size: 40, - thin: false, - share: 'REPLICA_NONE' - }); - sinon.assert.calledWithMatch(stub3.secondCall, 'addChildNexus', { - uuid: UUID, - uri: `bdev:///${UUID}?uuid=3`, - norebuild: false - }); - }); - - it('should import a volume without status', async () => { - volumes.start(); - volume = volumes.importVolume(UUID, volumeSpec); - expect(volume.state).to.equal('unknown'); - expect(volume.size).to.equal(0); - }); - }); - - describe('publish volume', function () { - // We create an artificial volume at the beginning of each test. - this.beforeEach(() => { - createTestEnv(); - - const replica = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - - // Fake the volume - volume = new Volume(UUID, registry, new EventEmitter(), { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }, 'healthy', 95); - volume.newReplica(replica); - volumes.volumes[UUID] = volume; - volumes.start(); - }); - - this.afterEach(() => { - volumes.stop(); - console.log('Stopped!'); - }); - - it('should publish the volume', async () => { - stub1.onCall(0).resolves({ uri: `nvmf://node1/${UUID}` }); - stub2.onCall(0).resolves({ uuid: UUID, size: 100, state: 'NEXUS_ONLINE', children: [{ uri: `nvmf://node1/${UUID}`, state: 'CHILD_ONLINE' }] }); - stub2.onCall(1).resolves({ deviceUri: 'nvmf://node2/nqn' }); - - const uri = await volume.publish('node2'); - expect(uri).to.equal('nvmf://node2/nqn'); - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); - sinon.assert.calledTwice(stub2); - sinon.assert.calledWithMatch(stub2.firstCall, 'createNexus', { - uuid: UUID, - size: 95, - children: [`nvmf://node1/${UUID}`] - }); - sinon.assert.calledWithMatch(stub2.secondCall, 'publishNexus', { - uuid: UUID, - key: '' - }); - }); - - it('should publish the volume that already has a nexus on a different node', async () => { - const nexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - nexus.node = node1; - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - 
volume.newNexus(nexus); - - stub1.onCall(0).resolves({}); - stub1.onCall(1).resolves({ uri: `nvmf://node1/${UUID}` }); - stub2.onCall(0).resolves({ uuid: UUID, size: 100, state: 'NEXUS_ONLINE', children: [{ uri: `nvmf://node1/${UUID}`, state: 'CHILD_ONLINE' }] }); - stub2.onCall(1).resolves({ deviceUri: 'nvmf://node2/nqn' }); - const uri = await volume.publish('node2'); - expect(uri).to.equal('nvmf://node2/nqn'); - sinon.assert.calledTwice(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'destroyNexus', { uuid: UUID }); - sinon.assert.calledWithMatch(stub1.secondCall, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); - sinon.assert.calledTwice(stub2); - sinon.assert.calledWithMatch(stub2.firstCall, 'createNexus', { - uuid: UUID, - size: 95, - children: [`nvmf://node1/${UUID}`] - }); - sinon.assert.calledWithMatch(stub2.secondCall, 'publishNexus', { - uuid: UUID, - key: '' - }); - }); - - it('should fail to publish a volume that is supposed to be published on a node that does not exist', async () => { - volume.publishedOn = 'nodeX'; - await shouldFailWith(grpcCode.INTERNAL, () => volume.publish('nvmf')); - expect(volume.publishedOn).to.equal('nodeX'); - expect(volume.nexus).to.be.null; - }); - - it('should fail to publish if setting share protocol on replica fails', async () => { - stub1.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - - await shouldFailWith(grpcCode.INTERNAL, () => volume.publish('node2')); - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - // await sleep(1700); - }); - - it('should fail to publish if create nexus grpc fails', async () => { - stub1.rejects(new GrpcError(grpcCode.INTERNAL, 'Test failure')); - - await shouldFailWith(grpcCode.INTERNAL, () => volume.publish('node1')); - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'createNexus', { - uuid: UUID, - size: 95, - children: [`bdev:///${UUID}?uuid=1`] - }); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - }); - }); - - describe('unpublish volume', function () { - // We create an artificial volume at the beginning of each test. 
- this.beforeEach(async () => { - createTestEnv(); - - const replica = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - - const nexus = new Nexus({ - uuid: UUID, - size: 95, - state: 'NEXUS_ONLINE', - deviceUri: 'nvmf://node1/nqn', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - node1.nexus.push(nexus); - nexus.bind(node1); - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - - // Fake the volume - volume = new Volume(UUID, registry, new EventEmitter(), { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }, 'healthy', 95, 'node1'); - volume.newReplica(replica); - volume.newNexus(nexus); - - volumes.volumes[UUID] = volume; - volumes.start(); - // let new/mod/del events to happen before we start testing - await sleep(EYE_BLINK_MS); - }); - - this.afterEach(() => { - volumes.stop(); - }); - - it('should unpublish a volume', async () => { - await volume.unpublish(); - expect(volume.getNodeName()).to.be.undefined; - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1, 'unpublishNexus', { - uuid: UUID - }); - // destroy nexus is called afterwards - await sleep(EYE_BLINK_MS); - sinon.assert.calledTwice(stub1); - sinon.assert.calledWithMatch(stub1, 'destroyNexus', { - uuid: UUID - }); - }); - - it('should unpublish a volume with unreachable nexus', async () => { - isSynced1.returns(false); - node1._offline(); - await waitUntil( - () => volume.state === 'offline' && volume.nexus.isOffline(), - 'offline volume' - ); - // The state of the vol should be as if the nexus was really unpublished - // and destroyed even though that it's not possible because the node is - // offline. - await volume.unpublish(); - expect(volume.getNodeName()).to.be.undefined; - sinon.assert.notCalled(stub1); - await sleep(EYE_BLINK_MS); - expect(volume.nexus).to.be.null; - sinon.assert.notCalled(stub1); - }); - }); - - describe('update volume', function () { - let modCount; - - // We create an artificial volume at the beginning of each test. 
- this.beforeEach(() => { - createTestEnv(); - - const nexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - nexus.node = node1; - const replica = new Replica({ - uuid: UUID, - size: 95, - share: 'REPLICA_NONE', - uri: `bdev:///${UUID}?uuid=1` - }); - replica.pool = pool1; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); - const getNexusStub = sinon.stub(registry, 'getNexus'); - getNexusStub.returns(nexus); - - // Fake the volume - const emitter = new EventEmitter(); - emitter.on('mod', () => { modCount += 1; }); - volume = new Volume(UUID, registry, emitter, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - volume.newReplica(replica); - volumes.volumes[UUID] = volume; - volume.newNexus(nexus); - volume.state = 'healthy'; - modCount = 0; - - volumes.start(); - }); - - this.afterEach(() => { - volumes.stop(); - modCount = 0; - }); - - it('should update volume parameters if a volume to be created already exists', async () => { - // We intentionally update parameters in a way that won't require - // scaling up and down, that is tested by different tests. - const returnedVolume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: true, - preferredNodes: [node2.name], - requiredNodes: [node1.name], - requiredBytes: 89, - limitBytes: 111, - protocol: 'nvmf' - }); - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - expect(returnedVolume).to.equal(volume); - expect(volume.spec.replicaCount).to.equal(1); - expect(volume.size).to.equal(95); - expect(volume.spec.local).to.be.true; - expect(volume.spec.preferredNodes[0]).to.equal(node2.name); - expect(volume.spec.requiredNodes[0]).to.equal(node1.name); - expect(volume.spec.requiredBytes).to.equal(89); - expect(volume.spec.limitBytes).to.equal(111); - expect(volume.state).to.equal('healthy'); - expect(modCount).to.equal(0); - }); - - it('should not do anything if creating a volume that exists and has the same parameters', async () => { - const returnedVolume = await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - expect(returnedVolume).to.equal(volume); - expect(modCount).to.equal(0); - }); - - it('should fail to shrink the volume', async () => { - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 94, - protocol: 'nvmf' - }) - ); - }); - - it('should fail to extend the volume', async () => { - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => - volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 96, - limitBytes: 110, - protocol: 'nvmf' - }) - ); - }); - - it('should fail to change the protocol', async () => { - await shouldFailWith(grpcCode.INVALID_ARGUMENT, () => volumes.createVolume(UUID, { - replicaCount: 1, - local: true, - preferredNodes: [node2.name], - requiredNodes: [node1.name], - requiredBytes: 89, - limitBytes: 111, - protocol: 'iscsi' - })); - 
}); - }); - - describe('scale up/down', function () { - describe('with used nexus', function () { - beforeEach(() => setUpReferenceEnv(true)); - afterEach(tearDownReferenceEnv); - - it('should scale up if a child is faulted', async () => { - // on node 3 is created the new replica - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - // the faulted replica should be eventually removed - stub2.onCall(0).resolves({}); - // nexus should be updated twice (add and remove a replica) - stub1.onCall(0).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - stub1.onCall(1).resolves({}); - // and finally the nexus should be destroyed again - stub1.onCall(2).resolves({}); - - nexus.children[1].state = 'CHILD_FAULTED'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - - console.log('B'); - - await waitUntil( - () => - nexus.children.length === 3 && - nexus.children.find((ch) => ch.uri === `nvmf://node3/${UUID}?uuid=3`), - 'new replica' - ); - - console.log('A'); - - expect(volume.state).to.equal('degraded'); - const child = nexus.children.find((ch) => ch.uri === `nvmf://node3/${UUID}?uuid=3`); - child.state = 'CHILD_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - - await waitUntil( - () => - nexus.children.length === 2 && - !nexus.children.find((ch) => ch.uri === `nvmf://node2/${UUID}?uuid=2`) && - nexus.children.find((ch) => ch.uri === `nvmf://node3/${UUID}?uuid=3`), - 'faulted replica removal' - ); - expect(volume.state).to.equal('healthy'); - }); - - it('should not scale up if the replica is there but just being rebuilt', async () => { - // this would have been normally done but should not be the case now - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - stub1.onCall(0).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - - nexus.children[0].state = 'CHILD_DEGRADED'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - - try { - await waitUntil( - () => nexus.children.length === 3, - 100, // 100 ms - 'new replica not to appear' - ); - } catch (err) { - // we are fine - expect(volume.nexus.children).to.have.lengthOf(2); - expect(volume.state).to.equal('degraded'); - return; - } - throw new Error('well, the new replica did appear'); - }); - - it('should not scale up if replica is offline but the child is online', async () => { - // this would have been normally done but should not be the case now - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - stub1.onCall(0).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - - replica1.offline(); - - try { - await waitUntil( - () => nexus.children.length === 3, - 100, // 100 ms - 'new replica not to appear' - ); - } catch (err) { - // we are fine - expect(volume.nexus.children).to.have.lengthOf(2); - expect(volume.state).to.equal('healthy'); - return; - } 
- throw new Error('well, the new replica did appear'); - }); - - it('should not scale down if a rebuild is in progress', async () => { - // node 1: updated nexus (remove-child) - stub1.onCall(0).resolves({}); - // node 2: destroyed replica - stub2.onCall(1).resolves({}); - - nexus.children[0].state = 'CHILD_DEGRADED'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - - // update the spec - await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - - try { - await waitUntil( - () => nexus.children.length === 1, - 100, - 'replica to be destroyed' - ); - } catch (err) { - expect(volume.state).to.equal('degraded'); - return; - } - throw new Error('The replica was removed even if in rebuild state'); - }); - - it('should scale up and then scale down when a volume is moved', async () => { - // on node 3 is created the new replica - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - // nexus should be updated to add the new child - stub1.onCall(0).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - - // update the spec: node2 remains but the first replica should move - // from node1 to node3 - volume = await volumes.createVolume(UUID, { - replicaCount: 2, - local: false, - preferredNodes: [], - requiredNodes: ['node2', 'node3'], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - - await waitUntil( - () => nexus.children.length === 3 && volume.state === 'degraded', - 'new replica' - ); - - const newChild = volume.nexus.children.find( - (ch) => ch.state === 'CHILD_DEGRADED' - ); - expect(newChild.uri).to.equal(`nvmf://node3/${UUID}?uuid=3`); - newChild.state = 'CHILD_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - - await waitUntil(() => nexus.children.length === 2, 'replica removal'); - expect(volume.state).to.equal('healthy'); - expect(Object.keys(volume.replicas)).to.deep.equal(['node2', 'node3']); - }); - - it('should scale up if a new pool is created', async () => { - // on node 3 we destroy (and create) the pool and create the new replica - stub3.onCall(0).resolves({}); - stub3.onCall(1).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(2).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - // nexus should be updated to add the new child - stub1.onCall(0).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - - // delete the third pool to pretend we ran out of pools - await pool3.destroy(); - - // now we cannot create the new replica (this is the update op in fact) - await volumes.createVolume(UUID, { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - - // now create the pool and see if it gets used for the new replica - pool3 = new Pool({ - name: 'pool3', - disks: [], - capacity: 100, - used: 4, - state: 'POOL_DEGRADED' - }); - node3._registerPool(pool3); - - await waitUntil( - () => 
nexus.children.length === 3 && volume.state === 'degraded', - 'degraded volume with new replica' - ); - - const newChild = volume.nexus.children.find( - (ch) => ch.state === 'CHILD_DEGRADED' - ); - expect(newChild.uri).to.equal(`nvmf://node3/${UUID}?uuid=3`); - newChild.state = 'CHILD_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - - await waitUntil( - () => nexus.children.length === 3 && volume.state === 'healthy', - 'healthy volume' - ); - }); - }); - - describe('without nexus', function () { - beforeEach(() => setUpReferenceEnv(false)); - afterEach(tearDownReferenceEnv); - - it('should scale up if replicaCount is increased', async () => { - // scale up involves nexus creation - stub1.onCall(0).resolves({ - uuid: UUID, - size: 95, - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - // on node 3 is created the new replica - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 95, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - stub3.onCall(1).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - // nexus should be updated to add the new child - stub1.onCall(1).resolves({ - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_DEGRADED', - rebuildProgress: 10 - }); - // nexus will be destroyed at the end - stub1.onCall(2).resolves({}); - - // update the spec - volume = await volumes.createVolume(UUID, { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - - await waitUntil( - () => - volume.state === 'degraded' && - Object.keys(volume.replicas).length === 3 && - volume.nexus.children.length === 3, - 'new replica' - ); - const newReplica = Object.values(volume.replicas).find((r) => r.uri === `nvmf://node3/${UUID}?uuid=3`); - const nexus = volume.nexus; - const child = nexus.children[2]; - expect(nexus).not.to.be.null; - expect(newReplica.pool.name).to.equal('pool3'); - expect(child.state).to.equal('CHILD_DEGRADED'); - - // simulate rebuild finish - the nexus should go away - child.state = 'CHILD_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil( - () => volume.state === 'healthy' && volume.nexus === null, - 'healthy volume' - ); - expect(Object.keys(volume.replicas)).has.lengthOf(3); - }); - - it('should scale down if replicaCount is decreased', async () => { - // scale down involves nexus creation - stub1.onCall(0).resolves({ - uuid: UUID, - size: 95, - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - // node 1: updated nexus (remove-child) - stub1.onCall(1).resolves({}); - // node 2: destroyed replica - stub2.onCall(0).resolves({}); - // nexus will be destroyed at the end - stub1.onCall(2).resolves({}); - - // update the spec - await volumes.createVolume(UUID, { - replicaCount: 1, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - - // Nexus gets created and destroyed inbetween but it's difficult to - // capture that moment in the tests because we don't know the precise - // timing. 
- await waitUntil( - () => - Object.keys(volume.replicas).length === 1 && - volume.nexus === null, - 'replica to be destroyed' - ); - expect(volume.state).to.equal('healthy'); - const oldReplica = Object.values(volume.replicas).find((r) => r.uri === `nvmf://node2/${UUID}`); - expect(oldReplica).to.be.undefined; - }); - }); - }); - - describe('state transitions on a volume without nexus', function () { - beforeEach(() => setUpReferenceEnv(false)); - afterEach(tearDownReferenceEnv); - - it('should move to "faulted" when none of replicas is online', async () => { - node3._offline(); // prevent FSA from scheduling a new replica - replica1.offline(); - replica2.offline(); - await waitUntil(() => volume.state === 'faulted', 'faulted volume'); - }); - - it('should move to "offline" when volume is published on unknown node', async () => { - volume.publishedOn = 'nodeX'; - volume.fsa(); - await waitUntil(() => volume.state === 'offline', 'offline volume'); - }); - }); - - describe('state transitions on a volume with nexus', function () { - beforeEach(() => setUpReferenceEnv(true)); - afterEach(tearDownReferenceEnv); - - it('should move to "faulted" when none of replicas is online', async () => { - nexus.children.forEach((ch) => (ch.state = 'CHILD_FAULTED')); - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - - await waitUntil(() => volume.state === 'faulted', 'faulted volume'); - expect(nexus.children).to.have.length(2); - }); - - it('should move to "degraded" when rebuild starts and back to healthy when it ends', async () => { - nexus.children[0].state = 'CHILD_DEGRADED'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - - nexus.children[0].state = 'CHILD_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'healthy', 'healthy volume'); - }); - - it('should move to "offline" state when nexus goes offline', async () => { - isSynced1.returns(false); - nexus.offline(); - await waitUntil(() => volume.state === 'offline', 'offline volume'); - }); - - it('should remain what it was when volume is unpublished', async () => { - nexus.children[0].state = 'CHILD_DEGRADED'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - await volume.unpublish(); - await sleep(EYE_BLINK_MS); - await waitUntil(() => volume.state === 'degraded', 'degraded volume'); - }); - - it('should not move to any state when in "destroyed" state', async () => { - volume.state = 'destroyed'; - // try to move all replicas to faulted and the state should not change - nexus.children.forEach((ch) => (ch.state = 'CHILD_FAULTED')); - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - try { - await waitUntil(() => volume.state === 'faulted', 100, 'faulted volume'); - } catch (err) { - // ok - the state did not change - } finally { - // this will throw - expect(volume.state).to.equal('destroyed'); - } - }); - }); - - describe('nexus failover', function () { - beforeEach(() => setUpReferenceEnv(true)); - afterEach(tearDownReferenceEnv); - - it('should create nexus on the same node where it was published', async () => { - // FSA should try to create and share the nexus again - stub1.onCall(0).resolves({ - uuid: UUID, - size: 96, - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - 
uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - stub1.onCall(1).resolves({ - deviceUri: 'nvmf://node1/nqn' - }); - - // pretend that node1 is down - isSynced1.returns(false); - // we unbind the nexus - that happens when node goes down - nexus.unbind(); - await waitUntil(() => volume.state === 'offline', 'volume offline'); - expect(volume.nexus).to.be.null; - expect(volume.publishedOn).to.equal('node1'); - - // this simulates node that has been just successfully sync'd - isSynced1.returns(true); - node1.emit('node', { - eventType: 'mod', - object: node1 - }); - await waitUntil(() => volume.state === 'healthy', 'healthy volume'); - expect(volume.nexus.deviceUri).to.equal('nvmf://node1/nqn'); - expect(volume.publishedOn).to.equal('node1'); - }); - - it('should set state to healthy again when nexus comes online', async () => { - isSynced1.returns(false); - nexus.offline(); - await waitUntil(() => volume.state === 'offline', 'volume offline'); - - isSynced1.returns(true); - nexus.state = 'NEXUS_ONLINE'; - registry.emit('nexus', { - eventType: 'mod', - object: nexus - }); - await waitUntil(() => volume.state === 'healthy', 'healthy volume'); - }); - - it('should destroy a new nexus on wrong node', async () => { - stub2.onCall(0).resolves({}); - const wrongNexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node2/${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - node2._registerNexus(wrongNexus); - - await waitUntil(() => stub2.callCount > 0, 'destroy grpc call'); - sinon.assert.calledOnce(stub2); - sinon.assert.calledWithMatch(stub2, 'destroyNexus', { uuid: UUID }); - expect(volume.nexus).to.equal(nexus); - expect(volume.state).to.equal('healthy'); - }); - - it('should replace a nexus in volume on wrong node', async () => { - volume.publishedOn = 'node2'; - stub1.onCall(0).resolves({ }); - stub1.onCall(1).resolves({ uri: `nvmf://node1/${UUID}?uuid=1` }); - stub2.onCall(0).resolves({ uri: `bdev:///${UUID}?uuid=2` }); - const newNexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: `nvmf://node2/${UUID}`, - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node1/${UUID}?uuid=1`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - node2._registerNexus(newNexus); - - await waitUntil(() => stub1.callCount > 0, 'destroy grpc call'); - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1, 'destroyNexus', { uuid: UUID }); - expect(volume.nexus).to.equal(newNexus); - expect(volume.state).to.equal('healthy'); - }); - }); - - // Volume is created once in the first test and then all tests use it. - // This tests the typical life-cycle of a volume from create to destroy. 
- describe('misc', function () { - const deviceUri = 'nvmf://node1/nqn'; - - before(createTestEnv); - - afterEach(() => { - stub1.resetHistory(); - stub2.resetHistory(); - stub3.resetHistory(); - volEvents = []; - }); - - after(() => { - volumes.stop(); - }); - - // this creates a volume used in subsequent cases - it('should create a new volume', async () => { - // on node 1 is created replica and nexus - stub1.onCall(0).resolves({ - uuid: UUID, - pool: 'pool1', - size: 96, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=1' - }); - // on node 2 is created replica and it is shared - stub2.onCall(0).resolves({ - uuid: UUID, - pool: 'pool2', - size: 96, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=2' - }); - // on node 3 is created replica and it is shared - stub3.onCall(0).resolves({ - uuid: UUID, - pool: 'pool3', - size: 96, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + '?uuid=3' - }); - - volumes.start(); - volume = await volumes.createVolume(UUID, { - replicaCount: 3, - local: false, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 90, - limitBytes: 110, - protocol: 'nvmf' - }); - - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool1', - size: 96, - thin: false, - share: 'REPLICA_NONE' - }); - - sinon.assert.calledOnce(stub2); - sinon.assert.calledWithMatch(stub2.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool2', - size: 96, - thin: false, - share: 'REPLICA_NONE' - }); - - sinon.assert.calledOnce(stub3); - sinon.assert.calledWithMatch(stub3.firstCall, 'createReplica', { - uuid: UUID, - pool: 'pool3', - size: 96, - thin: false, - share: 'REPLICA_NONE' - }); - - expect(volumes.get(UUID)).to.equal(volume); - expect(volume.uuid).to.equal(UUID); - expect(volume.getSize()).to.equal(96); - expect(volume.getNodeName()).to.be.undefined; - expect(volume.spec.local).to.be.false; - expect(volume.spec.replicaCount).to.equal(3); - expect(volume.spec.preferredNodes).to.have.lengthOf(0); - expect(volume.spec.requiredNodes).to.have.lengthOf(0); - expect(volume.spec.requiredBytes).to.equal(90); - expect(volume.spec.limitBytes).to.equal(110); - expect(volume.nexus).to.be.null; - expect(Object.keys(volume.replicas)).to.have.lengthOf(3); - expect(volume.replicas.node1.uuid).to.equal(UUID); - expect(volume.replicas.node2.uuid).to.equal(UUID); - expect(volume.replicas.node3.uuid).to.equal(UUID); - expect(volume.state).to.equal('healthy'); - - expect(volEvents).to.have.lengthOf(5); - }); - - it('should publish the volume', async () => { - // on node 1 is created nexus - stub1.onCall(0).resolves({ - uuid: UUID, - size: 96, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID + '?uuid=1', - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node2/${UUID}?uuid=2`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://node3/${UUID}?uuid=3`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - stub2.onCall(0).resolves({ uri: `nvmf://node2/${UUID}?uuid=2` }); - stub3.onCall(0).resolves({ uri: `nvmf://node3/${UUID}?uuid=3` }); - stub1.onCall(1).resolves({ deviceUri }); - - const uri = await volume.publish('node1'); - expect(uri).to.equal(deviceUri); - - sinon.assert.calledTwice(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'createNexus', { - uuid: UUID, - size: 96, - children: ['bdev:///' + UUID + '?uuid=1', `nvmf://node2/${UUID}?uuid=2`, `nvmf://node3/${UUID}?uuid=3`] - }); - 
sinon.assert.calledWithMatch(stub1.secondCall, 'publishNexus', { - uuid: UUID, - key: '', - share: enums.NEXUS_NVMF - }); - - sinon.assert.calledOnce(stub2); - sinon.assert.calledOnce(stub3); - - expect(volume.getNodeName()).to.equal('node1'); - expect(volume.getSize()).to.equal(96); - expect(volume.spec.replicaCount).to.equal(3); - expect(volume.nexus.uuid).to.equal(UUID); - expect(Object.keys(volume.replicas)).to.have.lengthOf(3); - expect(volume.state).to.equal('healthy'); - - // 1 new nexus, 1 publish nexus, 2 replica share mods - expect(volEvents).to.have.lengthOf(4); - }); - - it('should publish the volume that has been already published', async () => { - const uri = await volume.publish('node1'); - expect(uri).to.equal(deviceUri); - sinon.assert.notCalled(stub1); - }); - - it('should unpublish the volume', async () => { - stub1.onCall(0).resolves({}); - stub1.onCall(1).resolves({}); - await volume.unpublish(); - // wait for the nexus to be destroyed after unpublish - await sleep(EYE_BLINK_MS); - sinon.assert.calledTwice(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'unpublishNexus', { - uuid: UUID - }); - sinon.assert.calledWithMatch(stub1.secondCall, 'destroyNexus', { - uuid: UUID - }); - expect(volume.getNodeName()).to.be.undefined; - expect(volume.uuid).to.equal(UUID); - expect(volume.nexus).is.null; - expect(volume.state).to.equal('healthy'); - expect(Object.keys(volume.replicas)).to.have.length(3); - expect(volEvents).to.have.lengthOf(2); - }); - - it('should unpublish volume that has not been published', async () => { - await volume.unpublish(); - expect(volume.getNodeName()).to.be.undefined; - sinon.assert.notCalled(stub1); - }); - - it('should destroy the volume', async () => { - stub1.onCall(0).resolves({}); - stub2.onCall(0).resolves({}); - stub3.onCall(0).resolves({}); - - await volumes.destroyVolume(UUID); - - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1, 'destroyReplica', { uuid: UUID }); - sinon.assert.calledOnce(stub2); - sinon.assert.calledWithMatch(stub2, 'destroyReplica', { uuid: UUID }); - sinon.assert.calledOnce(stub3); - sinon.assert.calledWithMatch(stub3, 'destroyReplica', { uuid: UUID }); - - expect(volumes.get(UUID)).is.undefined; - expect(volume.getNodeName()).to.be.undefined; - expect(volume.nexus).is.null; - expect(volume.state).to.equal('destroyed'); - expect(Object.keys(volume.replicas)).to.have.length(0); - // 3 replicas and 1 del volume event - expect(volEvents).to.have.lengthOf(5); - }); - - it('should not fail if destroying a volume that does not exist', async () => { - stub1.onCall(0).resolves({}); - stub2.onCall(0).resolves({}); - stub3.onCall(0).resolves({}); - expect(volumes.get(UUID)).is.undefined; - - await volumes.destroyVolume(UUID); - - sinon.assert.notCalled(stub1); - sinon.assert.notCalled(stub2); - sinon.assert.notCalled(stub3); - expect(volEvents).to.have.lengthOf(0); - }); - }); -}; diff --git a/csi/moac/test/watcher_stub.js b/csi/moac/test/watcher_stub.js deleted file mode 100644 index 8dbbeeb49..000000000 --- a/csi/moac/test/watcher_stub.js +++ /dev/null @@ -1,49 +0,0 @@ -// Fake watcher that isolates the watcher from k8s api server using sinon stubs. 
- -'use strict'; - -const sinon = require('sinon'); - -// stubsCb callback can override default return values of k8s api calls -function mockCache (cache, stubsCb) { - // do not wait for confirming events from k8s - cache.eventTimeout = 0; - - // mock k8s api calls - cache.createStub = sinon.stub(cache.k8sApi, 'createNamespacedCustomObject'); - cache.updateStub = sinon.stub(cache.k8sApi, 'replaceNamespacedCustomObject'); - cache.updateStatusStub = sinon.stub(cache.k8sApi, 'replaceNamespacedCustomObjectStatus'); - cache.deleteStub = sinon.stub(cache.k8sApi, 'deleteNamespacedCustomObject'); - cache.getStub = sinon.stub(cache.listWatch, 'get'); - cache.listStub = sinon.stub(cache.listWatch, 'list'); - const stubs = { - create: cache.createStub, - update: cache.updateStub, - updateStatus: cache.updateStatusStub, - delete: cache.deleteStub, - get: cache.getStub, - list: cache.listStub - }; - stubs.create.resolves(); - stubs.update.resolves(); - stubs.updateStatus.resolves(); - stubs.delete.resolves(); - stubs.get.returns(); - stubs.list.returns([]); - if (stubsCb) stubsCb(stubs); - - // convenience function for emitting watcher events - stubs.emitKubeEvent = (ev, data) => { - cache.listWatch.callbackCache[ev].forEach((cb) => cb(data)); - }; - - // mock the watcher to start even without k8s - const startStub = sinon.stub(cache.listWatch, 'start'); - startStub.callsFake(async () => { - stubs.list().forEach((ent) => { - stubs.emitKubeEvent('add', ent); - }); - }); -} - -module.exports = { mockCache }; diff --git a/csi/moac/test/watcher_test.js b/csi/moac/test/watcher_test.js deleted file mode 100644 index 803bdc2e9..000000000 --- a/csi/moac/test/watcher_test.js +++ /dev/null @@ -1,525 +0,0 @@ -// Tests for the object cache (watcher). - -'use strict'; - -/* eslint-disable no-unused-expressions */ - -const _ = require('lodash'); -const expect = require('chai').expect; -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const { KubeConfig } = require('@kubernetes/client-node'); -const { CustomResourceCache } = require('../dist/watcher'); - -// slightly modified cache tunings not to wait too long when testing things -const IDLE_TIMEOUT_MS = 500; -const RESTART_DELAY_MS = 300; -const EVENT_TIMEOUT_MS = 200; -const EVENT_DELAY_MS = 100; -const EYE_BLINK_MS = 30; -// Believe it or not but it is possible that timeout callback triggers a bit -// earlier than it should (although that nodejs documentation says that it is -// not possible). Accomodate this weird behaviour. -const TOLERATE_MS = 2; - -const fakeConfig = { - clusters: [ - { - name: 'cluster', - server: 'foo.company.com' - } - ], - contexts: [ - { - cluster: 'cluster', - user: 'user' - } - ], - users: [{ name: 'user' }] -}; - -// Create fake k8s object. Example of true k8s object follows: -// -// "object": { -// "apiVersion": "csi.storage.k8s.io/v1alpha1", -// "kind": "CSINodeInfo", -// "metadata": { -// "creationTimestamp": "2019-02-15T18:23:53Z", -// "generation": 13, -// "name": "node1", -// "ownerReferences": [ -// { -// "apiVersion": "v1", -// "kind": "Node", -// "name": "node1", -// "uid": "c696b8e5-fd8c-11e8-a41c-589cfc0d76a7" -// } -// ], -// "resourceVersion": "627981", -// "selfLink": "/apis/csi.storage.k8s.io/v1alpha1/csinodeinfos/node1", -// "uid": "d99f06a9-314e-11e9-b086-589cfc0d76a7" -// }, -// "spec": { -// ... -// }, -// "status": { -// ... 
-// } -// } -function createApple (name, finalizers, spec) { - return { - apiVersion: 'my.group.io/v1alpha1', - kind: 'apple', - metadata: { name, finalizers }, - spec - }; -} - -// Test class -class Apple { - constructor (obj) { - this.metadata = { - name: obj.metadata.name - }; - if (obj.spec === 'invalid') { - throw new Error('Invalid object'); - } - this.spec = obj.spec; - } -} - -// Create a cache with a listWatch object with fake start method that does -// nothing instead of connecting to k8s cluster. -function createMockedCache () { - const kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - const watcher = new CustomResourceCache('namespace', 'apple', kc, Apple, { - restartDelay: RESTART_DELAY_MS, - eventTimeout: EVENT_TIMEOUT_MS, - idleTimeout: IDLE_TIMEOUT_MS - }); - // convenience function for generating k8s watcher events - watcher.emitKubeEvent = (ev, data) => { - watcher.listWatch.callbackCache[ev].forEach((cb) => cb(data)); - }; - const startStub = sinon.stub(watcher.listWatch, 'start'); - startStub.onCall(0).resolves(); - return [watcher, startStub]; -} - -module.exports = function () { - this.timeout(10000); - - it('should create a cache and block in start until connected', async () => { - const kc = new KubeConfig(); - Object.assign(kc, fakeConfig); - const watcher = new CustomResourceCache('namespace', 'apple', kc, Apple, { - restartDelay: RESTART_DELAY_MS, - eventTimeout: EVENT_TIMEOUT_MS - }); - const startStub = sinon.stub(watcher.listWatch, 'start'); - startStub.onCall(0).rejects(); - startStub.onCall(1).rejects(); - startStub.onCall(2).resolves(); - const startTime = new Date(); - await watcher.start(); - const delta = new Date() - startTime; - sinon.assert.calledThrice(startStub); - expect(watcher.isConnected()).to.be.true; - expect(delta).to.be.within(2 * RESTART_DELAY_MS, 3 * RESTART_DELAY_MS); - watcher.stop(); - }); - - it('should reconnect watcher if it gets disconnected', async () => { - const [watcher, startStub] = createMockedCache(); - await watcher.start(); - sinon.assert.calledOnce(startStub); - expect(watcher.isConnected()).to.be.true; - startStub.onCall(1).rejects(new Error('start failed')); - startStub.onCall(2).resolves(); - watcher.emitKubeEvent('error', new Error('got disconnected')); - await sleep(RESTART_DELAY_MS * 1.5); - sinon.assert.calledTwice(startStub); - expect(watcher.isConnected()).to.be.false; - await sleep(RESTART_DELAY_MS); - sinon.assert.calledThrice(startStub); - expect(watcher.isConnected()).to.be.true; - watcher.stop(); - }); - - it('should reset watcher if idle for too long', async () => { - const [watcher, startStub] = createMockedCache(); - await watcher.start(); - sinon.assert.calledOnce(startStub); - expect(watcher.isConnected()).to.be.true; - startStub.onCall(1).resolves(); - await sleep(IDLE_TIMEOUT_MS * 1.5); - sinon.assert.calledTwice(startStub); - expect(watcher.isConnected()).to.be.true; - watcher.stop(); - }); - - describe('methods', function () { - let watcher; - let timeout; - - beforeEach(async () => { - let startStub; - timeout = undefined; - [watcher, startStub] = createMockedCache(); - startStub.resolves(); - await watcher.start(); - }); - - afterEach(() => { - if (watcher) { - watcher.stop(); - watcher = undefined; - } - if (timeout) { - clearTimeout(timeout); - } - }); - - function assertReplaceCalledWith (stub, name, obj, attrs) { - const newObj = _.cloneDeep(obj); - _.merge(newObj, attrs); - sinon.assert.calledOnce(stub); - sinon.assert.calledWith(stub, 'openebs.io', 'v1alpha1', 'namespace', - 'apples', 
name, newObj); - } - - it('should list all objects', () => { - const listStub = sinon.stub(watcher.listWatch, 'list'); - listStub.returns([ - createApple('name1', [], 'valid'), - createApple('name2', [], 'invalid'), - createApple('name3', [], 'valid') - ]); - const objs = watcher.list(); - expect(objs).to.have.length(2); - expect(objs[0].metadata.name).to.equal('name1'); - expect(objs[1].metadata.name).to.equal('name3'); - }); - - it('should get object by name', () => { - const getStub = sinon.stub(watcher.listWatch, 'get'); - getStub.returns(createApple('name1', [], 'valid')); - const obj = watcher.get('name1'); - expect(obj).to.be.an.instanceof(Apple); - expect(obj.metadata.name).to.equal('name1'); - sinon.assert.calledWith(getStub, 'name1'); - }); - - it('should get undefined if object does not exist', () => { - const getStub = sinon.stub(watcher.listWatch, 'get'); - getStub.returns(undefined); - const obj = watcher.get('name1'); - expect(obj).to.be.undefined; - sinon.assert.calledWith(getStub, 'name1'); - }); - - it('should create an object and wait for new event', async () => { - const createStub = sinon.stub(watcher.k8sApi, 'createNamespacedCustomObject'); - createStub.resolves(); - const apple = createApple('name1', [], 'valid'); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('add', apple), EVENT_DELAY_MS); - await watcher.create(apple); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS + EYE_BLINK_MS); - sinon.assert.calledOnce(createStub); - }); - - it('should timeout when "add" event does not come after a create', async () => { - const createStub = sinon.stub(watcher.k8sApi, 'createNamespacedCustomObject'); - createStub.resolves(); - const apple = createApple('name1', [], 'valid'); - const startTime = new Date(); - await watcher.create(apple); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_TIMEOUT_MS - TOLERATE_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); - sinon.assert.calledOnce(createStub); - }); - - it('should update object and wait for mod event', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - const newApple = createApple('name1', [], 'also valid'); - getStub.returns(apple); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', newApple), EVENT_DELAY_MS); - await watcher.update('name1', (orig) => { - return createApple(orig.metadata.name, [], 'also valid'); - }); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS + EYE_BLINK_MS); - assertReplaceCalledWith(replaceStub, 'name1', apple, { - spec: 'also valid' - }); - }); - - it('should not try to update object if it does not exist', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - getStub.returns(); - await watcher.update('name1', (orig) => { - return createApple(orig.metadata.name, [], 'also valid'); - }); - sinon.assert.notCalled(replaceStub); - }); - - it('should timeout when "update" event does not come after an update', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 
'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - await watcher.update('name1', (orig) => { - return createApple(orig.metadata.name, [], 'also valid'); - }); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_TIMEOUT_MS - TOLERATE_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); - sinon.assert.calledOnce(replaceStub); - }); - - it('should retry update of an object if it fails', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.onCall(0).rejects(new Error('update failed')); - replaceStub.onCall(1).resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - await watcher.update('name1', (orig) => { - return createApple(orig.metadata.name, [], 'also valid'); - }); - sinon.assert.calledTwice(replaceStub); - }); - - it('should update status of object', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - await watcher.updateStatus('name1', (orig) => { - return _.assign({}, apple, { - status: 'some-state' - }); - }); - assertReplaceCalledWith(replaceStub, 'name1', apple, { - status: 'some-state' - }); - }); - - it('should not try to update status of object if it does not exist', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(); - await watcher.updateStatus('name1', (orig) => { - return _.assign({}, apple, { - status: 'some-state' - }); - }); - sinon.assert.notCalled(replaceStub); - }); - - it('should timeout when "update" event does not come after status update', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - await watcher.updateStatus('name1', (orig) => { - return _.assign({}, apple, { - status: 'some-state' - }); - }); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_TIMEOUT_MS - TOLERATE_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); - sinon.assert.calledOnce(replaceStub); - }); - - it('should retry status update of an object if it fails', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); - replaceStub.onCall(0).rejects(new Error('update failed')); - replaceStub.onCall(1).resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - await watcher.updateStatus('name1', (orig) => { - return _.assign({}, apple, { - status: 'some-state' - }); - }); - sinon.assert.calledTwice(replaceStub); - }); - - it('should fail if status update fails twice', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); - replaceStub.onCall(0).rejects(new Error('update failed first time')); - replaceStub.onCall(1).rejects(new Error('update failed second time')); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = 
createApple('name1', [], 'valid'); - getStub.returns(apple); - let error; - try { - await watcher.updateStatus('name1', (orig) => { - return _.assign({}, apple, { - status: 'some-state' - }); - }); - } catch (err) { - error = err; - } - expect(error.message).to.equal('Status update of apple "name1" failed: update failed second time'); - sinon.assert.calledTwice(replaceStub); - }); - - it('should delete the object and wait for "delete" event', async () => { - const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); - deleteStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('delete', apple), EVENT_DELAY_MS); - await watcher.delete('name1'); - const delta = new Date() - startTime; - sinon.assert.calledOnce(deleteStub); - sinon.assert.calledWith(deleteStub, 'openebs.io', 'v1alpha1', 'namespace', - 'apples', 'name1'); - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS + EYE_BLINK_MS); - }); - - it('should timeout when "delete" event does not come after a delete', async () => { - const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); - deleteStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - await watcher.delete('name1'); - const delta = new Date() - startTime; - sinon.assert.calledOnce(deleteStub); - expect(delta).to.be.within(EVENT_TIMEOUT_MS - TOLERATE_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); - }); - - it('should not try to delete object that does not exist', async () => { - const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); - deleteStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(); - timeout = setTimeout(() => watcher.emitKubeEvent('delete', apple), EVENT_DELAY_MS); - await watcher.delete('name1'); - sinon.assert.notCalled(deleteStub); - }); - - it('should add finalizer to object without any', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.addFinalizer('name1', 'test.finalizer.com'); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS + EYE_BLINK_MS); - assertReplaceCalledWith(replaceStub, 'name1', apple, { - metadata: { - finalizers: ['test.finalizer.com'] - } - }); - }); - - it('should add another finalizer to object', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.addFinalizer('name1', 'new.finalizer.com'); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS 
+ EYE_BLINK_MS); - assertReplaceCalledWith(replaceStub, 'name1', apple, { - metadata: { - finalizers: ['new.finalizer.com', 'test.finalizer.com', 'test2.finalizer.com'] - } - }); - }); - - it('should not add twice the same finalizer', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); - getStub.returns(apple); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.addFinalizer('name1', 'test.finalizer.com'); - sinon.assert.notCalled(replaceStub); - }); - - it('should not add the finalizer if object does not exist', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', [], 'valid'); - getStub.returns(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.addFinalizer('name1', 'test.finalizer.com'); - sinon.assert.notCalled(replaceStub); - }); - - it('should remove finalizer from object', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); - getStub.returns(apple); - const startTime = new Date(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.removeFinalizer('name1', 'test.finalizer.com'); - const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS - TOLERATE_MS, EVENT_DELAY_MS + EYE_BLINK_MS); - sinon.assert.calledOnce(replaceStub); - assertReplaceCalledWith(replaceStub, 'name1', apple, { - metadata: { - finalizers: ['test2.finalizer.com'] - } - }); - }); - - it('should not try to remove finalizer that does not exist', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', ['test2.finalizer.com'], 'valid'); - getStub.returns(apple); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.removeFinalizer('name1', 'test.finalizer.com'); - sinon.assert.notCalled(replaceStub); - }); - - it('should not try to remove finalizer if object does not exist', async () => { - const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); - replaceStub.resolves(); - const getStub = sinon.stub(watcher.listWatch, 'get'); - const apple = createApple('name1', ['test.finalizer.com'], 'valid'); - getStub.returns(); - timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); - await watcher.removeFinalizer('name1', 'test.finalizer.com'); - sinon.assert.notCalled(replaceStub); - }); - }); -}; diff --git a/csi/moac/test/workq_test.js b/csi/moac/test/workq_test.js deleted file mode 100644 index ef5696492..000000000 --- a/csi/moac/test/workq_test.js +++ /dev/null @@ -1,152 +0,0 @@ -// Unit tests for the work queue class - -'use strict'; - -const expect = require('chai').expect; -const sinon = require('sinon'); -const sleep = require('sleep-promise'); -const { Workq } = require('../dist/workq'); - -class 
Task { - constructor (id, delay) { - this.id = id; - this.delay = delay || 1; - } - - async doIt (arg) { - if (arg === 'throw here') { - throw new Error('Testing exception in sync context'); - } - await sleep(this.delay); - if (arg === 'throw there') { - throw new Error('Testing exception in async context'); - } - return { - id: this.id, - arg: arg, - timestamp: Date.now() - }; - } -} - -module.exports = function () { - let clock; - - beforeEach(() => { - clock = sinon.useFakeTimers(); - }); - - afterEach(() => { - clock.restore(); - }); - - it('should execute a task that is a closure', async () => { - const wq = new Workq(); - const result = await wq.push(100, async (arg) => { - expect(arg).to.equal(100); - return arg; - }); - expect(result).to.equal(100); - }); - - it('should execute a task that is a bound method', (done) => { - const task = new Task(0); - const wq = new Workq(); - - wq.push(100, task.doIt.bind(task)).then((result) => { - expect(result.id).to.equal(0); - expect(result.arg).to.equal(100); - done(); - }); - clock.tick(1); - }); - - it('should propagate an exception from sync context', (done) => { - const task = new Task(0); - const wq = new Workq(); - - wq.push('throw here', task.doIt.bind(task)) - .then((res) => done(new Error('it should have thrown the exception'))) - .catch(() => done()); - clock.tick(1); - }); - - it('should propagate an exception from async context', (done) => { - const task = new Task(0); - const wq = new Workq(); - - wq.push('throw there', task.doIt.bind(task)) - .then((res) => done(new Error('it should have thrown the exception'))) - .catch(() => done()); - clock.tick(1); - }); - - it('should finish tasks in the same order they were pushed', async () => { - const task1 = new Task(1, 10); - const task2 = new Task(2, 10); - const task3 = new Task(3, 10); - const wq = new Workq(); - - const promise1 = wq.push(100, task1.doIt.bind(task1)); - const promise2 = wq.push(100, task2.doIt.bind(task2)); - const promise3 = wq.push(100, task3.doIt.bind(task3)); - - clock.tick(10); - let res = await promise1; - expect(res.id).to.equal(1); - // we must restore the clock here because the next item in workq hasn't been - // dispatched yet so moving the clock head now would not help. It wasn't the - // case with nodejs v10 when try-catch-finally was done differently. 
- clock.restore(); - res = await promise2; - expect(res.id).to.equal(2); - res = await promise3; - expect(res.id).to.equal(3); - }); - - it('should put a new task on hold if a previous task is in progress', async () => { - const task1 = new Task(1, 100); - const task2 = new Task(2); - const wq = new Workq(); - - const promise1 = wq.push(100, task1.doIt.bind(task1)); - clock.tick(50); - const promise2 = wq.push(100, task2.doIt.bind(task2)); - clock.tick(50); - const res1 = await promise1; - expect(res1.id).to.equal(1); - clock.restore(); - const res2 = await promise2; - expect(res2.id).to.equal(2); - expect(res1.timestamp).to.be.below(res2.timestamp); - }); - - it('should continue with the next task even if previous one failed', (done) => { - const task1 = new Task(1); - const task2 = new Task(2); - const task3 = new Task(3); - const wq = new Workq(); - - clock.restore(); - - const promise1 = wq.push('throw here', task1.doIt.bind(task1)); - const promise2 = wq.push('throw there', task2.doIt.bind(task2)); - const promise3 = wq.push(100, task3.doIt.bind(task3)); - - promise1 - .then((res) => done(new Error('it should have thrown the exception'))) - .catch((e) => { - promise2 - .then((res) => done(new Error('it should have thrown the exception'))) - .catch((e) => { - promise3 - .then((res) => { - expect(res.id).to.equal(3); - expect(res.arg).to.equal(100); - done(); - }) - .catch((e) => done(e)); - }); - }); - }); -}; diff --git a/csi/moac/tsconfig.json b/csi/moac/tsconfig.json deleted file mode 100644 index 286c7baa0..000000000 --- a/csi/moac/tsconfig.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "compilerOptions": { - /* Visit https://aka.ms/tsconfig.json to read more about this file */ - /* Basic Options */ - "incremental": true, /* Enable incremental compilation */ - "target": "ES2019", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */ - "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */ - // "lib": [], /* Specify library files to be included in the compilation. */ - // "allowJs": true, /* Allow javascript files to be compiled. */ - // "checkJs": true, /* Report errors in .js files. */ - // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */ - // "declaration": true, /* Generates corresponding '.d.ts' file. */ - // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ - "sourceMap": true, /* Generates corresponding '.map' file. */ - // "outFile": "./", /* Concatenate and emit output to single file. */ - "outDir": "./dist", /* Redirect output structure to the directory. */ - // "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */ - // "composite": true, /* Enable project compilation */ - // "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */ - // "removeComments": true, /* Do not emit comments to output. */ - // "noEmit": true, /* Do not emit outputs. */ - // "importHelpers": true, /* Import emit helpers from 'tslib'. */ - // "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */ - // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). 
*/ - /* Strict Type-Checking Options */ - "strict": true, /* Enable all strict type-checking options. */ - // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */ - // "strictNullChecks": true, /* Enable strict null checks. */ - // "strictFunctionTypes": true, /* Enable strict checking of function types. */ - // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */ - // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */ - // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */ - // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */ - /* Additional Checks */ - // "noUnusedLocals": true, /* Report errors on unused locals. */ - // "noUnusedParameters": true, /* Report errors on unused parameters. */ - // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ - // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ - /* Module Resolution Options */ - // "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */ - // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */ - // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */ - // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */ - // "typeRoots": [], /* List of folders to include type definitions from. */ - // "types": [], /* Type declaration files to be included in compilation. */ - // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */ - "esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */ - // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */ - // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ - /* Source Map Options */ - // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */ - // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ - // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */ - // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */ - /* Experimental Options */ - // "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */ - // "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */ - /* Advanced Options */ - "skipLibCheck": true, /* Skip type checking of declaration files. */ - "forceConsistentCasingInFileNames": true, /* Disallow inconsistently-cased references to the same file. 
*/ - "resolveJsonModule": true /* allows for importing, extracting types from and generating .json files */ - }, - "include": ["src/**/*"], - "exclude":["node_modules"] -} diff --git a/csi/src/dev.rs b/csi/src/dev.rs index 4ff387d42..ca90771a9 100644 --- a/csi/src/dev.rs +++ b/csi/src/dev.rs @@ -112,9 +112,14 @@ impl Device { if let Some(devname) = match_dev::match_nvmf_device(&device, &nvmf_key) { + let nqn = if std::env::var("MOAC").is_ok() { + format!("{}:nexus-{}", NVME_NQN_PREFIX, uuid.to_string()) + } else { + format!("{}:{}", NVME_NQN_PREFIX, uuid.to_string()) + }; return Ok(Some(Box::new(nvmf::NvmfDetach::new( devname.to_string(), - format!("{}:nexus-{}", NVME_NQN_PREFIX, uuid.to_string()), + nqn, )))); } } diff --git a/csi/src/filesystem_vol.rs b/csi/src/filesystem_vol.rs index e47bef6f8..b88437cd0 100644 --- a/csi/src/filesystem_vol.rs +++ b/csi/src/filesystem_vol.rs @@ -129,13 +129,13 @@ pub async fn unstage_fs_volume( if let Some(mount) = mount::find_mount(None, Some(fs_staging_path)) { debug!( - "Unstaging filesystem volume {}, unmounting device {} from {}", + "Unstaging filesystem volume {}, unmounting device {:?} from {}", volume_id, mount.source, fs_staging_path ); if let Err(error) = mount::filesystem_unmount(fs_staging_path) { return Err(failure!( Code::Internal, - "Failed to unstage volume {}: failed to unmount device {} from {}: {}", + "Failed to unstage volume {}: failed to unmount device {:?} from {}: {}", volume_id, mount.source, fs_staging_path, diff --git a/csi/src/format.rs b/csi/src/format.rs index aa1b7a8ac..df0e6007d 100644 --- a/csi/src/format.rs +++ b/csi/src/format.rs @@ -2,7 +2,7 @@ use std::process::Command; -use blkid::probe::Probe; +use devinfo::blkid::probe::Probe; pub(crate) async fn prepare_device( device: &str, diff --git a/csi/src/mount.rs b/csi/src/mount.rs index 627e52bb7..adc28535a 100644 --- a/csi/src/mount.rs +++ b/csi/src/mount.rs @@ -2,7 +2,7 @@ use std::{collections::HashSet, io::Error}; -use proc_mounts::MountIter; +use devinfo::mountinfo::{MountInfo, MountIter}; use sys_mount::{unmount, FilesystemType, Mount, MountFlags, UnmountFlags}; // Simple trait for checking if the readonly (ro) option @@ -24,32 +24,12 @@ impl ReadOnly for &str { } } -// Information about a mounted filesystem. -#[derive(Debug)] -pub struct MountInfo { - pub source: String, - pub dest: String, - pub fstype: String, - pub options: Vec, -} - -impl From for MountInfo { - fn from(mount: proc_mounts::MountInfo) -> MountInfo { - MountInfo { - source: mount.source.to_string_lossy().to_string(), - dest: mount.dest.to_string_lossy().to_string(), - fstype: mount.fstype, - options: mount.options, - } - } -} - /// Return mountinfo matching source and/or destination. 
pub fn find_mount( source: Option<&str>, target: Option<&str>, ) -> Option { - let mut found: Option = None; + let mut found: Option = None; for mount in MountIter::new().unwrap().flatten() { if let Some(value) = source { diff --git a/csi/src/nodeplugin_svc.rs b/csi/src/nodeplugin_svc.rs index 008f1fd40..e17c8781a 100644 --- a/csi/src/nodeplugin_svc.rs +++ b/csi/src/nodeplugin_svc.rs @@ -54,7 +54,8 @@ async fn fsfreeze( { let device_path = device.devname(); if let Some(mnt) = mount::find_mount(Some(&device_path), None) { - let args = [freeze_op, &mnt.dest]; + let dest = mnt.dest.display().to_string(); + let args = [freeze_op, &dest]; let output = Command::new(FSFREEZE).args(&args).output().await.context( IoError { diff --git a/csi/src/server.rs b/csi/src/server.rs index b95c6c447..94764114b 100644 --- a/csi/src/server.rs +++ b/csi/src/server.rs @@ -12,6 +12,7 @@ extern crate tracing; use std::{ fs, io::{ErrorKind, Write}, + sync::Arc, }; use crate::{identity::Identity, mount::probe_filesystems, node::Node}; @@ -55,10 +56,25 @@ mod node; mod nodeplugin_grpc; mod nodeplugin_svc; +#[derive(Clone, Debug)] +pub struct UdsConnectInfo { + pub peer_addr: Option>, + pub peer_cred: Option, +} + #[derive(Debug)] struct UnixStream(tokio::net::UnixStream); -impl Connected for UnixStream {} +impl Connected for UnixStream { + type ConnectInfo = UdsConnectInfo; + + fn connect_info(&self) -> Self::ConnectInfo { + UdsConnectInfo { + peer_addr: self.0.peer_addr().ok().map(Arc::new), + peer_cred: self.0.peer_cred().ok(), + } + } +} impl AsyncRead for UnixStream { fn poll_read( @@ -96,6 +112,14 @@ impl AsyncWrite for UnixStream { const GRPC_PORT: u16 = 10199; +// Returns only base hostname, stripping all (sub)domain parts. +fn normalize_hostname(hostname: &str) -> &str { + match hostname.find('.') { + Some(idx) => &hostname[0 .. idx], + None => hostname, + } +} + #[tokio::main] async fn main() -> Result<(), String> { let matches = App::new("Mayastor CSI plugin") @@ -147,7 +171,7 @@ async fn main() -> Result<(), String> { ) .get_matches(); - let node_name = matches.value_of("node-name").unwrap(); + let node_name = normalize_hostname(matches.value_of("node-name").unwrap()); let endpoint = matches.value_of("grpc-endpoint").unwrap(); let csi_socket = matches .value_of("csi-socket") @@ -232,7 +256,7 @@ impl CsiServer { info!("CSI plugin bound to {}", csi_socket); async_stream::stream! 
{ - while let item = uds.accept().map_ok(|(st, _)| wrapped_stream::UnixStream(st)).await { + while let item = uds.accept().map_ok(|(st, _)| UnixStream(st)).await { yield item; } } @@ -254,54 +278,3 @@ impl CsiServer { Ok(()) } } - -// Contained in https://github.com/hyperium/tonic/blob/61555ff2b5b76e4e3172717354aed1e6f31d6611/examples/src/uds/server.rs#L45-L108 -#[cfg(unix)] -mod wrapped_stream { - use std::{ - pin::Pin, - task::{Context, Poll}, - }; - - use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - use tonic::transport::server::Connected; - - #[derive(Debug)] - pub struct UnixStream(pub tokio::net::UnixStream); - - impl Connected for UnixStream {} - - impl AsyncRead for UnixStream { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.0).poll_read(cx, buf) - } - } - - impl AsyncWrite for UnixStream { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_write(cx, buf) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0).poll_shutdown(cx) - } - } -} diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml index 887544616..6a52fe3a9 100644 --- a/deploy/csi-daemonset.yaml +++ b/deploy/csi-daemonset.yaml @@ -30,7 +30,7 @@ spec: # the same. containers: - name: mayastor-csi - image: mayadata/mayastor-csi:v0.8.1 + image: mayadata/mayastor:v1.0.0 imagePullPolicy: IfNotPresent # we need privileged because we mount filesystems and use mknod securityContext: @@ -52,6 +52,8 @@ spec: - "--grpc-endpoint=$(MY_POD_IP):10199" - "--nvme-core-io-timeout=30" - "-v" + command: + - mayastor-csi volumeMounts: - name: device mountPath: /dev diff --git a/deploy/etcd/statefulset.yaml b/deploy/etcd/statefulset.yaml index e4094f625..ede6db9a6 100644 --- a/deploy/etcd/statefulset.yaml +++ b/deploy/etcd/statefulset.yaml @@ -11,7 +11,7 @@ metadata: app.kubernetes.io/instance: mayastor app.kubernetes.io/managed-by: Helm spec: - replicas: 1 + replicas: 3 selector: matchLabels: app.kubernetes.io/name: etcd @@ -47,10 +47,32 @@ spec: securityContext: fsGroup: 1001 serviceAccountName: "default" + initContainers: + - name: volume-permissions + image: docker.io/bitnami/bitnami-shell:10 + imagePullPolicy: "Always" + command: + - /bin/bash + - -ec + - | + chown -R 1001:1001 /bitnami/etcd + securityContext: + runAsUser: 0 + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/etcd containers: - name: etcd image: docker.io/bitnami/etcd:3.4.15-debian-10-r43 imagePullPolicy: "IfNotPresent" + lifecycle: + preStop: + exec: + command: + - /opt/bitnami/scripts/etcd/prestop.sh securityContext: runAsNonRoot: true runAsUser: 1001 @@ -89,6 +111,14 @@ spec: value: "http://$(MY_POD_NAME).mayastor-etcd-headless.mayastor.svc.cluster.local:2380" - name: ETCD_LISTEN_PEER_URLS value: "http://0.0.0.0:2380" + - name: ETCD_INITIAL_CLUSTER_TOKEN + value: "etcd-cluster-k8s" + - name: ETCD_INITIAL_CLUSTER_STATE + value: "new" + - name: ETCD_INITIAL_CLUSTER + value: "mayastor-etcd-0=http://mayastor-etcd-0.mayastor-etcd-headless.mayastor.svc.cluster.local:2380,mayastor-etcd-1=http://mayastor-etcd-1.mayastor-etcd-headless.mayastor.svc.cluster.local:2380,mayastor-etcd-2=http://mayastor-etcd-2.mayastor-etcd-headless.mayastor.svc.cluster.local:2380" + - name: 
ETCD_CLUSTER_DOMAIN + value: "mayastor-etcd-headless.mayastor.svc.cluster.local" envFrom: ports: - name: client @@ -122,5 +152,13 @@ spec: - name: data mountPath: /bitnami/etcd volumes: - - name: data - emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "2Gi" + storageClassName: manual diff --git a/deploy/etcd/storage/localpv.yaml b/deploy/etcd/storage/localpv.yaml new file mode 100644 index 000000000..2e5a6ee0e --- /dev/null +++ b/deploy/etcd/storage/localpv.yaml @@ -0,0 +1,57 @@ +--- +# Source: mayastor/templates/etcd/storage/localpv.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: mayastor + name: etcd-volume-0 + labels: + statefulset.kubernetes.io/pod-name: mayastor-etcd-0 +spec: + storageClassName: manual + # You must also delete the hostpath on the node + persistentVolumeReclaimPolicy: Retain + capacity: + storage: "2Gi" + accessModes: + - ReadWriteOnce + hostPath: + path: "/var/local/mayastor/etcd/pod-0" +--- +# Source: mayastor/templates/etcd/storage/localpv.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: mayastor + name: etcd-volume-1 + labels: + statefulset.kubernetes.io/pod-name: mayastor-etcd-1 +spec: + storageClassName: manual + # You must also delete the hostpath on the node + persistentVolumeReclaimPolicy: Retain + capacity: + storage: "2Gi" + accessModes: + - ReadWriteOnce + hostPath: + path: "/var/local/mayastor/etcd/pod-1" +--- +# Source: mayastor/templates/etcd/storage/localpv.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: mayastor + name: etcd-volume-2 + labels: + statefulset.kubernetes.io/pod-name: mayastor-etcd-2 +spec: + storageClassName: manual + # You must also delete the hostpath on the node + persistentVolumeReclaimPolicy: Retain + capacity: + storage: "2Gi" + accessModes: + - ReadWriteOnce + hostPath: + path: "/var/local/mayastor/etcd/pod-2" diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 1c2d78e04..660432ea2 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -33,9 +33,11 @@ spec: command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] containers: - name: mayastor - image: mayadata/mayastor:v0.8.1 - imagePullPolicy: Always + image: mayadata/mayastor:v1.0.0 + imagePullPolicy: IfNotPresent env: + - name: RUST_LOG + value: info,mayastor=info - name: MY_NODE_NAME valueFrom: fieldRef: @@ -44,8 +46,6 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP - - name: IMPORT_NEXUSES - value: "false" args: # The -l argument accepts cpu-list. Indexing starts at zero. # For example -l 1,2,10-20 means use core 1, 2, 10 to 20. 
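Taken together with the hunk that follows (which appends an explicit 'command: mayastor' entry after the existing '-l'/'-p' arguments), the patched mayastor container spec comes out roughly as sketched below. This is an illustrative reconstruction assembled only from the added and removed lines visible in these hunks, not the complete manifest:

    containers:
      - name: mayastor
        image: mayadata/mayastor:v1.0.0
        imagePullPolicy: IfNotPresent
        command:
          - mayastor
        args:
          # -l accepts a cpu-list, indexed from zero, e.g. "-l 1,2,10-20"
          - "-l1"
          - "-pmayastor-etcd"
        env:
          - name: RUST_LOG              # added by this change; IMPORT_NEXUSES is dropped
            value: info,mayastor=info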
@@ -57,6 +57,8 @@ spec: - "-P/var/local/mayastor/pools.yaml" - "-l1" - "-pmayastor-etcd" + command: + - mayastor securityContext: privileged: true volumeMounts: diff --git a/deploy/mayastorpoolcrd.yaml b/deploy/mayastorpoolcrd.yaml deleted file mode 120000 index 41db9f018..000000000 --- a/deploy/mayastorpoolcrd.yaml +++ /dev/null @@ -1 +0,0 @@ -../csi/moac/crds/mayastorpool.yaml \ No newline at end of file diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml deleted file mode 100644 index f93d7d983..000000000 --- a/deploy/moac-deployment.yaml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Source: mayastor/templates/moac-deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: moac - namespace: mayastor -spec: - replicas: 1 - selector: - matchLabels: - app: moac - template: - metadata: - labels: - app: moac - spec: - serviceAccount: moac - containers: - - name: csi-provisioner - image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.1 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology=false" - - "--default-fstype=ext4" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: csi-attacher - image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: moac - image: mayadata/moac:v0.8.1 - imagePullPolicy: Always - args: - - "--csi-address=$(CSI_ENDPOINT)" - - "--port=3000" - - "--watcher-idle-timeout=600000" - - "--etcd-endpoint=mayastor-etcd" - - "--message-bus=nats" - - "-v" - env: - - name: CSI_ENDPOINT - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - livenessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 20 - timeoutSeconds: 10 - readinessProbe: - httpGet: - path: / - port: 3000 - initialDelaySeconds: 20 - timeoutSeconds: 10 - volumes: - - name: socket-dir - emptyDir: diff --git a/deploy/moac-rbac.yaml b/deploy/moac-rbac.yaml deleted file mode 100644 index 89211347e..000000000 --- a/deploy/moac-rbac.yaml +++ /dev/null @@ -1,96 +0,0 @@ ---- -# Source: mayastor/templates/moac-rbac.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: moac - namespace: mayastor ---- -# Source: mayastor/templates/moac-rbac.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: moac -rules: - # must create mayastor crd if it doesn't exist -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create"] - # must read csi plugin info -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - # must read/write mayastor node resources -- apiGroups: ["openebs.io"] - resources: ["mayastornodes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - # must update mayastor node status -- apiGroups: ["openebs.io"] - resources: ["mayastornodes/status"] - verbs: ["update"] - # must read mayastor pools info -- apiGroups: ["openebs.io"] - resources: ["mayastorpools"] - verbs: ["get", "list", "watch", "update", "replace"] - # must update mayastor pools status -- apiGroups: ["openebs.io"] - resources: ["mayastorpools/status"] - verbs: 
["update"] - # must read/write mayastor volume resources -- apiGroups: ["openebs.io"] - resources: ["mayastorvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - # must update mayastor volumes status -- apiGroups: ["openebs.io"] - resources: ["mayastorvolumes/status"] - verbs: ["update"] - - # external provisioner & attacher -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - # external provisioner -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list"] -- apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["get", "list"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - # external attacher -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments/status"] - verbs: ["patch"] ---- -# Source: mayastor/templates/moac-rbac.yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: moac -subjects: -- kind: ServiceAccount - name: moac - namespace: mayastor -roleRef: - kind: ClusterRole - name: moac - apiGroup: rbac.authorization.k8s.io diff --git a/deploy/nats-deployment.yaml b/deploy/nats-deployment.yaml index c1dcaeb55..84d55ade6 100644 --- a/deploy/nats-deployment.yaml +++ b/deploy/nats-deployment.yaml @@ -10,6 +10,18 @@ data: pid_file: "/var/run/nats/nats.pid" http: 8222 + cluster { + port: 6222 + + routes [ + nats://nats-0.nats.mayastor.svc:6222 + nats://nats-1.nats.mayastor.svc:6222 + nats://nats-2.nats.mayastor.svc:6222 + ] + + cluster_advertise: $CLUSTER_ADVERTISE + connect_retries: 30 + } --- # Source: mayastor/templates/nats-deployment.yaml apiVersion: v1 @@ -45,7 +57,7 @@ spec: selector: matchLabels: app: nats - replicas: 1 + replicas: 3 serviceName: "nats" template: metadata: diff --git a/deploy/storage-class.yaml b/deploy/storage-class.yaml index d4c03c036..aa697bb39 100644 --- a/deploy/storage-class.yaml +++ b/deploy/storage-class.yaml @@ -1,19 +1,6 @@ --- kind: StorageClass apiVersion: storage.k8s.io/v1 -metadata: - name: mayastor-iscsi -parameters: - repl: '1' - protocol: 'iscsi' - local: 'yes' - # It is recommended to use xfs for Mayastor - # fsType: 'xfs' -provisioner: io.openebs.csi-mayastor -volumeBindingMode: WaitForFirstConsumer ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 metadata: name: mayastor-nvmf parameters: diff --git a/devinfo/Cargo.toml b/devinfo/Cargo.toml index ec83d14db..4145f683d 100644 --- a/devinfo/Cargo.toml +++ b/devinfo/Cargo.toml @@ -4,9 +4,10 @@ version = "0.1.0" authors = ["Jeffry Molanus "] edition = "2018" - [dependencies] -snafu = "0.6" -udev = "0.6" -url = "2.1" -uuid = { version = "0.8", features = ["v4"] } +snafu = "0.6.10" +udev = "0.6.2" +url = "2.2.2" +uuid = { version = "0.8.2", features = ["v4"] } +[build-dependencies] +bindgen = "0.59.1" diff --git a/devinfo/build.rs b/devinfo/build.rs new file mode 100644 index 000000000..ab9f8f802 --- /dev/null +++ b/devinfo/build.rs @@ -0,0 +1,15 @@ +use 
std::{env, path::PathBuf}; +fn main() { + let bindings = bindgen::Builder::default() + .header("wrapper.h") + .allowlist_function("^blkid.*") + .generate() + .expect("Unable to generate bindings"); + + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + bindings + .write_to_file(out_path.join("libblkid.rs")) + .expect("failed to generate bindings"); + + println!("cargo:rustc-link-lib=blkid"); +} diff --git a/devinfo/src/blkid/mod.rs b/devinfo/src/blkid/mod.rs new file mode 100644 index 000000000..4a48b5703 --- /dev/null +++ b/devinfo/src/blkid/mod.rs @@ -0,0 +1,42 @@ +pub mod partition; +pub mod probe; +use crate::DevInfoError; + +include!(concat!(env!("OUT_DIR"), "/libblkid.rs")); +pub(crate) trait CResult: Copy { + fn is_error(self) -> bool; +} + +impl CResult for i32 { + fn is_error(self) -> bool { + self < 0 + } +} + +impl CResult for i64 { + fn is_error(self) -> bool { + self < 0 + } +} + +impl CResult for *const T { + fn is_error(self) -> bool { + self.is_null() + } +} + +impl CResult for *mut T { + fn is_error(self) -> bool { + self.is_null() + } +} + +pub(crate) fn to_result(result: T) -> Result { + if result.is_error() { + return Err(DevInfoError::Io { + source: std::io::Error::last_os_error(), + }); + } + + Ok(result) +} diff --git a/devinfo/src/blkid/partition.rs b/devinfo/src/blkid/partition.rs new file mode 100644 index 000000000..e891d51d5 --- /dev/null +++ b/devinfo/src/blkid/partition.rs @@ -0,0 +1,77 @@ +use crate::{ + blkid::{ + blkid_partition, + blkid_partition_get_name, + blkid_partition_get_type_string, + blkid_partition_get_uuid, + blkid_partlist, + blkid_partlist_get_partition, + blkid_partlist_get_partition_by_partno, + blkid_partlist_numof_partitions, + to_result, + }, + DevInfoError, +}; +use std::{ffi::CStr, os::raw::c_int}; +pub struct Partition(pub(crate) blkid_partition); + +impl Partition { + pub fn get_name(&self) -> Option { + let ptr = unsafe { blkid_partition_get_name(self.0) }; + if ptr.is_null() { + return None; + } + + Some(unsafe { CStr::from_ptr(ptr) }.to_string_lossy().to_string()) + } + + pub fn get_type_string(&self) -> Option { + let ptr = unsafe { blkid_partition_get_type_string(self.0) }; + if ptr.is_null() { + return None; + } + + Some(unsafe { CStr::from_ptr(ptr) }.to_string_lossy().to_string()) + } + + pub fn get_uuid(&self) -> Option { + let ptr = unsafe { blkid_partition_get_uuid(self.0) }; + if ptr.is_null() { + return None; + } + + Some(unsafe { CStr::from_ptr(ptr) }.to_string_lossy().to_string()) + } +} + +pub struct PartList(pub(crate) blkid_partlist); + +impl PartList { + pub fn get_partition(&self, partition: i32) -> Option { + if let Ok(p) = to_result(unsafe { + blkid_partlist_get_partition(self.0, partition as c_int) + }) { + return Some(Partition(p)); + } + { + None + } + } + + pub fn get_partition_by_partno(&self, partition: i32) -> Option { + if let Ok(p) = to_result(unsafe { + blkid_partlist_get_partition_by_partno(self.0, partition as c_int) + }) { + return Some(Partition(p)); + } + { + None + } + } + + pub fn numof_partitions(&self) -> Result { + unsafe { + to_result(blkid_partlist_numof_partitions(self.0)).map(|v| v as u32) + } + } +} diff --git a/devinfo/src/blkid/probe.rs b/devinfo/src/blkid/probe.rs new file mode 100644 index 000000000..0e13bc508 --- /dev/null +++ b/devinfo/src/blkid/probe.rs @@ -0,0 +1,90 @@ +use crate::blkid::{ + blkid_do_probe, + blkid_do_safeprobe, + blkid_free_probe, + blkid_new_probe, + blkid_new_probe_from_filename, + blkid_probe, + blkid_probe_has_value, + blkid_probe_lookup_value, +}; 
+use core::slice; +use std::{ + ffi::{CStr, CString}, + path::Path, +}; + +use crate::blkid::to_result; +pub struct Probe(blkid_probe); +use crate::DevInfoError; + +impl Probe { + pub fn new() -> Result { + unsafe { Ok(Probe(to_result(blkid_new_probe())?)) } + } + + pub fn new_from_filename>( + path: P, + ) -> Result { + let path = + CString::new(path.as_ref().as_os_str().to_string_lossy().as_ref()) + .expect("provided path contained null bytes"); + + unsafe { + Ok(Probe(to_result(blkid_new_probe_from_filename( + path.as_ptr(), + ))?)) + } + } + + pub fn do_probe(&self) -> Result { + unsafe { to_result(blkid_do_probe(self.0)).map(|v| v == 1) } + } + + pub fn do_safe_probe(&self) -> Result { + unsafe { to_result(blkid_do_safeprobe(self.0)) } + } + + pub fn has_value(self, name: &str) -> bool { + let name = CString::new(name).unwrap(); + let ret = unsafe { blkid_probe_has_value(self.0, name.as_ptr()) }; + ret == 1 + } + + /// Fetch a value by name. + pub fn lookup_value(self, name: &str) -> Result { + let name = CString::new(name).unwrap(); + let mut data_ptr = std::ptr::null(); + let mut len = 0; + unsafe { + to_result::(blkid_probe_lookup_value( + self.0, + name.as_ptr(), + &mut data_ptr, + &mut len, + ))?; + + let str = CStr::from_bytes_with_nul(slice::from_raw_parts( + data_ptr.cast(), + len as usize, + )) + .map_err(|_e| DevInfoError::InvalidStr {})? + .to_str() + .map_err(|_e| DevInfoError::InvalidStr {})? + .to_string(); + Ok(str) + } + } +} + +impl Drop for Probe { + fn drop(&mut self) { + if self.0.is_null() { + // No cleanup needed + return; + } + unsafe { + blkid_free_probe(self.0); + } + } +} diff --git a/devinfo/src/lib.rs b/devinfo/src/lib.rs index d9f9419a6..6b229c474 100644 --- a/devinfo/src/lib.rs +++ b/devinfo/src/lib.rs @@ -3,7 +3,11 @@ pub use block_device::BlkDev; mod block_device; use snafu::Snafu; +pub mod mountinfo; +pub mod partition; +#[allow(non_camel_case_types)] +pub mod blkid; #[derive(Debug, Snafu)] pub enum DevInfoError { #[snafu(display("Device {} not found", path))] @@ -16,6 +20,10 @@ pub enum DevInfoError { NotSupported { value: String }, #[snafu(display("udev internal error {}", value))] Udev { value: String }, + #[snafu(display("I/O error: {}", source))] + Io { source: std::io::Error }, + #[snafu(display("non-UTF8 string"))] + InvalidStr, } #[test] diff --git a/devinfo/src/mountinfo/mod.rs b/devinfo/src/mountinfo/mod.rs new file mode 100644 index 000000000..f9edadaca --- /dev/null +++ b/devinfo/src/mountinfo/mod.rs @@ -0,0 +1,213 @@ +use crate::partition::PartitionID; +use std::{ + ffi::OsString, + fmt::{self, Display, Formatter}, + fs::File, + io::{self, BufRead, BufReader, Error, ErrorKind}, + os::unix::prelude::OsStringExt, + path::{Path, PathBuf}, + str::FromStr, +}; +#[derive(Debug, Default, Clone, Hash, Eq, PartialEq)] +pub struct MountInfo { + pub source: PathBuf, + pub dest: PathBuf, + pub fstype: String, + pub options: Vec, +} + +impl Display for MountInfo { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!( + fmt, + "{} {} {} {}", + self.source.display(), + self.dest.display(), + self.fstype, + if self.options.is_empty() { + "defaults".into() + } else { + self.options.join(",") + }, + ) + } +} + +impl FromStr for MountInfo { + type Err = io::Error; + + fn from_str(line: &str) -> Result { + let mut parts = line.split_whitespace(); + + fn map_err(why: &'static str) -> io::Error { + Error::new(ErrorKind::InvalidData, why) + } + + let source = parts.next().ok_or_else(|| map_err("missing source"))?; + let dest = 
parts.next().ok_or_else(|| map_err("missing dest"))?; + let fstype = parts.next().ok_or_else(|| map_err("missing type"))?; + let options = parts.next().ok_or_else(|| map_err("missing options"))?; + + let _dump = parts.next().map_or(Ok(0), |value| { + value + .parse::() + .map_err(|_| map_err("dump value is not a number")) + })?; + + let _pass = parts.next().map_or(Ok(0), |value| { + value + .parse::() + .map_err(|_| map_err("pass value is not a number")) + })?; + + let path = Self::parse_value(source)?; + let path = path + .to_str() + .ok_or_else(|| map_err("non-utf8 paths are unsupported"))?; + + let source = if path.starts_with("/dev/disk/by-") { + Self::fetch_from_disk_by_path(path)? + } else { + PathBuf::from(path) + }; + + let path = Self::parse_value(dest)?; + let path = path + .to_str() + .ok_or_else(|| map_err("non-utf8 paths are unsupported"))?; + + let dest = PathBuf::from(path); + + Ok(MountInfo { + source, + dest, + fstype: fstype.to_owned(), + options: options.split(',').map(String::from).collect(), + }) + } +} + +impl MountInfo { + /// Attempt to parse a `/proc/mounts`-like line. + + fn fetch_from_disk_by_path(path: &str) -> io::Result { + PartitionID::from_disk_by_path(path) + .map_err(|why| { + Error::new(ErrorKind::InvalidData, format!("{}: {}", path, why)) + })? + .get_device_path() + .ok_or_else(|| { + Error::new( + ErrorKind::NotFound, + format!("device path for {} was not found", path), + ) + }) + } + + fn parse_value(value: &str) -> io::Result { + let mut ret = Vec::new(); + + let mut bytes = value.bytes(); + while let Some(b) = bytes.next() { + match b { + b'\\' => { + let mut code = 0; + for _i in 0 .. 3 { + if let Some(b) = bytes.next() { + code *= 8; + code += u32::from_str_radix( + &(b as char).to_string(), + 8, + ) + .map_err(|err| Error::new(ErrorKind::Other, err))?; + } else { + return Err(Error::new( + ErrorKind::Other, + "truncated octal code", + )); + } + } + ret.push(code as u8); + } + _ => { + ret.push(b); + } + } + } + + Ok(OsString::from_vec(ret)) + } +} + +/// Iteratively parse the `/proc/mounts` file. +pub struct MountIter { + file: R, + buffer: String, +} + +impl MountIter> { + pub fn new() -> io::Result { + Self::new_from_file("/proc/mounts") + } + + /// Read mounts from any mount-tab-like file. + pub fn new_from_file>(path: P) -> io::Result { + Ok(Self::new_from_reader(BufReader::new(File::open(path)?))) + } +} + +impl MountIter { + /// Read mounts from any in-memory buffer. + pub fn new_from_reader(readable: R) -> Self { + Self { + file: readable, + buffer: String::with_capacity(512), + } + } + + /// Iterator-based variant of `source_mounted_at`. + /// + /// Returns true if the `source` is mounted at the given `dest`. + /// + /// Due to iterative parsing of the mount file, an error may be returned. 
+ pub fn source_mounted_at, P: AsRef>( + source: D, + path: P, + ) -> io::Result { + let source = source.as_ref(); + let path = path.as_ref(); + + let mut is_found = false; + + let mounts = MountIter::new()?; + for mount in mounts { + let mount = mount?; + if mount.source == source { + is_found = mount.dest == path; + break; + } + } + + Ok(is_found) + } +} + +impl Iterator for MountIter { + type Item = io::Result; + + fn next(&mut self) -> Option { + loop { + self.buffer.clear(); + match self.file.read_line(&mut self.buffer) { + Ok(read) if read == 0 => return None, + Ok(_) => { + let line = self.buffer.trim_start(); + if !(line.starts_with('#') || line.is_empty()) { + return Some(MountInfo::from_str(line)); + } + } + Err(why) => return Some(Err(why)), + } + } + } +} diff --git a/devinfo/src/partition/mod.rs b/devinfo/src/partition/mod.rs new file mode 100644 index 000000000..6062aa050 --- /dev/null +++ b/devinfo/src/partition/mod.rs @@ -0,0 +1,330 @@ +use self::PartitionSource::{Path as SourcePath, *}; +use std::{ + borrow::Cow, + fmt::{self, Display, Formatter}, + fs, + io::{Error, ErrorKind}, + path::{Path, PathBuf}, + str::FromStr, +}; + +/// Describes a partition identity. +/// +/// A device path may be recovered from this. +/// +/// # Notes +/// +/// This is a struct instead of an enum to make access to the `id` string +/// easier for situations where the variant does not need to be checked. +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct PartitionID { + pub variant: PartitionSource, + pub id: String, +} + +impl PartitionID { + /// Construct a new `PartitionID` as the given source. + pub fn new(variant: PartitionSource, id: String) -> Self { + Self { + variant, + id, + } + } + + /// Construct a new `PartitionID` as a `ID` source. + pub fn new_id(id: String) -> Self { + Self::new(ID, id) + } + + /// Construct a new `PartitionID` as a `Label` source. + pub fn new_label(id: String) -> Self { + Self::new(Label, id) + } + + /// Construct a new `PartitionID` as a `UUID` source. + pub fn new_uuid(id: String) -> Self { + Self::new(UUID, id) + } + + /// Construct a new `PartitionID` as a `PartLabel` source. + pub fn new_partlabel(id: String) -> Self { + Self::new(PartLabel, id) + } + + /// Construct a new `PartitionID` as a `PartUUID` source. + pub fn new_partuuid(id: String) -> Self { + Self::new(PartUUID, id) + } + + /// Construct a new `PartitionID` as a `Path` source. + pub fn new_path(id: String) -> Self { + Self::new(SourcePath, id) + } + + /// Find the device path of this ID. + pub fn get_device_path(&self) -> Option { + if self.variant == PartitionSource::Path && self.id.starts_with('/') { + Some(PathBuf::from(&self.id)) + } else { + from_id(&self.id, &self.variant.disk_by_path()) + } + } + + /// Find the given source ID of the device at the given path. + pub fn get_source>( + variant: PartitionSource, + path: P, + ) -> Option { + Some(Self { + variant, + id: find_id(path.as_ref(), &variant.disk_by_path())?, + }) + } + + /// Find the UUID of the device at the given path. + pub fn get_uuid>(path: P) -> Option { + Self::get_source(UUID, path) + } + + /// Find the PARTUUID of the device at the given path. + pub fn get_partuuid>(path: P) -> Option { + Self::get_source(PartUUID, path) + } + + /// Fetch a partition ID by a `/dev/disk/by-` path. 
+ pub fn from_disk_by_path>(path: S) -> Result { + let path = path.as_ref(); + + let path = if let Some(path) = path.strip_prefix("/dev/disk/by-") { + path + } else { + return Err(Error::new(ErrorKind::NotFound, path)); + }; + + let id = if let Some(id) = path.strip_prefix("id/") { + Self::new(ID, id.into()) + } else if let Some(path) = path.strip_prefix("label/") { + Self::new(Label, path.into()) + } else if let Some(path) = path.strip_prefix("partlabel/") { + Self::new(PartLabel, path.into()) + } else if let Some(path) = path.strip_prefix("partuuid/") { + Self::new(PartUUID, path.into()) + } else if let Some(path) = path.strip_prefix("path/") { + Self::new(PartUUID, path.into()) + } else if let Some(path) = path.strip_prefix("uuid/") { + Self::new(PartUUID, path.into()) + } else { + return Err(Error::new(ErrorKind::InvalidData, path)); + }; + + Ok(id) + } +} + +impl Display for PartitionID { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + if let PartitionSource::Path = self.variant { + write!(fmt, "{}", self.id) + } else { + write!(fmt, "{}={}", <&'static str>::from(self.variant), self.id) + } + } +} + +impl FromStr for PartitionID { + type Err = Error; + + fn from_str(input: &str) -> Result { + if let Some(s) = input.strip_prefix('/') { + Ok(PartitionID { + variant: SourcePath, + id: s.to_owned(), + }) + } else if let Some(s) = input.strip_prefix("ID=") { + Ok(PartitionID { + variant: ID, + id: s.to_owned(), + }) + } else if let Some(s) = input.strip_prefix("LABEL=") { + Ok(PartitionID { + variant: Label, + id: s.to_owned(), + }) + } else if let Some(s) = input.strip_prefix("PARTLABEL=") { + Ok(PartitionID { + variant: PartLabel, + id: s.to_owned(), + }) + } else if let Some(s) = input.strip_prefix("PARTUUID=") { + Ok(PartitionID { + variant: PartUUID, + id: s.to_owned(), + }) + } else if let Some(s) = input.strip_prefix("UUID=") { + Ok(PartitionID { + variant: UUID, + id: s.to_owned(), + }) + } else { + Err(Error::new(ErrorKind::InvalidData, input)) + } + } +} + +/// Describes the type of partition identity. +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum PartitionSource { + ID, + Label, + PartLabel, + PartUUID, + Path, + UUID, +} + +impl Display for PartitionSource { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "{}", <&'static str>::from(*self)) + } +} + +impl From for &'static str { + fn from(pid: PartitionSource) -> &'static str { + match pid { + PartitionSource::ID => "ID", + PartitionSource::Label => "LABEL", + PartitionSource::PartLabel => "PARTLABEL", + PartitionSource::PartUUID => "PARTUUID", + PartitionSource::Path => "PATH", + PartitionSource::UUID => "UUID", + } + } +} + +impl PartitionSource { + fn disk_by_path(self) -> PathBuf { + PathBuf::from( + ["/dev/disk/by-", &<&'static str>::from(self).to_lowercase()] + .concat(), + ) + } +} + +/// A collection of all discoverable identifiers for a partition. +#[derive(Debug, Default, Clone, Hash, PartialEq)] +pub struct PartitionIdentifiers { + pub id: Option, + pub label: Option, + pub part_label: Option, + pub part_uuid: Option, + pub path: Option, + pub uuid: Option, +} + +impl PartitionIdentifiers { + /// Fetches all discoverable identifiers for a partition by the path to that + /// partition. 
+ pub fn from_path>(path: P) -> PartitionIdentifiers { + let path = path.as_ref(); + + PartitionIdentifiers { + path: PartitionID::get_source(SourcePath, path).map(|id| id.id), + id: PartitionID::get_source(ID, path).map(|id| id.id), + label: PartitionID::get_source(Label, path).map(|id| id.id), + part_label: PartitionID::get_source(PartLabel, path) + .map(|id| id.id), + part_uuid: PartitionID::get_source(PartUUID, path).map(|id| id.id), + uuid: PartitionID::get_source(UUID, path).map(|id| id.id), + } + } + + /// Checks if the given identity matches one of the available identifiers. + pub fn matches(&self, id: &PartitionID) -> bool { + match id.variant { + ID => self.id.as_ref().map_or(false, |s| &id.id == s), + Label => self.label.as_ref().map_or(false, |s| &id.id == s), + PartLabel => { + self.part_label.as_ref().map_or(false, |s| &id.id == s) + } + PartUUID => self.part_uuid.as_ref().map_or(false, |s| &id.id == s), + SourcePath => self.path.as_ref().map_or(false, |s| &id.id == s), + UUID => self.uuid.as_ref().map_or(false, |s| &id.id == s), + } + } +} + +fn attempt Option>( + attempts: u8, + wait: u64, + mut func: F, +) -> Option { + let mut tried = 0; + let mut result; + + loop { + result = func(); + if result.is_none() && tried != attempts { + ::std::thread::sleep(::std::time::Duration::from_millis(wait)); + tried += 1; + } else { + return result; + } + } +} + +fn canonicalize(path: &Path) -> Cow<'_, Path> { + // NOTE: It seems that the kernel may intermittently error. + match attempt::(10, 1, || path.canonicalize().ok()) { + Some(path) => Cow::Owned(path), + None => Cow::Borrowed(path), + } +} + +/// Attempts to find the ID from the given path. +fn find_id(path: &Path, uuid_dir: &Path) -> Option { + // NOTE: It seems that the kernel may sometimes intermittently skip + // directories. + attempt(10, 1, move || { + let dir = uuid_dir.read_dir().ok()?; + find_id_(path, dir) + }) +} + +fn from_id(uuid: &str, uuid_dir: &Path) -> Option { + // NOTE: It seems that the kernel may sometimes intermittently skip + // directories. + attempt(10, 1, move || { + let dir = uuid_dir.read_dir().ok()?; + from_id_(uuid, dir) + }) +} + +fn find_id_(path: &Path, uuid_dir: fs::ReadDir) -> Option { + let path = canonicalize(path); + for uuid_entry in uuid_dir.filter_map(|entry| entry.ok()) { + let uuid_path = uuid_entry.path(); + let uuid_path = canonicalize(&uuid_path); + if uuid_path == path { + if let Some(uuid_entry) = uuid_entry.file_name().to_str() { + return Some(uuid_entry.into()); + } + } + } + + None +} + +fn from_id_(uuid: &str, uuid_dir: fs::ReadDir) -> Option { + for uuid_entry in uuid_dir.filter_map(|entry| entry.ok()) { + let uuid_entry = uuid_entry.path(); + if let Some(name) = uuid_entry.file_name() { + if name == uuid { + if let Ok(uuid_entry) = uuid_entry.canonicalize() { + return Some(uuid_entry); + } + } + } + } + + None +} diff --git a/devinfo/wrapper.h b/devinfo/wrapper.h new file mode 100644 index 000000000..b68096d72 --- /dev/null +++ b/devinfo/wrapper.h @@ -0,0 +1 @@ +#include diff --git a/doc/build.md b/doc/build.md index 44629d0a0..1bf3912d4 100644 --- a/doc/build.md +++ b/doc/build.md @@ -138,7 +138,7 @@ There are a few ways to build Mayastor! If you're hacking on Mayastor, it's best You can build release binaries of Mayastor with [`nix build`][nix-build]: ```bash -for PKG in moac mayastor; do +for PKG in mayastor; do echo "Building ${PKG} to artifacts/pkgs/${PKG}"; \ nix build -f . 
-o artifacts/pkgs/${PKG} ${PKG}; done @@ -147,7 +147,7 @@ done Try them as if they were installed: ```rust -nix shell -f . moac mayastor +nix shell -f . mayastor ``` ### Building portable Nix bundles @@ -179,7 +179,7 @@ Build the Docker images with [`nix build`][nix-build]: ```bash for IMAGE in \ - moac mayastor-client mayastor mayastor-csi moac mayastor-client kiiss-service \ + mayastor-client mayastor mayastor-csi mayastor-client kiiss-service \ node-service volume-service pool-service rest-service node-operator; \ do echo "Building ${IMAGE} to artifacts/docker/${IMAGE}.tar"; \ @@ -237,7 +237,7 @@ The basic steps are: ``` git submodule update --init --recursive sudo ./spdk-sys/spdk/scripts/pkgdep -./spdk-sys/build.sh --enable-debug --without-isal --with-iscsi-initiator --with-rdma \ +./spdk-sys/build.sh --enable-debug --without-isal --with-rdma \ --with-internal-vhost-lib --disable-tests \ --with-crypto ``` diff --git a/doc/pool-operator.md b/doc/pool-operator.md deleted file mode 100644 index 97a1fc681..000000000 --- a/doc/pool-operator.md +++ /dev/null @@ -1,70 +0,0 @@ -# Pool Operator - -Pool operator is part of mayastor control plane (moac) and is responsible -for storage pool creation and destruction on storage nodes. It follows classic -k8s custom resource operator model. Custom resource (CR) managed by the operator -is [MayastorPool](/csi/moac/crd/mayastorpool.yaml). CR objects represent the -*desired state* by a user. Pool operator is responsible for making the desired -state real. *Storage admin* creates and destroys mayastorpool CRs using -`kubectl`. The operator issues create and destroy storage pool commands -to storage nodes to make the state real. - -## MayastorPool - -MayastorPool custom resource is structured as follows: - -* metadata: Includes name of the pool which must be cluster-wide unique. -* spec: Pool parameters including: - * node: Name of the k8s node where the pool should be created (mayastor needs to be enabled on that node by means of node labels). - * disks: Disk names (absolute paths to device files in /dev) which comprise the storage pool. -* status: - * state: State of the pool can be one of: - * pending: Pool has not been created yet - either due to an error or because the k8s node does not have mayastor enabled. - * online: Pool has been created and is healthy & usable. - * degraded: Pool has been created and is usable but has some issue. - * faulted: Pool has unrecoverable error and is not usable. - * offline: Pool was created but currently it is not available because mayastor does not run on the k8s node. - * reason: If applicable it provides additional information explaining the state. - -MayastorPool CR entries are shared between storage admin and pool operator -with following responsibilities for each stake holder: - -* Storage admin: - 1. Is responsible for creation/destruction of the CRs. - 2. Modification of the CR is possible but not recommended. - 3. Status section of the CR is read-only -* Pool operator: - 1. Is responsible for updating status section of the CR. - 2. spec and metadata are read-only - 3. Never creates or destroys any of the MayastorPool CRs. - -## List storage pools - -```bash -kubectl get msp -``` - -## Create a storage pool - -Example of creating a storage pool `my-new-pool` on disk device -`/dev/loop0` on k8s node `node1`: - -```bash -cat < for Aio { value.parse().context(nexus_uri::IntParamParseError { uri: url.to_string(), parameter: String::from("blk_size"), + value: value.clone(), })? 
} None => 512, @@ -123,6 +124,7 @@ impl CreateDestroy for Aio { async fn destroy(self: Box) -> Result<(), Self::Error> { match Bdev::lookup_by_name(&self.name) { Some(bdev) => { + bdev.remove_alias(&self.alias); let (sender, receiver) = oneshot::channel::>(); unsafe { bdev_aio_delete( diff --git a/mayastor/src/bdev/dev.rs b/mayastor/src/bdev/dev.rs index 54a933bad..60c769800 100644 --- a/mayastor/src/bdev/dev.rs +++ b/mayastor/src/bdev/dev.rs @@ -17,37 +17,52 @@ //! Creating a bdev for any supported device type is now as simple as: //! ```ignore //! let uri = "aio:///tmp/disk1.img?blk_size=512"; -//! bdev::Uri::parse(&uri)?.create().await?; +//! bdev::uri::parse(&uri)?.create().await?; //! ``` -use std::{collections::HashMap, convert::TryFrom}; - -use snafu::ResultExt; -use url::Url; +use std::collections::HashMap; +use super::nvmx; use crate::{ - bdev::{BdevCreateDestroy, SpdkBlockDevice, Uri}, + bdev::SpdkBlockDevice, core::{BlockDevice, BlockDeviceDescriptor, CoreError}, - nexus_uri::{self, NexusBdevError}, + nexus_uri::NexusBdevError, }; -use super::{aio, iscsi, loopback, malloc, null, nvme, nvmx, uring}; +use url::Url; + +pub(crate) mod uri { + use std::convert::TryFrom; + + use snafu::ResultExt; + + use crate::{ + bdev::{ + aio, + loopback, + malloc, + null, + nvme, + nvmx, + uring, + BdevCreateDestroy, + }, + nexus_uri::{self, NexusBdevError}, + }; -impl Uri { pub fn parse( uri: &str, ) -> Result< Box>, NexusBdevError, > { - let url = Url::parse(uri).context(nexus_uri::UrlParseError { + let url = url::Url::parse(uri).context(nexus_uri::UrlParseError { uri: uri.to_string(), })?; match url.scheme() { "aio" => Ok(Box::new(aio::Aio::try_from(&url)?)), "bdev" => Ok(Box::new(loopback::Loopback::try_from(&url)?)), - "iscsi" => Ok(Box::new(iscsi::Iscsi::try_from(&url)?)), "loopback" => Ok(Box::new(loopback::Loopback::try_from(&url)?)), "malloc" => Ok(Box::new(malloc::Malloc::try_from(&url)?)), "null" => Ok(Box::new(null::Null::try_from(&url)?)), @@ -75,7 +90,7 @@ pub(crate) fn reject_unknown_parameters( Err(NexusBdevError::UriInvalid { uri: url.to_string(), message: format!( - "unrecognized parameters: {}.", + "unrecognized parameter(s): {}", invalid_parameters ), }) @@ -92,11 +107,11 @@ pub fn device_lookup(name: &str) -> Option> { } pub async fn device_create(uri: &str) -> Result { - Uri::parse(uri)?.create().await + uri::parse(uri)?.create().await } pub async fn device_destroy(uri: &str) -> Result<(), NexusBdevError> { - Uri::parse(uri)?.destroy().await + uri::parse(uri)?.destroy().await } pub fn device_open( diff --git a/mayastor/src/bdev/device.rs b/mayastor/src/bdev/device.rs old mode 100755 new mode 100644 index 471c9185e..74776fb6e --- a/mayastor/src/bdev/device.rs +++ b/mayastor/src/bdev/device.rs @@ -93,7 +93,7 @@ impl BlockDeviceDescriptor for SpdkBlockDeviceDescriptor { } fn get_io_handle(&self) -> Result, CoreError> { - let handle = SpdkBlockDeviceHandle::try_from(Arc::clone(&self.0))?; + let handle = SpdkBlockDeviceHandle::try_from(self.0.clone())?; Ok(Box::new(handle)) } diff --git a/mayastor/src/bdev/iscsi.rs b/mayastor/src/bdev/iscsi.rs deleted file mode 100644 index d032e3ae3..000000000 --- a/mayastor/src/bdev/iscsi.rs +++ /dev/null @@ -1,193 +0,0 @@ -use std::{ - collections::HashMap, - convert::TryFrom, - ffi::CString, - os::raw::{c_int, c_void}, -}; - -use async_trait::async_trait; -use futures::channel::oneshot; -use snafu::ResultExt; -use url::Url; -use uuid::Uuid; - -use spdk_sys::{create_iscsi_disk, delete_iscsi_disk, spdk_bdev}; - -use crate::{ - 
bdev::{dev::reject_unknown_parameters, util::uri, CreateDestroy, GetName}, - core::Bdev, - ffihelper::{cb_arg, done_errno_cb, errno_result_from_i32, ErrnoResult}, - nexus_uri::{self, NexusBdevError}, -}; - -const ISCSI_IQN_PREFIX: &str = "iqn.1980-05.mayastor"; - -#[derive(Debug)] -pub(super) struct Iscsi { - name: String, - alias: String, - iqn: String, - url: String, - uuid: Option, -} - -/// Convert a URI to an Iscsi "object" -/// NOTE: due to a bug in SPDK, providing a valid -/// target with an invalid iqn will crash the system. -impl TryFrom<&Url> for Iscsi { - type Error = NexusBdevError; - - fn try_from(url: &Url) -> Result { - if url.host_str().is_none() { - return Err(NexusBdevError::UriInvalid { - uri: url.to_string(), - message: String::from("missing host"), - }); - } - - let segments = uri::segments(url); - - if segments.is_empty() { - return Err(NexusBdevError::UriInvalid { - uri: url.to_string(), - message: String::from("no path segment"), - }); - } - - if segments.len() > 2 { - return Err(NexusBdevError::UriInvalid { - uri: url.to_string(), - message: String::from("too many path segments"), - }); - } - - let mut parameters: HashMap = - url.query_pairs().into_owned().collect(); - - let uuid = uri::uuid(parameters.remove("uuid")).context( - nexus_uri::UuidParamParseError { - uri: url.to_string(), - }, - )?; - - reject_unknown_parameters(url, parameters)?; - - Ok(Iscsi { - name: url[url::Position::BeforeHost .. url::Position::AfterPath] - .into(), - alias: url.to_string(), - iqn: format!("{}:{}", ISCSI_IQN_PREFIX, Uuid::new_v4()), - url: if segments.len() == 2 { - url[.. url::Position::AfterPath].to_string() - } else { - format!("{}/0", &url[.. url::Position::AfterPath]) - }, - uuid, - }) - } -} - -impl GetName for Iscsi { - fn get_name(&self) -> String { - self.name.clone() - } -} - -#[async_trait(?Send)] -impl CreateDestroy for Iscsi { - type Error = NexusBdevError; - - /// Create an iSCSI bdev - async fn create(&self) -> Result { - if Bdev::lookup_by_name(&self.name).is_some() { - return Err(NexusBdevError::BdevExists { - name: self.get_name(), - }); - } - - extern "C" fn done_iscsi_create_cb( - arg: *mut c_void, - bdev: *mut spdk_bdev, - errno: c_int, - ) { - let sender = unsafe { - Box::from_raw( - arg as *mut oneshot::Sender>, - ) - }; - - sender - .send(errno_result_from_i32(bdev, errno)) - .expect("done callback receiver side disappeared"); - } - - let cname = CString::new(self.get_name()).unwrap(); - let curl = CString::new(self.url.clone()).unwrap(); - let cinitiator = CString::new(self.iqn.clone()).unwrap(); - - let (sender, receiver) = oneshot::channel::>(); - - let errno = unsafe { - create_iscsi_disk( - cname.as_ptr(), - curl.as_ptr(), - cinitiator.as_ptr(), - Some(done_iscsi_create_cb), - cb_arg(sender), - ) - }; - - errno_result_from_i32((), errno).context(nexus_uri::InvalidParams { - name: self.get_name(), - })?; - - let mut bdev = receiver - .await - .context(nexus_uri::CancelBdev { - name: self.get_name(), - })? 
- .context(nexus_uri::CreateBdev { - name: self.get_name(), - })?; - - if let Some(u) = self.uuid { - bdev.set_uuid(u); - } - if !bdev.add_alias(&self.alias) { - error!( - "Failed to add alias {} to device {}", - self.alias, - self.get_name() - ); - } - - Ok(bdev.name()) - } - - /// Destroy the given iSCSI bdev - async fn destroy(self: Box) -> Result<(), Self::Error> { - match Bdev::lookup_by_name(&self.name) { - Some(bdev) => { - let (sender, receiver) = oneshot::channel::>(); - unsafe { - delete_iscsi_disk( - bdev.as_ptr(), - Some(done_errno_cb), - cb_arg(sender), - ); - } - receiver - .await - .context(nexus_uri::CancelBdev { - name: self.get_name(), - })? - .context(nexus_uri::DestroyBdev { - name: self.get_name(), - }) - } - None => Err(NexusBdevError::BdevNotFound { - name: self.get_name(), - }), - } - } -} diff --git a/mayastor/src/bdev/malloc.rs b/mayastor/src/bdev/malloc.rs index 14c6e9202..fd08c60f6 100644 --- a/mayastor/src/bdev/malloc.rs +++ b/mayastor/src/bdev/malloc.rs @@ -43,7 +43,7 @@ impl TryFrom<&Url> for Malloc { if segments.is_empty() { return Err(NexusBdevError::UriInvalid { uri: uri.to_string(), - message: "no path segments".to_string(), + message: "empty path".to_string(), }); } @@ -54,24 +54,17 @@ impl TryFrom<&Url> for Malloc { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), parameter: String::from("blk_size"), + value: value.clone(), })? } else { 512 }; - if blk_size != 512 && blk_size != 4096 { - return Err(NexusBdevError::UriInvalid { - uri: uri.to_string(), - message: - "invalid blk_size specified must be one of 512 or 4096" - .to_string(), - }); - } - let size: u32 = if let Some(value) = parameters.remove("size_mb") { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), parameter: String::from("size_mb"), + value: value.clone(), })? } else { 0 @@ -81,27 +74,44 @@ impl TryFrom<&Url> for Malloc { if let Some(value) = parameters.remove("num_blocks") { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), - parameter: String::from("blk_size"), + parameter: String::from("num_blocks"), + value: value.clone(), })? } else { 0 }; + let uuid = uri::uuid(parameters.remove("uuid")).context( + nexus_uri::UuidParamParseError { + uri: uri.to_string(), + }, + )?; + + reject_unknown_parameters(uri, parameters)?; + + // Validate parameters. 
+ if blk_size != 512 && blk_size != 4096 { + return Err(NexusBdevError::UriInvalid { + uri: uri.to_string(), + message: "'blk_size' must be one of: 512, 4096".to_string(), + }); + } + if size != 0 && num_blocks != 0 { return Err(NexusBdevError::UriInvalid { uri: uri.to_string(), - message: "conflicting parameters num_blocks and size_mb are mutually exclusive" + message: "'num_blocks' and 'size_mb' are mutually exclusive" .to_string(), }); } - let uuid = uri::uuid(parameters.remove("uuid")).context( - nexus_uri::UuidParamParseError { + if size == 0 && num_blocks == 0 { + return Err(NexusBdevError::UriInvalid { uri: uri.to_string(), - }, - )?; - - reject_unknown_parameters(uri, parameters)?; + message: "either 'num_blocks' or 'size_mb' must be specified" + .to_string(), + }); + } Ok(Self { name: uri.path()[1 ..].into(), @@ -177,6 +187,7 @@ impl CreateDestroy for Malloc { async fn destroy(self: Box) -> Result<(), Self::Error> { if let Some(bdev) = Bdev::lookup_by_name(&self.name) { + bdev.remove_alias(&self.alias); let (s, r) = oneshot::channel::>(); unsafe { diff --git a/mayastor/src/bdev/mod.rs b/mayastor/src/bdev/mod.rs index efa5a785e..5fa9ac691 100644 --- a/mayastor/src/bdev/mod.rs +++ b/mayastor/src/bdev/mod.rs @@ -5,8 +5,10 @@ pub use device::{bdev_io_ctx_pool_init, SpdkBlockDevice}; pub use nexus::{ nexus_bdev::{ nexus_create, + nexus_create_v2, nexus_lookup, Nexus, + NexusNvmeParams, NexusState, NexusStatus, VerboseError, @@ -30,8 +32,8 @@ pub use nvmx::{ mod aio; pub(crate) mod dev; +pub(crate) use dev::uri; pub(crate) mod device; -mod iscsi; mod loopback; mod malloc; pub(crate) mod nexus; @@ -60,5 +62,3 @@ pub trait CreateDestroy { pub trait GetName { fn get_name(&self) -> String; } - -pub struct Uri; diff --git a/mayastor/src/bdev/nexus/mod.rs b/mayastor/src/bdev/nexus/mod.rs index fdff25b7e..e0a7af0bd 100644 --- a/mayastor/src/bdev/nexus/mod.rs +++ b/mayastor/src/bdev/nexus/mod.rs @@ -16,7 +16,7 @@ use crate::{ /// NOTE: The resulting string must be freed explicitly after use! macro_rules! 
c_str { ($lit:expr) => { - std::ffi::CString::new($lit).unwrap().into_raw(); + std::ffi::CString::new($lit).unwrap().into_raw() }; } @@ -26,7 +26,6 @@ pub mod nexus_bdev_rebuild; pub mod nexus_bdev_snapshot; mod nexus_channel; pub(crate) mod nexus_child; -mod nexus_config; pub mod nexus_fn_table; pub mod nexus_io; pub mod nexus_label; @@ -126,12 +125,6 @@ pub fn instances() -> &'static mut Vec> { nexus_module::NexusModule::get_instances() } -/// function used to create a new nexus when parsing a config file -pub fn nexus_instance_new(name: String, size: u64, children: Vec) { - let list = instances(); - list.push(Nexus::new(&name, size, None, Some(&children))); -} - /// called during shutdown so that all nexus children are in Destroying state /// so that a possible remove event from SPDK also results in bdev removal pub async fn nexus_children_to_destroying_state() { diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 544f176aa..5449e96cf 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -17,6 +17,7 @@ use nix::errno::Errno; use serde::Serialize; use snafu::{ResultExt, Snafu}; use tonic::{Code, Status}; +use uuid::Uuid; use rpc::mayastor::NvmeAnaState; use spdk_sys::{spdk_bdev, spdk_bdev_register, spdk_bdev_unregister}; @@ -57,6 +58,9 @@ use crate::{ subsys::{NvmfError, NvmfSubsystem}, }; +pub static NVME_MIN_CNTLID: u16 = 1; +pub static NVME_MAX_CNTLID: u16 = 0xffef; + /// Obtain the full error chain pub trait VerboseError { fn verbose(&self) -> String; @@ -90,6 +94,12 @@ pub enum Error { NexusInitialising { name: String }, #[snafu(display("Invalid nexus uuid \"{}\"", uuid))] InvalidUuid { uuid: String }, + #[snafu(display( + "Nexus uuid \"{}\" already exists for nexus \"{}\"", + uuid, + nexus + ))] + UuidExists { uuid: String, nexus: String }, #[snafu(display("Invalid encryption key"))] InvalidKey {}, #[snafu(display("Failed to create crypto bdev for nexus {}", name))] @@ -150,6 +160,16 @@ pub enum Error { ChildMissing { child: String, name: String }, #[snafu(display("Child {} of nexus {} has no error store", child, name))] ChildMissingErrStore { child: String, name: String }, + #[snafu(display( + "Failed to acquire write exclusive reservation on child {} of nexus {}", + child, + name + ))] + ChildWriteExclusiveResvFailed { + source: ChildError, + child: String, + name: String, + }, #[snafu(display("Failed to open child {} of nexus {}", child, name))] OpenChild { source: ChildError, @@ -173,6 +193,12 @@ pub enum Error { child, name ))] + DestroyLastHealthyChild { child: String, name: String }, + #[snafu(display( + "Cannot remove the last healthy child {} of nexus {} from the IO path", + child, + name + ))] RemoveLastChild { child: String, name: String }, #[snafu(display( "Cannot fault the last healthy child {} of nexus {}", @@ -199,7 +225,7 @@ pub enum Error { child, name, ))] - CreateRebuildError { + CreateRebuild { source: RebuildError, child: String, name: String, @@ -229,7 +255,7 @@ pub enum Error { job, name, ))] - RebuildOperationError { + RebuildOperation { job: String, name: String, source: RebuildError, @@ -238,6 +264,8 @@ pub enum Error { InvalidShareProtocol { sp_value: i32 }, #[snafu(display("Invalid NvmeAnaState value {}", ana_value))] InvalidNvmeAnaState { ana_value: i32 }, + #[snafu(display("Invalid arguments for nexus {}: {}", name, args))] + InvalidArguments { name: String, args: String }, #[snafu(display("Failed to create nexus {}", name))] NexusCreate { name: String }, 
#[snafu(display("Failed to destroy nexus {}", name))] @@ -258,9 +286,9 @@ pub enum Error { #[snafu(display("Failed to create snapshot on nexus {}", name))] FailedCreateSnapshot { name: String, source: CoreError }, #[snafu(display("NVMf subsystem error: {}", e))] - SubsysNvmfError { e: String }, + SubsysNvmf { e: String }, #[snafu(display("failed to pause {} current state {:?}", name, state))] - PauseError { + Pause { state: NexusPauseState, name: String, }, @@ -268,7 +296,7 @@ pub enum Error { impl From for Error { fn from(error: NvmfError) -> Self { - Error::SubsysNvmfError { + Error::SubsysNvmf { e: error.to_string(), } } @@ -334,6 +362,48 @@ pub enum NexusPauseState { Unpausing, } +/// NVMe-specific parameters for the Nexus +#[derive(Debug)] +pub struct NexusNvmeParams { + /// minimum NVMe controller ID for sharing over NVMf + pub(crate) min_cntlid: u16, + /// maximum NVMe controller ID + pub(crate) max_cntlid: u16, + /// NVMe reservation key for children + pub(crate) resv_key: u64, + /// NVMe preempt key for children, 0 to not preempt + pub(crate) preempt_key: Option, +} + +impl Default for NexusNvmeParams { + fn default() -> Self { + NexusNvmeParams { + min_cntlid: NVME_MIN_CNTLID, + max_cntlid: NVME_MAX_CNTLID, + resv_key: 0x1234_5678, + preempt_key: None, + } + } +} + +impl NexusNvmeParams { + pub fn set_min_cntlid(&mut self, min_cntlid: u16) { + self.min_cntlid = min_cntlid; + } + pub fn set_max_cntlid(&mut self, max_cntlid: u16) { + self.max_cntlid = max_cntlid; + } + pub fn set_resv_key(&mut self, resv_key: u64) { + self.resv_key = resv_key; + } + pub fn set_preempt_key( + &mut self, + preempt_key: Option, + ) { + self.preempt_key = preempt_key; + } +} + /// The main nexus structure #[derive(Debug)] pub struct Nexus { @@ -345,8 +415,12 @@ pub struct Nexus { pub(crate) child_count: u32, /// vector of children pub children: Vec, + /// NVMe parameters + pub(crate) nvme_params: NexusNvmeParams, /// inner bdev pub(crate) bdev: Bdev, + /// uuid of the nexus (might not be the same as the nexus bdev!) 
+ pub(crate) nexus_uuid: Uuid, /// raw pointer to bdev (to destruct it later using Box::from_raw()) bdev_raw: *mut spdk_bdev, /// represents the current state of the Nexus @@ -452,7 +526,9 @@ impl Nexus { pub fn new( name: &str, size: u64, - uuid: Option<&str>, + bdev_uuid: Option<&str>, + nexus_uuid: Option, + nvme_params: NexusNvmeParams, child_bdevs: Option<&[String]>, ) -> Box { let mut b = Box::new(spdk_bdev::default()); @@ -476,14 +552,19 @@ impl Nexus { share_handle: None, size, nexus_target: None, + nvme_params, io_device: None, pause_state: AtomicCell::new(NexusPauseState::Unpaused), pause_waiters: Vec::new(), nexus_info: futures::lock::Mutex::new(Default::default()), + nexus_uuid: Default::default(), }); // set the UUID of the underlying bdev - n.set_uuid(uuid); + n.set_uuid(bdev_uuid); + // set the nexus UUID to be the specified nexus UUID, otherwise inherit + // the bdev UUID + n.nexus_uuid = nexus_uuid.unwrap_or_else(|| n.bdev.uuid()); // register children if let Some(child_bdevs) = child_bdevs { @@ -527,6 +608,11 @@ impl Nexus { ); } + /// Get the Nexus uuid + pub(crate) fn uuid(&self) -> Uuid { + self.nexus_uuid + } + /// set the state of the nexus pub(crate) fn set_state(&mut self, state: NexusState) -> NexusState { debug!( @@ -701,13 +787,14 @@ impl Nexus { pub async fn resume(&mut self) -> Result<(), Error> { assert_eq!(Cores::current(), Cores::first()); - // if we are pausing we have concurrent requests for this - if matches!(self.pause_state.load(), NexusPauseState::Pausing) { + // In case nexus is already unpaused or is being paused, bail out. + if matches!( + self.pause_state.load(), + NexusPauseState::Pausing | NexusPauseState::Unpaused + ) { return Ok(()); } - assert_eq!(self.pause_state.load(), NexusPauseState::Paused); - info!( "{} resuming nexus, waiters: {}", self.name, @@ -797,7 +884,7 @@ impl Nexus { // we must pause again, schedule pause operation Err(NexusPauseState::Unpausing) => { - return Err(Error::PauseError { + return Err(Error::Pause { state: NexusPauseState::Unpausing, name: self.name.clone(), }); @@ -994,10 +1081,11 @@ impl Nexus { /// io type. Break the loop on first occurrence. 
/// TODO: optionally add this check during nexus creation pub fn io_is_supported(&self, io_type: IoType) -> bool { - self.children + !self + .children .iter() .filter_map(|e| e.get_device().ok()) - .any(|b| b.io_type_supported(io_type)) + .any(|b| !b.io_type_supported(io_type)) } /// IO completion for local replica @@ -1055,11 +1143,97 @@ pub async fn nexus_create( size: u64, uuid: Option<&str>, children: &[String], +) -> Result<(), Error> { + nexus_create_internal( + name, + size, + uuid, + None, + NexusNvmeParams::default(), + children, + ) + .await +} + +/// As create_nexus with additional parameters: +/// min_cntlid, max_cntldi: NVMe controller ID range when sharing over NVMf +/// resv_key: NVMe reservation key for children +pub async fn nexus_create_v2( + name: &str, + size: u64, + uuid: &str, + nvme_params: NexusNvmeParams, + children: &[String], +) -> Result<(), Error> { + if nvme_params.min_cntlid < NVME_MIN_CNTLID + || nvme_params.min_cntlid > nvme_params.max_cntlid + || nvme_params.max_cntlid > NVME_MAX_CNTLID + { + let args = format!( + "invalid NVMe controller ID range [{:x}h, {:x}h]", + nvme_params.min_cntlid, nvme_params.max_cntlid + ); + error!("failed to create nexus {}: {}", name, args); + return Err(Error::InvalidArguments { + name: name.to_owned(), + args, + }); + } + if nvme_params.resv_key == 0 { + let args = "invalid NVMe reservation key"; + error!("failed to create nexus {}: {}", name, args); + return Err(Error::InvalidArguments { + name: name.to_owned(), + args: args.to_string(), + }); + } + + match uuid::Uuid::parse_str(name) { + Ok(name_uuid) => { + let bdev_uuid = name_uuid.to_string(); + let nexus_uuid = uuid::Uuid::parse_str(uuid).map_err(|_| { + Error::InvalidUuid { + uuid: uuid.to_string(), + } + })?; + nexus_create_internal( + name, + size, + Some(bdev_uuid.as_str()), + Some(nexus_uuid), + nvme_params, + children, + ) + .await + } + Err(_) => { + nexus_create_internal( + name, + size, + Some(uuid), + None, + nvme_params, + children, + ) + .await + } + } +} + +async fn nexus_create_internal( + name: &str, + size: u64, + bdev_uuid: Option<&str>, + nexus_uuid: Option, + nvme_params: NexusNvmeParams, + children: &[String], ) -> Result<(), Error> { // global variable defined in the nexus module let nexus_list = instances(); - if let Some(nexus) = nexus_list.iter().find(|n| n.name == name) { + if let Some(nexus) = nexus_list.iter().find(|n| { + n.name == name || (nexus_uuid.is_some() && Some(n.uuid()) == nexus_uuid) + }) { // FIXME: Instead of error, we return Ok without checking // that the children match, which seems wrong. if *nexus.state.lock() == NexusState::Init { @@ -1067,6 +1241,14 @@ pub async fn nexus_create( name: name.to_owned(), }); } + if nexus.name != name + || (nexus_uuid.is_some() && Some(nexus.nexus_uuid) != nexus_uuid) + { + return Err(Error::UuidExists { + uuid: nexus.nexus_uuid.to_string(), + nexus: name.to_string(), + }); + } return Ok(()); } @@ -1075,7 +1257,14 @@ pub async fn nexus_create( // closing a child assumes that the nexus to which it belongs will appear // in the global list of nexus instances. We must also ensure that the // nexus instance gets removed from the global list if an error occurs. - nexus_list.push(Nexus::new(name, size, uuid, None)); + nexus_list.push(Nexus::new( + name, + size, + bdev_uuid, + nexus_uuid, + nvme_params, + None, + )); // Obtain a reference to the newly created Nexus object. 
let ni = diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index ba82406a1..56d0eb40b 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -34,6 +34,7 @@ use crate::{ device_destroy, device_lookup, lookup_nexus_child, + nexus, nexus::{ nexus_bdev::{ CreateChild, @@ -43,13 +44,15 @@ use crate::{ NexusStatus, OpenChild, }, + nexus_channel, nexus_channel::DrEvent, nexus_child::{ChildState, NexusChild}, + nexus_persistence::PersistOp, }, Reason, VerboseError, }, - core::DeviceEventType, + core::{DeviceEventType, Reactors}, nexus_uri::NexusBdevError, }; @@ -178,23 +181,31 @@ impl Nexus { self.name.clone(), Some(child_bdev), ); - match child.open(self.size) { - Ok(name) => { - // we have created the bdev, and created a nexusChild struct. To - // make use of the device itself the - // data and metadata must be validated. The child - // will be added and marked as faulted, once the rebuild has - // completed the device can transition to online - info!("{}: child opened successfully {}", self.name, name); - - // FIXME: use dummy key for now - if let Err(e) = child.resv_register(0x12345678).await { - error!("Failed to register key with child: {}", e); - } - + let mut child_name = child.open(self.size); + if let Ok(ref name) = child_name { + // we have created the bdev, and created a nexusChild struct. To + // make use of the device itself the + // data and metadata must be validated. The child + // will be added and marked as faulted, once the rebuild has + // completed the device can transition to online + info!("{}: child opened successfully {}", self.name, name); + + if let Err(e) = child + .acquire_write_exclusive( + self.nvme_params.resv_key, + self.nvme_params.preempt_key, + ) + .await + { + child_name = Err(e); + } + } + match child_name { + Ok(cn) => { // it can never take part in the IO path // of the nexus until it's rebuilt from a healthy child. child.fault(Reason::OutOfSync).await; + let child_state = child.state(); // Register event listener for newly added child. self.register_child_event_listener(&child); @@ -206,6 +217,7 @@ impl Nexus { error!("Failed to sync labels {:?}", e); // todo: how to signal this? 
} + self.persist(PersistOp::AddChild((cn, child_state))).await; Ok(self.status()) } @@ -252,6 +264,26 @@ impl Nexus { }); } + let healthy_children = self + .children + .iter() + .filter(|c| c.is_healthy()) + .collect::>(); + + let have_healthy_children = !healthy_children.is_empty(); + let other_healthy_children = healthy_children + .into_iter() + .filter(|c| c.get_name() != uri) + .count() + > 0; + + if have_healthy_children && !other_healthy_children { + return Err(Error::DestroyLastHealthyChild { + name: self.name.clone(), + child: uri.to_owned(), + }); + } + let cancelled_rebuilding_children = self.cancel_child_rebuild_jobs(uri).await; @@ -268,8 +300,11 @@ impl Nexus { }); } + let child_state = self.children[idx].state(); self.children.remove(idx); self.child_count -= 1; + self.persist(PersistOp::Update((uri.to_string(), child_state))) + .await; self.start_rebuild_jobs(cancelled_rebuilding_children).await; Ok(()) @@ -415,6 +450,37 @@ impl Nexus { } } } + DeviceEventType::AdminCommandCompletionFailed => { + let cn = &device; + for nexus in nexus::instances() { + if nexus_channel::fault_nexus_child(nexus, cn) { + info!( + "{}: retiring child {} in response to admin command completion failure event", + nexus.name, + device, + ); + + let child_dev = device.to_string(); + Reactors::master().send_future(async move { + // Error indicates it is already paused and another + // thread is processing the fault + let child_dev2 = child_dev.clone(); + if let Err(e) = nexus.child_retire(child_dev).await + { + warn!( + "retiring child {} returned {}", + child_dev2, e + ); + } + }); + return; + } + } + warn!( + "No nexus child exists for device {}, ignoring admin command completion failure event", + device + ); + } _ => { info!("Ignoring {:?} event for device {}", event, device); } @@ -487,14 +553,37 @@ impl Nexus { }); } - // FIXME: use dummy key for now + // acquire a write exclusive reservation on all children, + // if any one fails, close all children. 
+ let mut we_err: Result<(), Error> = Ok(()); for child in self.children.iter() { - if let Err(error) = child.resv_register(0x12345678).await { - error!( - "{}: failed to register key {} for child {}", - self.name, child.name, error - ); + if let Err(error) = child + .acquire_write_exclusive( + self.nvme_params.resv_key, + self.nvme_params.preempt_key, + ) + .await + { + we_err = Err(Error::ChildWriteExclusiveResvFailed { + source: error, + child: child.name.clone(), + name: self.name.clone(), + }); + break; + } + } + if let Err(error) = we_err { + for child in &mut self.children { + if let Err(error) = child.close().await { + error!( + "{}: child {} failed to close with error {}", + self.name, + &child.name, + error.verbose() + ); + } } + return Err(error); } for child in self.children.iter() { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index aa2f14999..c274de504 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -12,15 +12,16 @@ use crate::{ nexus::{ nexus_bdev::{ nexus_lookup, - CreateRebuildError, + CreateRebuild, Error, Nexus, RebuildJobNotFound, - RebuildOperationError, + RebuildOperation, RemoveRebuildJob, }, nexus_channel::DrEvent, nexus_child::{ChildState, Reason}, + nexus_persistence::PersistOp, }, VerboseError, }, @@ -86,7 +87,7 @@ impl Nexus { }); }, ) - .context(CreateRebuildError { + .context(CreateRebuild { child: name.to_owned(), name: self.name.clone(), })?; @@ -101,7 +102,7 @@ impl Nexus { // rebuilt ranges in sync with the other children. self.reconfigure(DrEvent::ChildRebuild).await; - job.as_client().start().context(RebuildOperationError { + job.as_client().start().context(RebuildOperation { job: name.to_owned(), name: self.name.clone(), }) @@ -128,7 +129,7 @@ impl Nexus { /// Stop a rebuild job in the background pub async fn stop_rebuild(&self, name: &str) -> Result<(), Error> { match self.get_rebuild_job(name) { - Ok(rj) => rj.as_client().stop().context(RebuildOperationError { + Ok(rj) => rj.as_client().stop().context(RebuildOperation { job: name.to_owned(), name: self.name.clone(), }), @@ -141,7 +142,7 @@ impl Nexus { /// Pause a rebuild job in the background pub async fn pause_rebuild(&mut self, name: &str) -> Result<(), Error> { let rj = self.get_rebuild_job(name)?.as_client(); - rj.pause().context(RebuildOperationError { + rj.pause().context(RebuildOperation { job: name.to_owned(), name: self.name.clone(), }) @@ -150,7 +151,7 @@ impl Nexus { /// Resume a rebuild job in the background pub async fn resume_rebuild(&mut self, name: &str) -> Result<(), Error> { let rj = self.get_rebuild_job(name)?.as_client(); - rj.resume().context(RebuildOperationError { + rj.resume().context(RebuildOperation { job: name.to_owned(), name: self.name.clone(), }) @@ -265,6 +266,10 @@ impl Nexus { "Child {} has been rebuilt successfully", recovering_child.get_name() ); + let child_name = recovering_child.get_name().to_string(); + let child_state = recovering_child.state(); + self.persist(PersistOp::Update((child_name, child_state))) + .await; } RebuildState::Stopped => { info!( diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index 9f604956c..69d9d717d 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -68,6 +68,7 @@ impl ReconfigureCtx { } #[derive(Debug)] +#[allow(clippy::enum_variant_names)] /// Dynamic Reconfiguration Events occur when a child is added or 
removed pub enum DrEvent { /// Child offline reconfiguration event @@ -80,6 +81,32 @@ pub enum DrEvent { ChildRebuild, } +/// Mark nexus child as faulted based on its device name +pub(crate) fn fault_nexus_child(nexus: &mut Nexus, name: &str) -> bool { + nexus + .children + .iter() + .filter(|c| c.state() == ChildState::Open) + .filter(|c| { + // If there were previous retires, we do not have a reference + // to a BlockDevice. We do however, know it can't be the device + // we are attempting to retire in the first place so this + // condition is fine. + if let Ok(child) = c.get_device().as_ref() { + child.device_name() == name + } else { + false + } + }) + .any(|c| { + Ok(ChildState::Open) + == c.state.compare_exchange( + ChildState::Open, + ChildState::Faulted(Reason::IoError), + ) + }) +} + impl NexusChannelInner { /// very simplistic routine to rotate between children for read operations /// note that the channels can be None during a reconfigure; this is usually @@ -134,28 +161,7 @@ impl NexusChannelInner { /// Fault the child by marking its status. pub fn fault_child(&mut self, name: &str) -> bool { let nexus = unsafe { Nexus::from_raw(self.device) }; - nexus - .children - .iter() - .filter(|c| c.state() == ChildState::Open) - .filter(|c| { - // If there where previous retires, we do not have a reference - // to a BlockDevice. We do however, know it cant be the device - // we are attempting to retire in the first place so this - // condition is fine. - if let Ok(child) = c.get_device().as_ref() { - child.device_name() == name - } else { - false - } - }) - .any(|c| { - ChildState::Open - == c.state.compare_and_swap( - ChildState::Open, - ChildState::Faulted(Reason::IoError), - ) - }) + fault_nexus_child(nexus, name) } /// Refreshing our channels simply means that we either have a child going @@ -289,20 +295,15 @@ impl NexusChannel { pub extern "C" fn reconfigure( device: *mut c_void, ctx: Box, - event: &DrEvent, + _event: &DrEvent, ) { - match event { - DrEvent::ChildOffline - | DrEvent::ChildRemove - | DrEvent::ChildFault - | DrEvent::ChildRebuild => unsafe { - spdk_for_each_channel( - device, - Some(NexusChannel::refresh_io_channels), - Box::into_raw(ctx).cast(), - Some(Self::reconfigure_completed), - ); - }, + unsafe { + spdk_for_each_channel( + device, + Some(NexusChannel::refresh_io_channels), + Box::into_raw(ctx).cast(), + Some(Self::reconfigure_completed), + ); } } diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index eca9ab3a4..c3bbb5433 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -21,16 +21,25 @@ use crate::{ VerboseError, }, core::{ + nvme_reservation_acquire_action, + nvme_reservation_register_action, + nvme_reservation_register_cptpl, + nvme_reservation_type, BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, CoreError, + DmaError, Reactor, Reactors, }, nexus_uri::NexusBdevError, persistent_store::PersistentStore, rebuild::{ClientOperations, RebuildJob}, + spdk_sys::{ + spdk_nvme_registered_ctrlr_extended_data, + spdk_nvme_reservation_status_extended_data, + }, }; use url::Url; @@ -64,8 +73,19 @@ pub enum ChildError { HandleCreate { source: CoreError }, #[snafu(display("Failed to open a BlockDeviceHandle for child"))] HandleOpen { source: CoreError }, - #[snafu(display("Failed to register key for child"))] + #[snafu(display("Failed to allocate DmaBuffer for child"))] + HandleDmaMalloc { source: DmaError }, + #[snafu(display("Failed to register key for child: {}", 
source))] ResvRegisterKey { source: CoreError }, + #[snafu(display("Failed to acquire reservation for child: {}", source))] + ResvAcquire { source: CoreError }, + #[snafu(display( + "Failed to get reservation report for child: {}", + source + ))] + ResvReport { source: CoreError }, + #[snafu(display("Failed to get NVMe host ID: {}", source))] + NvmeHostId { source: CoreError }, #[snafu(display("Failed to create a BlockDevice for child {}", child))] ChildBdevCreate { child: String, @@ -272,21 +292,194 @@ impl NexusChild { ) } - /// Register a key - pub(crate) async fn resv_register( + /// Check if we're healthy + pub(crate) fn is_healthy(&self) -> bool { + self.state() == ChildState::Open + } + + /// Register an NVMe reservation, specifying a new key + async fn resv_register( &self, + hdl: &dyn BlockDeviceHandle, new_key: u64, + ) -> Result<(), CoreError> { + hdl.nvme_resv_register( + 0, + new_key, + nvme_reservation_register_action::REGISTER_KEY, + nvme_reservation_register_cptpl::NO_CHANGES, + ) + .await?; + info!( + "{}: registered key {:0x}h on child {}", + self.parent, new_key, self.name + ); + Ok(()) + } + + /// Acquire an NVMe reservation + async fn resv_acquire( + &self, + hdl: &dyn BlockDeviceHandle, + current_key: u64, + preempt_key: u64, + acquire_action: u8, + resv_type: u8, ) -> Result<(), ChildError> { - let hdl = self.get_io_handle().context(HandleOpen {})?; - if hdl.get_device().driver_name() == "nvme" { - info!( - "{}: registering key {:x}h on child {}...", - self.parent, new_key, self.name + if let Err(e) = hdl + .nvme_resv_acquire( + current_key, + preempt_key, + acquire_action, + resv_type, + ) + .await + { + return Err(ChildError::ResvAcquire { + source: e, + }); + } + info!( + "{}: acquired reservation type {:x}h, action {:x}h, current key {:0x}h, preempt key {:0x}h on child {}", + self.parent, resv_type, acquire_action, current_key, preempt_key, self.name + ); + Ok(()) + } + + /// Get NVMe reservation report + /// Returns: (key, host id) of write exclusive reservation holder + async fn resv_report( + &self, + hdl: &dyn BlockDeviceHandle, + ) -> Result, ChildError> { + let mut buffer = hdl.dma_malloc(4096).context(HandleDmaMalloc {})?; + if let Err(e) = hdl.nvme_resv_report(1, &mut buffer).await { + return Err(ChildError::ResvReport { + source: e, + }); + } + trace!( + "{}: received reservation report for child {}", + self.parent, + self.name + ); + let (stext, sl) = buffer.as_slice().split_at(std::mem::size_of::< + spdk_nvme_reservation_status_extended_data, + >()); + let (pre, resv_status_ext, post) = unsafe { + stext.align_to::() + }; + assert!(pre.is_empty()); + assert!(post.is_empty()); + let regctl = resv_status_ext[0].data.regctl; + trace!( + "reservation status: rtype {}, regctl {}, ptpls {}", + resv_status_ext[0].data.rtype, + regctl, + resv_status_ext[0].data.ptpls, + ); + let (pre, reg_ctrlr_ext, _post) = unsafe { + sl.align_to::() + }; + if !pre.is_empty() { + return Ok(None); + } + let mut numctrlr: usize = regctl.into(); + if numctrlr > reg_ctrlr_ext.len() { + numctrlr = reg_ctrlr_ext.len(); + warn!( + "Expecting data for {} controllers, received {}", + regctl, numctrlr + ); + } + for (i, c) in reg_ctrlr_ext.iter().enumerate().take(numctrlr) { + let cntlid = c.cntlid; + let rkey = c.rkey; + trace!( + "ctrlr {}: cntlid {:0x}h, status {}, hostid {:0x?}, rkey {:0x}h", + i, + cntlid, + c.rcsts.status(), + c.hostid, + rkey, ); - if let Err(e) = hdl.nvme_resv_register(0, new_key, 0, 0).await { - return Err(ChildError::ResvRegisterKey { - source: e, - }); + if 
resv_status_ext[0].data.rtype == 1 && c.rcsts.status() == 1 { + return Ok(Some((rkey, c.hostid))); + } + } + Ok(None) + } + + /// Register an NVMe reservation on the child then acquire a write + /// exclusive reservation, preempting an existing reservation, if another + /// host has it. + /// Ignores bdevs without NVMe reservation support. + pub(crate) async fn acquire_write_exclusive( + &self, + key: u64, + preempt_key: Option, + ) -> Result<(), ChildError> { + if std::env::var("NEXUS_NVMF_RESV_ENABLE").is_err() { + return Ok(()); + } + let hdl = self.get_io_handle().context(HandleOpen {})?; + if let Err(e) = self.resv_register(&*hdl, key).await { + match e { + CoreError::NotSupported { + .. + } => return Ok(()), + _ => { + return Err(ChildError::ResvRegisterKey { + source: e, + }) + } + } + } + if let Err(e) = self + .resv_acquire( + &*hdl, + key, + match preempt_key { + None => 0, + Some(k) => k.get(), + }, + match preempt_key { + None => nvme_reservation_acquire_action::ACQUIRE, + Some(_) => nvme_reservation_acquire_action::PREEMPT, + }, + nvme_reservation_type::WRITE_EXCLUSIVE_ALL_REGS, + ) + .await + { + warn!("{}", e); + } + if let Some((pkey, hostid)) = self.resv_report(&*hdl).await? { + let my_hostid = match hdl.host_id().await { + Ok(h) => h, + Err(e) => { + return Err(ChildError::NvmeHostId { + source: e, + }); + } + }; + if my_hostid != hostid { + info!("Write exclusive reservation held by {:0x?}", hostid); + self.resv_acquire( + &*hdl, + key, + pkey, + nvme_reservation_acquire_action::PREEMPT, + nvme_reservation_type::WRITE_EXCLUSIVE_ALL_REGS, + ) + .await?; + if let Some((_, hostid)) = self.resv_report(&*hdl).await? { + if my_hostid != hostid { + info!( + "Write exclusive reservation held by {:0x?}", + hostid + ); + } + } } } Ok(()) @@ -425,7 +618,7 @@ impl NexusChild { /// Called in response to a device removal event. /// All the necessary teardown should be performed here before the - /// underlaying device is removed. + /// underlying device is removed. /// /// Note: The descriptor *must* be dropped for the remove to complete. 
pub(crate) fn remove(&mut self) { diff --git a/mayastor/src/bdev/nexus/nexus_config.rs b/mayastor/src/bdev/nexus/nexus_config.rs deleted file mode 100644 index 1e0850cfa..000000000 --- a/mayastor/src/bdev/nexus/nexus_config.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{ffi::CStr, ptr::null_mut, str::FromStr}; - -use spdk_sys::{ - spdk_conf_find_section, - spdk_conf_section, - spdk_conf_section_get_nmval, - spdk_conf_section_get_nval, -}; - -use crate::bdev::nexus::nexus_instance_new; - -unsafe fn parse_config_param( - sp: *mut spdk_conf_section, - dev_name: &str, - dev_num: i32, - position: i32, -) -> Result { - let dev_name_c = std::ffi::CString::new(dev_name).unwrap(); - let val = - spdk_conf_section_get_nmval(sp, dev_name_c.as_ptr(), dev_num, position); - if val.is_null() { - return Err(format!( - "Config value for {}{} at position {} not found", - dev_name, dev_num, position - )); - } - CStr::from_ptr(val) - .to_str() - .unwrap() - .parse::() - .map_err(|_error| { - format!( - "Invalid config value for {}{} at position {}", - dev_name, dev_num, position - ) - }) -} - -pub(crate) fn parse_ini_config_file() -> i32 { - let section_name = std::ffi::CString::new("Nexus").unwrap(); - let sp = - unsafe { spdk_conf_find_section(null_mut(), section_name.as_ptr()) }; - - if sp.is_null() { - return 0; - } - - info!("Parsing nexus config sections"); - - let mut devnum = 0; - loop { - let dev = unsafe { - let dev_string = std::ffi::CString::new("Dev").unwrap(); - spdk_conf_section_get_nval(sp, dev_string.as_ptr(), devnum) - }; - if dev.is_null() { - break; - } - - let name: String = unsafe { - match parse_config_param(sp, "Dev", devnum, 0) { - Ok(val) => val, - Err(err) => { - error!("{}", err); - return libc::EINVAL; - } - } - }; - - // parse bdev block size - let block_size: u32 = unsafe { - match parse_config_param(sp, "Dev", devnum, 2) { - Ok(val) => val, - Err(err) => { - error!("{}", err); - return libc::EINVAL; - } - } - }; - - // parse bdev size - let lu_size: u64 = unsafe { - match parse_config_param::(sp, "Dev", devnum, 1) { - Ok(val) => val * 1024 * 1024 / u64::from(block_size), - Err(err) => { - error!("{}", err); - return libc::EINVAL; - } - } - }; - let mut child_bdevs = Vec::new(); - for i in 3 .. { - unsafe { - match parse_config_param::(sp, "Dev", devnum, i) { - Ok(val) => child_bdevs.push(val), - Err(_) => break, - } - } - } - - debug!( - "Found Nexus device {}: block_count={}, block_size={} with nvmf targets {:?}", - name, lu_size, block_size, &child_bdevs - ); - - nexus_instance_new(name, lu_size, child_bdevs); - devnum += 1; - } - 0 -} diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index 9bca0806b..95c6f30f1 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -103,7 +103,6 @@ impl NexusFnTable { /// Main entry point to submit IO to the underlying children this uses /// callbacks rather than futures and closures for performance reasons. /// This function is not called when the IO is re-submitted (see below). 
- #[no_mangle] pub extern "C" fn io_submit( channel: *mut spdk_io_channel, io: *mut spdk_bdev_io, diff --git a/mayastor/src/bdev/nexus/nexus_module.rs b/mayastor/src/bdev/nexus/nexus_module.rs index a4d3123e9..e1066cdbb 100644 --- a/mayastor/src/bdev/nexus/nexus_module.rs +++ b/mayastor/src/bdev/nexus/nexus_module.rs @@ -99,7 +99,6 @@ impl NexusModule { impl NexusModule { extern "C" fn nexus_mod_init() -> i32 { info!("Initializing Nexus CAS Module"); - crate::bdev::nexus::nexus_config::parse_ini_config_file(); 0 } diff --git a/mayastor/src/bdev/nexus/nexus_persistence.rs b/mayastor/src/bdev/nexus/nexus_persistence.rs index c9b60b70e..4e71968fb 100644 --- a/mayastor/src/bdev/nexus/nexus_persistence.rs +++ b/mayastor/src/bdev/nexus/nexus_persistence.rs @@ -32,6 +32,8 @@ pub struct ChildInfo { pub(crate) enum PersistOp { /// Create a persistent entry. Create, + /// Add a child to an existing persistent entry. + AddChild((ChildUri, ChildState)), /// Update a persistent entry. Update((ChildUri, ChildState)), /// Save the clean shutdown variable. @@ -63,6 +65,16 @@ impl Nexus { nexus_info.children.push(child_info); }); } + PersistOp::AddChild((uri, state)) => { + // Add the state of a new child. + // This should only be called on adding a new child. + let child_info = ChildInfo { + uuid: NexusChild::uuid(&uri) + .expect("Failed to get child UUID."), + healthy: Self::child_healthy(&state), + }; + nexus_info.children.push(child_info); + } PersistOp::Update((uri, state)) => { let uuid = NexusChild::uuid(&uri).expect("Failed to get child UUID."); @@ -96,7 +108,7 @@ impl Nexus { // TODO: Should we give up retrying eventually? async fn save(&self, info: &NexusInfo) { let mut output_err = true; - let nexus_uuid = self.name.strip_prefix("nexus-").unwrap_or(&self.name); + let nexus_uuid = self.uuid().to_string(); loop { match PersistentStore::put(&nexus_uuid, info).await { Ok(_) => { @@ -108,7 +120,8 @@ impl Nexus { // silently retry. if output_err { error!( - "Failed to persist nexus information for nexus {} with error {}. Retrying...", + "Failed to persist nexus information for nexus {}, UUID {} with error {}. Retrying...", + self.name, nexus_uuid, e ); diff --git a/mayastor/src/bdev/nexus/nexus_share.rs b/mayastor/src/bdev/nexus/nexus_share.rs index 4cc8095e5..d4d3ee85f 100644 --- a/mayastor/src/bdev/nexus/nexus_share.rs +++ b/mayastor/src/bdev/nexus/nexus_share.rs @@ -90,6 +90,10 @@ impl Share for Nexus { fn bdev_uri(&self) -> Option { self.bdev.bdev_uri() } + + fn bdev_uri_original(&self) -> Option { + self.bdev.bdev_uri_original() + } } impl From<&NexusTarget> for ShareProtocolNexus { @@ -141,7 +145,12 @@ impl Nexus { Ok(uri) } ShareProtocolNexus::NexusNvmf => { - let uri = self.share_nvmf(None).await?; + let uri = self + .share_nvmf(Some(( + self.nvme_params.min_cntlid, + self.nvme_params.max_cntlid, + ))) + .await?; self.nexus_target = Some(NexusTarget::NexusNvmfTarget); Ok(uri) } diff --git a/mayastor/src/bdev/null.rs b/mayastor/src/bdev/null.rs index 6af724876..0e7a062a9 100644 --- a/mayastor/src/bdev/null.rs +++ b/mayastor/src/bdev/null.rs @@ -54,6 +54,7 @@ impl TryFrom<&Url> for Null { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), parameter: String::from("blk_size"), + value: value.clone(), })? } else { 512 @@ -72,6 +73,7 @@ impl TryFrom<&Url> for Null { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), parameter: String::from("size_mb"), + value: value.clone(), })? 
} else { 0 @@ -82,6 +84,7 @@ impl TryFrom<&Url> for Null { value.parse().context(nexus_uri::IntParamParseError { uri: uri.to_string(), parameter: String::from("blk_size"), + value: value.clone(), })? } else { 0 @@ -182,6 +185,7 @@ impl CreateDestroy for Null { async fn destroy(self: Box) -> Result<(), Self::Error> { if let Some(bdev) = Bdev::lookup_by_name(&self.name) { + bdev.remove_alias(&self.alias); let (s, r) = oneshot::channel::>(); unsafe { spdk_sys::bdev_null_delete( diff --git a/mayastor/src/bdev/nvme.rs b/mayastor/src/bdev/nvme.rs index 2b77c8d53..bdf41d53b 100644 --- a/mayastor/src/bdev/nvme.rs +++ b/mayastor/src/bdev/nvme.rs @@ -96,9 +96,11 @@ impl CreateDestroy for NVMe { ) }; - errno_result_from_i32((), errno).context(nexus_uri::InvalidParams { - name: self.name.clone(), - })?; + errno_result_from_i32((), errno).context( + nexus_uri::CreateBdevInvalidParams { + name: self.name.clone(), + }, + )?; receiver .await @@ -124,7 +126,8 @@ impl CreateDestroy for NVMe { } async fn destroy(self: Box) -> Result<(), Self::Error> { - if let Some(_bdev) = Bdev::lookup_by_name(&self.get_name()) { + if let Some(bdev) = Bdev::lookup_by_name(&self.get_name()) { + bdev.remove_alias(&self.url.to_string()); let errno = unsafe { bdev_nvme_delete( self.name.clone().into_cstring().as_ptr(), diff --git a/mayastor/src/bdev/nvmf.rs b/mayastor/src/bdev/nvmf.rs index 45ba26d10..51b4cc2dc 100644 --- a/mayastor/src/bdev/nvmf.rs +++ b/mayastor/src/bdev/nvmf.rs @@ -84,6 +84,7 @@ impl TryFrom<&Url> for Nvmf { nexus_uri::BoolParamParseError { uri: url.to_string(), parameter: String::from("reftag"), + value: value.to_string(), }, )? { prchk_flags |= spdk_sys::SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; @@ -95,6 +96,7 @@ impl TryFrom<&Url> for Nvmf { nexus_uri::BoolParamParseError { uri: url.to_string(), parameter: String::from("guard"), + value: value.to_string(), }, )? { prchk_flags |= spdk_sys::SPDK_NVME_IO_FLAGS_PRCHK_GUARD; @@ -176,9 +178,11 @@ impl CreateDestroy for Nvmf { ) }; - errno_result_from_i32((), errno).context(nexus_uri::InvalidParams { - name: self.name.clone(), - })?; + errno_result_from_i32((), errno).context( + nexus_uri::CreateBdevInvalidParams { + name: self.name.clone(), + }, + )?; let bdev_count = receiver .await @@ -227,7 +231,8 @@ impl CreateDestroy for Nvmf { /// Destroy the given NVMF bdev async fn destroy(self: Box) -> Result<(), Self::Error> { match Bdev::lookup_by_name(&self.get_name()) { - Some(_) => { + Some(bdev) => { + bdev.remove_alias(&self.alias); let cname = CString::new(self.name.clone()).unwrap(); let errno = unsafe { diff --git a/mayastor/src/bdev/nvmx/channel.rs b/mayastor/src/bdev/nvmx/channel.rs old mode 100755 new mode 100644 index 2fd12d8d0..aa9b9410e --- a/mayastor/src/bdev/nvmx/channel.rs +++ b/mayastor/src/bdev/nvmx/channel.rs @@ -264,6 +264,10 @@ pub struct NvmeIoChannelInner<'a> { poller: poller::Poller<'a>, io_stats_controller: IoStatsController, pub device: Box, + /// to prevent the controller from being destroyed before the channel + ctrl: Option< + std::sync::Arc>>, + >, num_pending_ios: u64, // Flag to indicate the shutdown state of the channel. 
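The new `ctrl` field on `NvmeIoChannelInner` pins the owning controller for as long as the per-core I/O channel exists and is released in `shutdown()`, so the controller cannot be destroyed while a channel still references it. A minimal sketch of that keep-alive idiom, with illustrative types rather than the real nvmx ones:

    use std::sync::{Arc, Mutex};

    struct Controller {
        name: String,
    }

    struct IoChannelInner {
        /// Holds the parent controller alive for the lifetime of the channel.
        ctrl: Option<Arc<Mutex<Controller>>>,
    }

    impl IoChannelInner {
        fn new(ctrl: Arc<Mutex<Controller>>) -> Self {
            Self { ctrl: Some(ctrl) }
        }

        fn shutdown(&mut self) {
            // Releasing the Arc here lets the controller be torn down once the
            // global controller list (and any other holders) drop their clones.
            if let Some(c) = self.ctrl.take() {
                println!("channel released controller {}", c.lock().unwrap().name);
            }
        }
    }

    fn main() {
        let ctrl = Arc::new(Mutex::new(Controller { name: "nvme0".into() }));
        let mut ch = IoChannelInner::new(ctrl.clone());
        ch.shutdown();
        // `ctrl` is now the only remaining reference.
        assert_eq!(Arc::strong_count(&ctrl), 1);
    }

This pairs with the destroy path further down, which loops on Arc::try_unwrap (sleeping via mayastor_sleep) until the last such reference has been dropped before completing device destruction.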
@@ -309,6 +313,7 @@ impl NvmeIoChannelInner<'_> { let rc = self.reset(); if rc == 0 { self.is_shutdown = true; + self.ctrl.take(); } rc } @@ -427,6 +432,7 @@ impl IoStatsController { self.io_stats.num_unmap_ops += num_ops; self.io_stats.bytes_unmapped += num_blocks; } + IoType::WriteZeros => {} _ => { warn!("Unsupported I/O type for I/O statistics: {:?}", op); } @@ -587,6 +593,7 @@ impl NvmeControllerIoChannel { io_stats_controller: IoStatsController::new(block_size), is_shutdown: false, device, + ctrl: Some(carc), num_pending_ios: 0, }); diff --git a/mayastor/src/bdev/nvmx/controller.rs b/mayastor/src/bdev/nvmx/controller.rs old mode 100755 new mode 100644 index bf93405ef..64c92338e --- a/mayastor/src/bdev/nvmx/controller.rs +++ b/mayastor/src/bdev/nvmx/controller.rs @@ -59,6 +59,7 @@ use crate::{ }, ffihelper::{cb_arg, done_cb}, nexus_uri::NexusBdevError, + sleep::mayastor_sleep, }; #[derive(Debug)] @@ -199,7 +200,7 @@ impl<'a> NvmeController<'a> { .expect("(BUG) no inner NVMe controller defined yet"); if let Some(ns) = inner.namespaces.get(0) { - Some(Arc::clone(ns)) + Some(ns.clone()) } else { debug!("no namespaces associated with the current controller"); None @@ -263,7 +264,7 @@ impl<'a> NvmeController<'a> { if !ns_active { self .state_machine - .transition(Faulted(ControllerFailureReason::NamespaceInitFailed)) + .transition(Faulted(ControllerFailureReason::NamespaceInit)) .expect("failed to fault controller in response to ns enumeration failure"); } @@ -324,7 +325,7 @@ impl<'a> NvmeController<'a> { ); } - let io_device = Arc::clone(&self.inner.as_ref().unwrap().io_device); + let io_device = self.inner.as_ref().unwrap().io_device.clone(); let reset_ctx = ResetCtx { name: self.name.clone(), cb, @@ -352,6 +353,7 @@ impl<'a> NvmeController<'a> { channel: &mut NvmeIoChannelInner, ctx: &mut ShutdownCtx, ) -> i32 { + debug!(?ctx.name, "shutting down I/O channel"); let rc = channel.shutdown(); if rc == 0 { @@ -379,7 +381,7 @@ impl<'a> NvmeController<'a> { error!("{} failed to shutdown I/O channels, rc = {}. Shutdown aborted.", ctx.name, result); controller .state_machine - .transition(Faulted(ControllerFailureReason::ShutdownFailed)) + .transition(Faulted(ControllerFailureReason::Shutdown)) .expect("failed to transition controller to Faulted state"); return; } @@ -503,6 +505,10 @@ impl<'a> NvmeController<'a> { source: Errno::EBUSY, } })?; + // Prevent racing device destroy + unsafe { + self.timeout_config.as_mut().start_device_destroy(); + } debug!("{} shutting down the controller", self.name); @@ -544,7 +550,7 @@ impl<'a> NvmeController<'a> { // shutdown might be in place. let _ = controller.state_machine.transition_checked( Running, - Faulted(ControllerFailureReason::ResetFailed), + Faulted(ControllerFailureReason::Reset), ); } @@ -595,7 +601,7 @@ impl<'a> NvmeController<'a> { if let Some(c) = self.controller() { c.fail() } - let io_device = Arc::clone(&self.inner.as_ref().unwrap().io_device); + let io_device = self.inner.as_ref().unwrap().io_device.clone(); let reset_ctx = ResetCtx { name: self.name.clone(), cb, @@ -654,7 +660,7 @@ impl<'a> NvmeController<'a> { // Once controller is successfully reset, schedule another //I/O channel traversal to restore all I/O channels. 
- let io_device = Arc::clone(&reset_ctx.io_device); + let io_device = reset_ctx.io_device.clone(); io_device.traverse_io_channels( NvmeController::_reset_create_channels, NvmeController::_reset_create_channels_done, @@ -821,22 +827,45 @@ extern "C" fn aer_cb(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { } } -/// return number of completions processed (maybe 0) or the negated on error, -/// which is one of: -/// -/// ENXIO: the qpair is not conected or when the controller is -/// marked as failed. -/// -/// EGAIN: returned whenever the controller is being reset. +/// Poll to process qpair completions on admin queue +/// Returns: 0 (SPDK_POLLER_IDLE) or 1 (SPDK_POLLER_BUSY) pub extern "C" fn nvme_poll_adminq(ctx: *mut c_void) -> i32 { let mut context = NonNull::::new(ctx.cast()) .expect("ctx pointer may never be null"); let context = unsafe { context.as_mut() }; + // returns number of completions processed (maybe 0) or the negated error, + // which is one of: + // + // ENXIO: the qpair is not connected or when the controller is + // marked as failed. + // + // EAGAIN: returned whenever the controller is being reset. let result = context.process_adminq(); if result < 0 { - //error!("{}: {}", context.name, Errno::from_i32(result.abs())); + if context.start_device_destroy() { + error!( + "process adminq: {}: {}", + context.name, + Errno::from_i32(result.abs()) + ); + info!("dispatching nexus fault and retire: {}", context.name); + let dev_name = context.name.to_string(); + let carc = NVME_CONTROLLERS.lookup_by_name(&dev_name).unwrap(); + debug!( + ?dev_name, + "notifying listeners of admin command completion failure" + ); + let controller = carc.lock(); + let num_listeners = controller + .notify_event(DeviceEventType::AdminCommandCompletionFailed); + debug!( + ?dev_name, + ?num_listeners, + "listeners notified of admin command completion failure" + ); + } return 1; } @@ -861,24 +890,30 @@ pub(crate) async fn destroy_device(name: String) -> Result<(), NexusBdevError> { { let mut controller = carc.lock(); - fn _shutdown_callback(success: bool, ctx: *mut c_void) { - done_cb(ctx, success); - } + // Skip not-fully initialized controllers. + if controller.get_state() != NvmeControllerState::New { + fn _shutdown_callback(success: bool, ctx: *mut c_void) { + done_cb(ctx, success); + } - controller - .shutdown(_shutdown_callback, cb_arg(s)) - .map_err(|_| NexusBdevError::DestroyBdev { - name: String::from(&name), - source: Errno::EAGAIN, - })? - } + controller.shutdown(_shutdown_callback, cb_arg(s)).map_err( + |_| NexusBdevError::DestroyBdev { + name: String::from(&name), + source: Errno::EAGAIN, + }, + )?; - if !r.await.expect("Failed awaiting at shutdown()") { - error!(?name, "failed to shutdown controller"); - return Err(NexusBdevError::DestroyBdev { - name: String::from(&name), - source: Errno::EAGAIN, - }); + // Release the lock before waiting for controller shutdown. + drop(controller); + + if !r.await.expect("Failed awaiting at shutdown()") { + error!(?name, "failed to shutdown controller"); + return Err(NexusBdevError::DestroyBdev { + name: String::from(&name), + source: Errno::EAGAIN, + }); + } + } } // 2. Remove controller from the list so that a new controller with the @@ -894,13 +929,34 @@ pub(crate) async fn destroy_device(name: String) -> Result<(), NexusBdevError> { // Notify the listeners. 
debug!(?name, "notifying listeners about device removal"); - let controller = carc.lock(); - let num_listeners = controller.notify_event(DeviceEventType::DeviceRemoved); - debug!( - ?name, - ?num_listeners, - "listeners notified about device removal" - ); + { + let controller = carc.lock(); + let num_listeners = + controller.notify_event(DeviceEventType::DeviceRemoved); + debug!( + ?name, + ?num_listeners, + "listeners notified about device removal" + ); + } + + let mut carc = carc; + loop { + match Arc::try_unwrap(carc) { + Ok(i) => { + drop(i); + break; + } + Err(ret) => { + warn!(?name, "delaying controller destroy"); + let rx = mayastor_sleep(std::time::Duration::from_millis(250)); + if rx.await.is_err() { + error!("failed to wait for mayastor_sleep"); + } + carc = ret; + } + } + } Ok(()) } @@ -909,7 +965,6 @@ pub(crate) fn connected_attached_cb( ctx: &mut NvmeControllerContext, ctrlr: SpdkNvmeController, ) { - ctx.unregister_poller(); // we use the ctrlr address as the controller id in the global table let cid = ctrlr.as_ptr() as u64; @@ -920,7 +975,7 @@ pub(crate) fn connected_attached_cb( .expect("no controller in the list"); // clone it now such that we can lock the original, and insert it later. - let ctl = Arc::clone(&controller); + let ctl = controller.clone(); let mut controller = controller.lock(); controller .state_machine @@ -1003,6 +1058,7 @@ pub(crate) mod options { admin_timeout_ms: Option, disable_error_logging: Option, fabrics_connect_timeout_us: Option, + ext_host_id: Option<[u8; 16]>, host_nqn: Option, keep_alive_timeout_ms: Option, transport_retry_count: Option, @@ -1038,6 +1094,11 @@ pub(crate) mod options { self } + pub fn with_ext_host_id(mut self, ext_host_id: [u8; 16]) -> Self { + self.ext_host_id = Some(ext_host_id); + self + } + pub fn with_hostnqn>(mut self, host_nqn: T) -> Self { self.host_nqn = Some(host_nqn.into()); self @@ -1063,6 +1124,10 @@ pub(crate) mod options { opts.0.keep_alive_timeout_ms = timeout_ms; } + if let Some(ext_host_id) = self.ext_host_id { + opts.0.extended_host_id = ext_host_id; + } + if let Some(host_nqn) = self.host_nqn { unsafe { copy_nonoverlapping( diff --git a/mayastor/src/bdev/nvmx/controller_inner.rs b/mayastor/src/bdev/nvmx/controller_inner.rs old mode 100755 new mode 100644 index 83d2e0be9..bc664acbc --- a/mayastor/src/bdev/nvmx/controller_inner.rs +++ b/mayastor/src/bdev/nvmx/controller_inner.rs @@ -74,6 +74,7 @@ pub(crate) struct TimeoutConfig { ctrlr: SpdkNvmeController, reset_attempts: u32, next_reset_time: Instant, + destroy_in_progress: AtomicCell, } impl Drop for TimeoutConfig { @@ -93,6 +94,7 @@ impl TimeoutConfig { ctrlr: SpdkNvmeController(NonNull::dangling()), reset_attempts: MAX_RESET_ATTEMPTS, next_reset_time: Instant::now(), + destroy_in_progress: AtomicCell::new(false), } } @@ -100,6 +102,11 @@ impl TimeoutConfig { self as *const _ as *mut _ } + pub fn start_device_destroy(&mut self) -> bool { + self.destroy_in_progress + .compare_exchange(false, true) + .is_ok() + } pub fn set_controller(&mut self, ctrlr: SpdkNvmeController) { self.ctrlr = ctrlr; } @@ -142,7 +149,10 @@ impl TimeoutConfig { // Clear the flag as we are the exclusive owner. assert!( - timeout_ctx.reset_in_progress.compare_and_swap(true, false), + timeout_ctx + .reset_in_progress + .compare_exchange(true, false) + .is_ok(), "non-exclusive access to controller reset flag" ); } @@ -176,7 +186,7 @@ impl TimeoutConfig { /// resets related to I/O timeout. pub(crate) fn reset_controller(&mut self) { // Make sure no other resets are in progress. 
- if self.reset_in_progress.compare_and_swap(false, true) { + if self.reset_in_progress.compare_exchange(false, true).is_ok() { return; } @@ -223,7 +233,7 @@ impl TimeoutConfig { // Clear the flag as we are the exclusive owner. assert!( - self.reset_in_progress.compare_and_swap(true, false), + self.reset_in_progress.compare_exchange(true, false).is_ok(), "non-exclusive access to controller reset flag" ); } @@ -294,6 +304,11 @@ impl SpdkNvmeController { pub fn as_ptr(&self) -> *mut spdk_nvme_ctrlr { self.0.as_ptr() } + + /// Returns extended host identifier + pub fn ext_host_id(&self) -> &[u8; 16] { + unsafe { &(*self.as_ptr()).opts.extended_host_id } + } } impl From<*mut spdk_nvme_ctrlr> for SpdkNvmeController { @@ -456,6 +471,7 @@ impl<'a> NvmeController<'a> { spdk_nvme_ctrlr_register_timeout_callback( self.ctrlr_as_ptr(), device_defaults.timeout_us, + device_defaults.timeout_us, Some(NvmeController::io_timeout_handler), self.timeout_config.as_ptr().cast(), ); diff --git a/mayastor/src/bdev/nvmx/controller_state.rs b/mayastor/src/bdev/nvmx/controller_state.rs old mode 100755 new mode 100644 index 6edd59c86..3ddf39fcd --- a/mayastor/src/bdev/nvmx/controller_state.rs +++ b/mayastor/src/bdev/nvmx/controller_state.rs @@ -14,9 +14,9 @@ pub enum NvmeControllerState { } #[derive(Debug, PartialEq, Copy, Clone)] pub enum ControllerFailureReason { - ResetFailed, - ShutdownFailed, - NamespaceInitFailed, + Reset, + Shutdown, + NamespaceInit, } impl ToString for NvmeControllerState { @@ -171,8 +171,7 @@ impl ControllerStateMachine { ) -> Result<(), ControllerStateMachineError> { let f = self.lookup_flag(flag); - let current = f.compare_and_swap(false, true); - if current { + if let Err(current) = f.compare_exchange(false, true) { Err(ControllerStateMachineError::ControllerFlagUpdateError { flag, current_value: current, @@ -190,15 +189,14 @@ impl ControllerStateMachine { ) -> Result<(), ControllerStateMachineError> { let f = self.lookup_flag(flag); - let current = f.compare_and_swap(true, false); - if current { - Ok(()) - } else { + if let Err(current) = f.compare_exchange(true, false) { Err(ControllerStateMachineError::ControllerFlagUpdateError { flag, current_value: current, new_value: false, }) + } else { + Ok(()) } } diff --git a/mayastor/src/bdev/nvmx/device.rs b/mayastor/src/bdev/nvmx/device.rs old mode 100755 new mode 100644 index 84bae3836..d8b2eeb60 --- a/mayastor/src/bdev/nvmx/device.rs +++ b/mayastor/src/bdev/nvmx/device.rs @@ -65,7 +65,7 @@ impl NvmeDeviceDescriptor { impl BlockDeviceDescriptor for NvmeDeviceDescriptor { fn get_device(&self) -> Box { - Box::new(NvmeBlockDevice::from_ns(&self.name, Arc::clone(&self.ns))) + Box::new(NvmeBlockDevice::from_ns(&self.name, self.ns.clone())) } fn into_handle( @@ -85,7 +85,7 @@ impl BlockDeviceDescriptor for NvmeDeviceDescriptor { &self.name, self.io_device_id, self.ctrlr, - Arc::clone(&self.ns), + self.ns.clone(), self.prchk_flags, )?)) } @@ -178,8 +178,8 @@ impl BlockDevice for NvmeBlockDevice { | IoType::Abort => true, IoType::Compare => self.ns.supports_compare(), IoType::NvmeIoMd => self.ns.md_size() > 0, - IoType::Unmap => false, - IoType::WriteZeros => false, + IoType::Unmap => self.ns.supports_deallocate(), + IoType::WriteZeros => self.ns.supports_write_zeroes(), IoType::CompareAndWrite => false, _ => false, } diff --git a/mayastor/src/bdev/nvmx/handle.rs b/mayastor/src/bdev/nvmx/handle.rs old mode 100755 new mode 100644 index 357842180..b78b1da10 --- a/mayastor/src/bdev/nvmx/handle.rs +++ b/mayastor/src/bdev/nvmx/handle.rs @@ -18,6 +18,7 
@@ use spdk_sys::{ spdk_nvme_ns_cmd_read, spdk_nvme_ns_cmd_readv, spdk_nvme_ns_cmd_write, + spdk_nvme_ns_cmd_write_zeroes, spdk_nvme_ns_cmd_writev, }; @@ -52,7 +53,7 @@ use crate::{ IoType, NvmeCommandStatus, }, - ffihelper::{cb_arg, done_cb}, + ffihelper::{cb_arg, done_cb, FfiResult}, subsys, }; @@ -144,7 +145,7 @@ impl NvmeDeviceHandle { name: &str, ns: &Arc, ) -> Box { - Box::new(NvmeBlockDevice::from_ns(name, Arc::clone(ns))) + Box::new(NvmeBlockDevice::from_ns(name, ns.clone())) } #[inline] @@ -887,8 +888,56 @@ impl BlockDeviceHandle for NvmeDeviceHandle { cb: IoCompletionCallback, cb_arg: IoCompletionCallbackArg, ) -> Result<(), CoreError> { - // Write zeroes are done through unmap. - self.unmap_blocks(offset_blocks, num_blocks, cb, cb_arg) + let channel = self.io_channel.as_ptr(); + let inner = NvmeIoChannel::inner_from_channel(channel); + + // Make sure channel allows I/O + check_channel_for_io( + IoType::WriteZeros, + inner, + offset_blocks, + num_blocks, + )?; + + let bio = alloc_nvme_io_ctx( + IoType::WriteZeros, + NvmeIoCtx { + cb, + cb_arg, + iov: std::ptr::null_mut() as *mut iovec, // No I/O vec involved. + iovcnt: 0, + iovpos: 0, + iov_offset: 0, + channel, + op: IoType::WriteZeros, + num_blocks, + }, + offset_blocks, + num_blocks, + )?; + + let rc = unsafe { + spdk_nvme_ns_cmd_write_zeroes( + self.ns.as_ptr(), + inner.qpair.as_mut().unwrap().as_ptr(), + offset_blocks, + num_blocks as u32, + Some(nvme_io_done), + bio as *mut c_void, + self.prchk_flags, + ) + }; + + if rc < 0 { + Err(CoreError::WriteZeroesDispatch { + source: Errno::from_i32(-rc), + offset: offset_blocks, + len: num_blocks, + }) + } else { + inner.account_io(); + Ok(()) + } } async fn create_snapshot(&self) -> Result { @@ -930,7 +979,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { let (s, r) = oneshot::channel::(); - let _rc = unsafe { + unsafe { spdk_nvme_ctrlr_cmd_admin_raw( self.ctrlr.as_ptr(), &mut pcmd, @@ -939,7 +988,11 @@ impl BlockDeviceHandle for NvmeDeviceHandle { Some(nvme_admin_passthru_done), cb_arg(s), ) - }; + } + .to_result(|e| CoreError::NvmeAdminDispatch { + source: Errno::from_i32(e), + opcode: cmd.opc(), + })?; inner.account_io(); let ret = if r.await.expect("Failed awaiting NVMe Admin command I/O") { @@ -999,6 +1052,50 @@ impl BlockDeviceHandle for NvmeDeviceHandle { self.io_passthru(&cmd, Some(&mut buffer)).await } + /// NVMe Reservation Acquire + async fn nvme_resv_acquire( + &self, + current_key: u64, + preempt_key: u64, + acquire_action: u8, + resv_type: u8, + ) -> Result<(), CoreError> { + let mut cmd = spdk_sys::spdk_nvme_cmd::default(); + cmd.set_opc(nvme_nvm_opcode::RESERVATION_ACQUIRE.into()); + cmd.nsid = 0x1; + unsafe { + cmd.__bindgen_anon_1 + .cdw10_bits + .resv_acquire + .set_racqa(acquire_action.into()); + cmd.__bindgen_anon_1 + .cdw10_bits + .resv_acquire + .set_rtype(resv_type.into()); + } + let mut buffer = self.dma_malloc(16).unwrap(); + let (ck, pk) = buffer.as_mut_slice().split_at_mut(8); + ck.copy_from_slice(¤t_key.to_le_bytes()); + pk.copy_from_slice(&preempt_key.to_le_bytes()); + self.io_passthru(&cmd, Some(&mut buffer)).await + } + + /// NVMe Reservation Report + /// cdw11: bit 0- Extended Data Structure + async fn nvme_resv_report( + &self, + cdw11: u32, + buffer: &mut DmaBuf, + ) -> Result<(), CoreError> { + let mut cmd = spdk_sys::spdk_nvme_cmd::default(); + cmd.set_opc(nvme_nvm_opcode::RESERVATION_REPORT.into()); + cmd.nsid = 0x1; + // Number of dwords to transfer + cmd.__bindgen_anon_1.cdw10 = ((buffer.len() >> 2) - 1) as u32; + cmd.__bindgen_anon_2.cdw11 
= cdw11; + self.io_passthru(&cmd, Some(buffer)).await + } + /// sends the specified NVMe IO Passthru command async fn io_passthru( &self, @@ -1035,7 +1132,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { let (s, r) = oneshot::channel::(); - let _rc = unsafe { + unsafe { spdk_nvme_ctrlr_cmd_io_raw( self.ctrlr.as_ptr(), inner.qpair.as_mut().unwrap().as_ptr(), @@ -1045,7 +1142,11 @@ impl BlockDeviceHandle for NvmeDeviceHandle { Some(nvme_io_passthru_done), cb_arg(s), ) - }; + } + .to_result(|e| CoreError::NvmeIoPassthruDispatch { + source: Errno::from_i32(e), + opcode: nvme_cmd.opc(), + })?; inner.account_io(); let ret = if r.await.expect("Failed awaiting NVMe IO passthru command") @@ -1060,6 +1161,21 @@ impl BlockDeviceHandle for NvmeDeviceHandle { inner.discard_io(); ret } + + /// Returns NVMe extended host identifier + async fn host_id(&self) -> Result<[u8; 16], CoreError> { + let controller = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( + CoreError::BdevNotFound { + name: self.name.to_string(), + }, + )?; + let controller = controller.lock(); + let inner = controller.controller().ok_or(CoreError::BdevNotFound { + name: self.name.to_string(), + })?; + let id = inner.ext_host_id(); + Ok(*id) + } } impl Drop for NvmeDeviceHandle { diff --git a/mayastor/src/bdev/nvmx/mod.rs b/mayastor/src/bdev/nvmx/mod.rs old mode 100755 new mode 100644 index c4bd90bde..f56adc2d6 --- a/mayastor/src/bdev/nvmx/mod.rs +++ b/mayastor/src/bdev/nvmx/mod.rs @@ -51,7 +51,7 @@ impl<'a> NVMeCtlrList<'a> { name: T, ) -> Option>>> { let entries = self.read_lock(); - entries.get(&name.into()).map(|e| Arc::clone(e)) + entries.get(&name.into()).cloned() } /// remove a NVMe controller from the list, when the last reference to the diff --git a/mayastor/src/bdev/nvmx/namespace.rs b/mayastor/src/bdev/nvmx/namespace.rs old mode 100755 new mode 100644 index 9b4e11de7..656d002fa --- a/mayastor/src/bdev/nvmx/namespace.rs +++ b/mayastor/src/bdev/nvmx/namespace.rs @@ -3,12 +3,15 @@ use std::ptr::NonNull; use spdk_sys::{ spdk_nvme_ns, spdk_nvme_ns_get_extended_sector_size, + spdk_nvme_ns_get_flags, spdk_nvme_ns_get_md_size, spdk_nvme_ns_get_num_sectors, spdk_nvme_ns_get_optimal_io_boundary, spdk_nvme_ns_get_size, spdk_nvme_ns_get_uuid, spdk_nvme_ns_supports_compare, + SPDK_NVME_NS_DEALLOCATE_SUPPORTED, + SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED, }; #[derive(Debug)] @@ -38,6 +41,22 @@ impl NvmeNamespace { unsafe { spdk_nvme_ns_supports_compare(self.0.as_ptr()) } } + pub fn supports_deallocate(&self) -> bool { + unsafe { + spdk_nvme_ns_get_flags(self.0.as_ptr()) + & SPDK_NVME_NS_DEALLOCATE_SUPPORTED + > 0 + } + } + + pub fn supports_write_zeroes(&self) -> bool { + unsafe { + spdk_nvme_ns_get_flags(self.0.as_ptr()) + & SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED + > 0 + } + } + pub fn alignment(&self) -> u64 { unsafe { spdk_nvme_ns_get_optimal_io_boundary(self.0.as_ptr()) as u64 } } diff --git a/mayastor/src/bdev/nvmx/uri.rs b/mayastor/src/bdev/nvmx/uri.rs index 458b30f8e..ae693a2c2 100644 --- a/mayastor/src/bdev/nvmx/uri.rs +++ b/mayastor/src/bdev/nvmx/uri.rs @@ -5,6 +5,7 @@ use async_trait::async_trait; use futures::channel::{oneshot, oneshot::Sender}; +use libc; use nix::errno::Errno; use parking_lot::Mutex; use snafu::ResultExt; @@ -16,6 +17,7 @@ use std::{ sync::Arc, }; use url::Url; +use uuid::Uuid; use controller::options::NvmeControllerOpts; use poller::Poller; @@ -23,6 +25,7 @@ use spdk_sys::{ spdk_nvme_connect_async, spdk_nvme_ctrlr, spdk_nvme_ctrlr_opts, + spdk_nvme_probe_ctx, spdk_nvme_probe_poll_async, spdk_nvme_transport_id, }; 
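Note: the reservation-acquire path above packs the two 64-bit keys into a single 16-byte DMA buffer, current key first, both little-endian. A minimal self-contained sketch of that layout in plain Rust (helper name hypothetical, no SPDK types involved):

fn resv_acquire_payload(current_key: u64, preempt_key: u64) -> [u8; 16] {
    // Bytes 0..8 carry the current reservation key, bytes 8..16 the
    // preempt key, both encoded little-endian as in the code above.
    let mut buf = [0u8; 16];
    buf[..8].copy_from_slice(&current_key.to_le_bytes());
    buf[8..].copy_from_slice(&preempt_key.to_le_bytes());
    buf
}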
@@ -48,7 +51,7 @@ use crate::{ use super::controller::transport::NvmeTransportId; const DEFAULT_NVMF_PORT: u16 = 8420; -// Callback to be called once NVMe controller is successfully created. +// Callback to be called once NVMe controller attach sequence completes. extern "C" fn connect_attach_cb( _cb_ctx: *mut c_void, _trid: *const spdk_nvme_transport_id, @@ -57,11 +60,32 @@ extern "C" fn connect_attach_cb( ) { let context = unsafe { &mut *(_cb_ctx as *const _ as *mut NvmeControllerContext) }; - controller::connected_attached_cb( - context, - SpdkNvmeController::from_ptr(ctrlr) - .expect("probe callback with NULL ptr"), - ); + + // Normally, the attach handler is called by the poller after + // the controller is connected. In such a case 'spdk_nvme_probe_poll_async' + // returns zero. However, in case of attach errors zero is also returned. + // In order to notify the polling function about a successful attach, + // we set this flag. + assert!(!context.attached); + context.attached = true; + + // Unregister poller immediately after controller attach completes. + context.unregister_poller(); + + // Check whether controller attach failed. + if ctrlr.is_null() { + context + .sender() + .send(Err(Errno::ENXIO)) + .expect("done callback receiver side disappeared"); + } else { + // Instantiate the controller in case attach succeeded. + controller::connected_attached_cb( + context, + SpdkNvmeController::from_ptr(ctrlr) + .expect("probe callback with NULL ptr"), + ); + } } #[derive(Debug)] @@ -119,6 +143,7 @@ impl TryFrom<&Url> for NvmfDeviceTemplate { nexus_uri::BoolParamParseError { uri: url.to_string(), parameter: String::from("reftag"), + value: value.to_string(), }, )? { prchk_flags |= spdk_sys::SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; @@ -130,6 +155,7 @@ impl TryFrom<&Url> for NvmfDeviceTemplate { nexus_uri::BoolParamParseError { uri: url.to_string(), parameter: String::from("guard"), + value: value.to_string(), }, )? { prchk_flags |= spdk_sys::SPDK_NVME_IO_FLAGS_PRCHK_GUARD; @@ -169,6 +195,7 @@ pub(crate) struct NvmeControllerContext<'probe> { sender: Option>>, receiver: oneshot::Receiver>, poller: Option>, + attached: bool, } impl<'probe> NvmeControllerContext<'probe> { @@ -191,6 +218,18 @@ impl<'probe> NvmeControllerContext<'probe> { Config::get().nvme_bdev_opts.retry_count as u8, ); + if let Ok(ext_host_id) = std::env::var("MAYASTOR_NVMF_HOSTID") { + if let Ok(uuid) = Uuid::parse_str(&ext_host_id) { + opts = opts.with_ext_host_id(*uuid.as_bytes()); + if std::env::var("HOSTNQN").is_err() { + opts = opts.with_hostnqn(format!( + "nqn.2019-05.io.openebs:uuid:{}", + uuid + )); + } + } + } + if let Ok(host_nqn) = std::env::var("HOSTNQN") { opts = opts.with_hostnqn(host_nqn); } @@ -205,6 +244,7 @@ impl<'probe> NvmeControllerContext<'probe> { sender: Some(sender), receiver, poller: None, + attached: false, } } @@ -226,6 +266,7 @@ impl CreateDestroy for NvmfDeviceTemplate { type Error = NexusBdevError; async fn create(&self) -> Result { + info!("::create() {}", self.get_name()); let cname = self.get_name(); if NVME_CONTROLLERS.lookup_by_name(&cname).is_some() { return Err(NexusBdevError::BdevExists { @@ -246,28 +287,67 @@ impl CreateDestroy for NvmfDeviceTemplate { let mut context = NvmeControllerContext::new(self); // Initiate connection with remote NVMe target.
- let probe_ctx = NonNull::new(unsafe { + let probe_ctx = match NonNull::new(unsafe { spdk_nvme_connect_async( context.trid.as_ptr(), context.opts.as_ptr(), Some(connect_attach_cb), ) - }); + }) { + Some(ctx) => ctx, + None => { + // Remove controller record before returning error. + NVME_CONTROLLERS.remove_by_name(&cname).unwrap(); + return Err(NexusBdevError::CreateBdev { + name: cname, + source: Errno::ENODEV, + }); + } + }; - if probe_ctx.is_none() { - // Remove controller record before returning error. - NVME_CONTROLLERS.remove_by_name(&cname).unwrap(); - return Err(NexusBdevError::CreateBdev { - name: cname, - source: Errno::ENODEV, - }); + struct AttachCtx { + probe_ctx: NonNull, + /// NvmeControllerContext required for handling of attach failures. + cb_ctx: *const spdk_nvme_ctrlr_opts, + name: String, } + let attach_cb_ctx = AttachCtx { + probe_ctx, + cb_ctx: context.opts.as_ptr(), + name: self.get_name(), + }; + let poller = poller::Builder::new() .with_name("nvme_async_probe_poller") .with_interval(1000) // poll every 1 second .with_poll_fn(move || unsafe { - spdk_nvme_probe_poll_async(probe_ctx.unwrap().as_ptr()) + let context = + &mut *(attach_cb_ctx.cb_ctx as *mut NvmeControllerContext); + + let r = spdk_nvme_probe_poll_async( + attach_cb_ctx.probe_ctx.as_ptr(), + ); + + if r != -libc::EAGAIN { + // Double check against successful attach, as we expect + // the attach handler to be called by the poller. + if !context.attached { + error!( + "{} controller attach failed", + attach_cb_ctx.name + ); + + connect_attach_cb( + attach_cb_ctx.cb_ctx as *mut c_void, + std::ptr::null(), + std::ptr::null_mut(), + std::ptr::null(), + ); + } + } + + r }) .build(); diff --git a/mayastor/src/bdev/nvmx/utils.rs b/mayastor/src/bdev/nvmx/utils.rs old mode 100755 new mode 100644 diff --git a/mayastor/src/bdev/uring.rs b/mayastor/src/bdev/uring.rs index ae1b099ba..20f7ff9ad 100644 --- a/mayastor/src/bdev/uring.rs +++ b/mayastor/src/bdev/uring.rs @@ -44,6 +44,7 @@ impl TryFrom<&Url> for Uring { value.parse().context(nexus_uri::IntParamParseError { uri: url.to_string(), parameter: String::from("blk_size"), + value: value.clone(), })? 
} None => 512, @@ -113,6 +114,7 @@ impl CreateDestroy for Uring { async fn destroy(self: Box) -> Result<(), Self::Error> { match Bdev::lookup_by_name(&self.name) { Some(bdev) => { + bdev.remove_alias(&self.alias); let (sender, receiver) = oneshot::channel::>(); unsafe { delete_uring_bdev( diff --git a/mayastor/src/bin/casperf.rs b/mayastor/src/bin/casperf.rs index e3af0a503..1fdc5e6d1 100644 --- a/mayastor/src/bin/casperf.rs +++ b/mayastor/src/bin/casperf.rs @@ -280,9 +280,9 @@ fn sig_override() { }; unsafe { - signal_hook::register(signal_hook::SIGTERM, handler) + signal_hook::low_level::register(signal_hook::consts::SIGTERM, handler) .expect("failed to set SIGTERM"); - signal_hook::register(signal_hook::SIGINT, handler) + signal_hook::low_level::register(signal_hook::consts::SIGINT, handler) .expect("failed to set SIGINT"); }; } diff --git a/mayastor/src/bin/mayastor-client/context.rs b/mayastor/src/bin/mayastor-client/context.rs index 310373b51..95a0b1dcc 100644 --- a/mayastor/src/bin/mayastor-client/context.rs +++ b/mayastor/src/bin/mayastor-client/context.rs @@ -30,7 +30,7 @@ pub enum Error { backtrace: Backtrace, }, #[snafu(display("Invalid output format: {}", format))] - OutputFormatError { format: String }, + OutputFormatInvalid { format: String }, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -46,7 +46,7 @@ impl FromStr for OutputFormat { match s.to_lowercase().as_str() { "json" => Ok(Self::Json), "default" => Ok(Self::Default), - s => Err(Error::OutputFormatError { + s => Err(Error::OutputFormatInvalid { format: s.to_string(), }), } @@ -102,7 +102,7 @@ impl Context { } let output = matches.value_of("output").ok_or_else(|| { - Error::OutputFormatError { + Error::OutputFormatInvalid { format: "".to_string(), } })?; diff --git a/mayastor/src/bin/mayastor-client/controller_cli.rs b/mayastor/src/bin/mayastor-client/controller_cli.rs index df330e18b..06c387635 100755 --- a/mayastor/src/bin/mayastor-client/controller_cli.rs +++ b/mayastor/src/bin/mayastor-client/controller_cli.rs @@ -12,6 +12,8 @@ use tonic::Status; pub fn subcommands<'a, 'b>() -> App<'a, 'b> { let list = SubCommand::with_name("list").about("List existing NVMe controllers"); + let stats = SubCommand::with_name("stats") + .about("Display I/O statistics for NVMe controllers"); SubCommand::with_name("controller") .settings(&[ @@ -21,6 +23,7 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { ]) .about("NVMe controllers") .subcommand(list) + .subcommand(stats) } pub async fn handler( @@ -29,6 +32,7 @@ pub async fn handler( ) -> crate::Result<()> { match matches.subcommand() { ("list", Some(args)) => list_controllers(ctx, args).await, + ("stats", Some(args)) => controller_stats(ctx, args).await, (cmd, _) => { Err(Status::not_found(format!("command {} does not exist", cmd))) .context(GrpcStatus) @@ -48,6 +52,61 @@ fn controller_state_to_str(idx: i32) -> String { .to_string() } +async fn controller_stats( + mut ctx: Context, + _matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let response = ctx + .client + .stat_nvme_controllers(rpc::Null {}) + .await + .context(GrpcStatus)?; + + match ctx.output { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&response.get_ref()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + } + OutputFormat::Default => { + let controllers = &response.get_ref().controllers; + if controllers.is_empty() { + ctx.v1("No NVMe controllers found"); + return Ok(()); + } + + let table: Vec> = controllers + .iter() + .map(|c| { + let stats = c.stats.as_ref().unwrap(); + + 
let num_read_ops = stats.num_read_ops.to_string(); + let num_write_ops = stats.num_write_ops.to_string(); + let bytes_read = stats.bytes_read.to_string(); + let bytes_written = stats.bytes_written.to_string(); + + vec![ + c.name.to_string(), + num_read_ops, + num_write_ops, + bytes_read, + bytes_written, + ] + }) + .collect(); + + let hdr = vec!["NAME", "READS", "WRITES", "READ/B", "WRITTEN/B"]; + ctx.print_list(hdr, table); + } + } + + Ok(()) +} + async fn list_controllers( mut ctx: Context, _matches: &ArgMatches<'_>, diff --git a/mayastor/src/bin/mayastor-client/nexus_cli.rs b/mayastor/src/bin/mayastor-client/nexus_cli.rs index c6051855f..c6f4bd52c 100644 --- a/mayastor/src/bin/mayastor-client/nexus_cli.rs +++ b/mayastor/src/bin/mayastor-client/nexus_cli.rs @@ -7,7 +7,7 @@ use crate::{ }; use ::rpc::mayastor as rpc; use byte_unit::Byte; -use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; +use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand}; use colored_json::ToColoredJson; use snafu::ResultExt; use tonic::{Code, Status}; @@ -35,6 +35,51 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .help("list of children to add"), ); + let create_v2 = SubCommand::with_name("create2") + .about("Create a new nexus device with NVMe options") + .arg( + Arg::with_name("name") + .required(true) + .index(1) + .help("name of the nexus"), + ) + .arg( + Arg::with_name("uuid") + .required(true) + .help("uuid for the nexus"), + ) + .arg( + Arg::with_name("size") + .required(true) + .help("size with optional unit suffix"), + ) + .arg( + Arg::with_name("min-cntlid") + .required(true) + .help("minimum NVMe controller ID for sharing over NVMf"), + ) + .arg( + Arg::with_name("max-cntlid") + .required(true) + .help("maximum NVMe controller ID"), + ) + .arg( + Arg::with_name("resv-key") + .required(true) + .help("NVMe reservation key for children"), + ) + .arg( + Arg::with_name("preempt-key") + .required(true) + .help("NVMe preempt key for children, 0 for no preemption"), + ) + .arg( + Arg::with_name("children") + .required(true) + .multiple(true) + .help("list of children to add"), + ); + let destroy = SubCommand::with_name("destroy") .about("destroy the nexus with given name") .arg( @@ -126,6 +171,16 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .takes_value(false), ); + let list2 = SubCommand::with_name("list2") + .about("list all nexus devices") + .arg( + Arg::with_name("children") + .short("c") + .long("show-children") + .required(false) + .takes_value(false), + ); + let children = SubCommand::with_name("children") .about("list nexus children") .arg( @@ -143,6 +198,7 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { ]) .about("Nexus device management") .subcommand(create) + .subcommand(create_v2) .subcommand(destroy) .subcommand(publish) .subcommand(add) @@ -150,6 +206,7 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .subcommand(unpublish) .subcommand(ana_state) .subcommand(list) + .subcommand(list2) .subcommand(children) .subcommand(nexus_child_cli::subcommands()) } @@ -160,8 +217,10 @@ pub async fn handler( ) -> crate::Result<()> { match matches.subcommand() { ("create", Some(args)) => nexus_create(ctx, args).await, + ("create2", Some(args)) => nexus_create_v2(ctx, args).await, ("destroy", Some(args)) => nexus_destroy(ctx, args).await, ("list", Some(args)) => nexus_list(ctx, args).await, + ("list2", Some(args)) => nexus_list_v2(ctx, args).await, ("children", Some(args)) => nexus_children(ctx, args).await, ("publish", Some(args)) => nexus_publish(ctx, args).await, ("unpublish", 
Some(args)) => nexus_unpublish(ctx, args).await, @@ -176,10 +235,13 @@ pub async fn handler( } } -async fn nexus_create( - mut ctx: Context, +fn nexus_create_parse( matches: &ArgMatches<'_>, -) -> crate::Result<()> { +) -> crate::Result<( + ::prost::alloc::string::String, + u64, + ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +)> { let uuid = matches.value_of("uuid").unwrap().to_string(); let size = parse_size(matches.value_of("size").ok_or_else(|| { Error::MissingValue { @@ -196,6 +258,14 @@ async fn nexus_create( .map(|c| c.to_string()) .collect::>(); let size = size.get_bytes() as u64; + Ok((uuid, size, children)) +} + +async fn nexus_create( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let (uuid, size, children) = nexus_create_parse(matches)?; let response = ctx .client @@ -225,6 +295,54 @@ async fn nexus_create( Ok(()) } +async fn nexus_create_v2( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let (uuid, size, children) = nexus_create_parse(matches)?; + let name = matches.value_of("name").unwrap().to_string(); + let min_cntl_id = value_t!(matches.value_of("min-cntlid"), u32) + .unwrap_or_else(|e| e.exit()); + let max_cntl_id = value_t!(matches.value_of("max-cntlid"), u32) + .unwrap_or_else(|e| e.exit()); + let resv_key = value_t!(matches.value_of("resv-key"), u64) + .unwrap_or_else(|e| e.exit()); + let preempt_key = value_t!(matches.value_of("preempt-key"), u64) + .unwrap_or_else(|e| e.exit()); + + let response = ctx + .client + .create_nexus_v2(rpc::CreateNexusV2Request { + name: name.clone(), + uuid: uuid.clone(), + size, + min_cntl_id, + max_cntl_id, + resv_key, + preempt_key, + children, + }) + .await + .context(GrpcStatus)?; + + match ctx.output { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&response.get_ref()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + } + OutputFormat::Default => { + println!("{}", &response.get_ref().uuid); + } + }; + + Ok(()) +} + async fn nexus_destroy( mut ctx: Context, matches: &ArgMatches<'_>, @@ -322,6 +440,73 @@ async fn nexus_list( Ok(()) } +async fn nexus_list_v2( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let response = ctx + .client + .list_nexus_v2(rpc::Null {}) + .await + .context(GrpcStatus)?; + + match ctx.output { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&response.get_ref()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + } + OutputFormat::Default => { + let nexus = &response.get_ref().nexus_list; + if nexus.is_empty() { + ctx.v1("No nexus found"); + return Ok(()); + } + + ctx.v2("Found following nexus:"); + let show_child = matches.is_present("children"); + + let table = nexus + .iter() + .map(|n| { + let size = ctx.units(Byte::from_bytes(n.size.into())); + let state = nexus_state_to_str(n.state); + let mut row = vec![ + n.name.clone(), + n.uuid.clone(), + size, + state.to_string(), + n.rebuilds.to_string(), + n.device_uri.clone(), + ]; + if show_child { + row.push( + n.children + .iter() + .map(|c| c.uri.clone()) + .collect::>() + .join(","), + ) + } + row + }) + .collect(); + let mut hdr = + vec!["NAME", "UUID", ">SIZE", "STATE", ">REBUILDS", "PATH"]; + if show_child { + hdr.push("CHILDREN"); + } + ctx.print_list(hdr, table); + } + }; + + Ok(()) +} + async fn nexus_children( mut ctx: Context, matches: &ArgMatches<'_>, diff --git a/mayastor/src/bin/mayastor-client/replica_cli.rs b/mayastor/src/bin/mayastor-client/replica_cli.rs index 
844973c09..16cecc735 100644 --- a/mayastor/src/bin/mayastor-client/replica_cli.rs +++ b/mayastor/src/bin/mayastor-client/replica_cli.rs @@ -19,11 +19,10 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .required(true) .index(1) .help("Storage pool name")) - .arg( - Arg::with_name("uuid") + Arg::with_name("name") .required(true).index(2) - .help("Unique replica uuid")) + .help("Replica name")) .arg( Arg::with_name("protocol") @@ -47,6 +46,44 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .takes_value(false) .help("Whether replica is thin provisioned (default false)")); + let create_v2 = SubCommand::with_name("create2") + .about("Create replica on pool") + .arg( + Arg::with_name("pool") + .required(true) + .index(1) + .help("Storage pool name")) + .arg( + Arg::with_name("name") + .required(true) + .index(2) + .help("Replica name")) + .arg( + Arg::with_name("uuid") + .required(true).index(3) + .help("Unique replica uuid")) + .arg( + Arg::with_name("protocol") + .short("p") + .long("protocol") + .takes_value(true) + .value_name("PROTOCOL") + .help("Name of a protocol (nvmf, iSCSI) used for sharing the replica (default none)")) + .arg( + Arg::with_name("size") + .short("s") + .long("size") + .takes_value(true) + .required(true) + .value_name("NUMBER") + .help("Size of the replica")) + .arg( + Arg::with_name("thin") + .short("t") + .long("thin") + .takes_value(false) + .help("Whether replica is thin provisioned (default false)")); + let destroy = SubCommand::with_name("destroy") .about("Destroy replica") .arg( @@ -58,10 +95,10 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { let share = SubCommand::with_name("share").about("Share or unshare replica") .arg( - Arg::with_name("uuid") + Arg::with_name("name") .required(true) .index(1) - .help("Replica uuid")) + .help("Replica name")) .arg( Arg::with_name("protocol") .required(true) @@ -76,9 +113,11 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { ]) .about("Replica management") .subcommand(create) + .subcommand(create_v2) .subcommand(destroy) .subcommand(share) .subcommand(SubCommand::with_name("list").about("List replicas")) + .subcommand(SubCommand::with_name("list2").about("List replicas")) .subcommand( SubCommand::with_name("stats").about("IO stats of replicas"), ) @@ -90,8 +129,10 @@ pub async fn handler( ) -> crate::Result<()> { match matches.subcommand() { ("create", Some(args)) => replica_create(ctx, args).await, + ("create2", Some(args)) => replica_create_v2(ctx, args).await, ("destroy", Some(args)) => replica_destroy(ctx, args).await, ("list", Some(args)) => replica_list(ctx, args).await, + ("list2", Some(args)) => replica_list2(ctx, args).await, ("share", Some(args)) => replica_share(ctx, args).await, ("stats", Some(args)) => replica_stat(ctx, args).await, (cmd, _) => { @@ -111,6 +152,66 @@ async fn replica_create( field: "pool".to_string(), })? .to_owned(); + let name = matches + .value_of("name") + .ok_or_else(|| Error::MissingValue { + field: "name".to_string(), + })? + .to_owned(); + let size = parse_size(matches.value_of("size").ok_or_else(|| { + Error::MissingValue { + field: "size".to_string(), + } + })?) 
+ .map_err(|s| Status::invalid_argument(format!("Bad size '{}'", s))) + .context(GrpcStatus)?; + let thin = matches.is_present("thin"); + let share = parse_replica_protocol(matches.value_of("protocol")) + .context(GrpcStatus)?; + + let rq = rpc::CreateReplicaRequest { + uuid: name.clone(), + pool, + thin, + share, + size: size.get_bytes() as u64, + }; + let response = ctx.client.create_replica(rq).await.context(GrpcStatus)?; + + match ctx.output { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&response.get_ref()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + } + OutputFormat::Default => { + println!("{}", &response.get_ref().uri); + } + }; + + Ok(()) +} + +async fn replica_create_v2( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let pool = matches + .value_of("pool") + .ok_or_else(|| Error::MissingValue { + field: "pool".to_string(), + })? + .to_owned(); + let name = matches + .value_of("name") + .ok_or_else(|| Error::MissingValue { + field: "name".to_string(), + })? + .to_owned(); let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { @@ -128,14 +229,16 @@ async fn replica_create( let share = parse_replica_protocol(matches.value_of("protocol")) .context(GrpcStatus)?; - let rq = rpc::CreateReplicaRequest { + let rq = rpc::CreateReplicaRequestV2 { + name, uuid: uuid.clone(), pool, thin, share, size: size.get_bytes() as u64, }; - let response = ctx.client.create_replica(rq).await.context(GrpcStatus)?; + let response = + ctx.client.create_replica_v2(rq).await.context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { @@ -244,18 +347,71 @@ async fn replica_list( Ok(()) } +async fn replica_list2( + mut ctx: Context, + _matches: &ArgMatches<'_>, +) -> crate::Result<()> { + let response = ctx + .client + .list_replicas_v2(rpc::Null {}) + .await + .context(GrpcStatus)?; + + match ctx.output { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&response.get_ref()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + } + OutputFormat::Default => { + let replicas = &response.get_ref().replicas; + if replicas.is_empty() { + ctx.v1("No replicas found"); + return Ok(()); + } + + let table = replicas + .iter() + .map(|r| { + let proto = replica_protocol_to_str(r.share); + let size = ctx.units(Byte::from_bytes(r.size.into())); + vec![ + r.pool.clone(), + r.name.clone(), + r.uuid.clone(), + r.thin.to_string(), + proto.to_string(), + size, + r.uri.clone(), + ] + }) + .collect(); + ctx.print_list( + vec!["POOL", "NAME", "UUID", ">THIN", ">SHARE", ">SIZE", "URI"], + table, + ); + } + }; + + Ok(()) +} + async fn replica_share( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { - let uuid = matches.value_of("uuid").unwrap().to_owned(); + let name = matches.value_of("name").unwrap().to_owned(); let share = parse_replica_protocol(matches.value_of("protocol")) .context(GrpcStatus)?; let response = ctx .client .share_replica(rpc::ShareReplicaRequest { - uuid: uuid.clone(), + uuid: name.clone(), share, }) .await diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 57e5ad486..213011c3a 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -143,6 +143,18 @@ impl Share for Bdev { } None } + + /// return the URI that was used to construct the bdev, without uuid + fn bdev_uri_original(&self) -> Option { + for alias in self.aliases().iter() { + if let Ok(uri) = url::Url::parse(alias) { + if self == uri { + return Some(uri.to_string()); + } 
+ } + } + None + } } impl Bdev { @@ -347,7 +359,7 @@ impl Bdev { let mut ent_ptr = head.tqh_first; while !ent_ptr.is_null() { let ent = unsafe { &*ent_ptr }; - let alias = unsafe { CStr::from_ptr(ent.alias) }; + let alias = unsafe { CStr::from_ptr(ent.alias.name) }; aliases.push(alias.to_str().unwrap().to_string()); ent_ptr = ent.tailq.tqe_next; } @@ -473,12 +485,13 @@ impl Debug for Bdev { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { write!( f, - "name: {}, driver: {}, product: {}, num_blocks: {}, block_len: {}", + "name: {}, driver: {}, product: {}, num_blocks: {}, block_len: {}, alignment: {}", self.name(), self.driver(), self.product_name(), self.num_blocks(), - self.block_len() + self.block_len(), + self.alignment(), ) } } diff --git a/mayastor/src/core/block_device.rs b/mayastor/src/core/block_device.rs old mode 100755 new mode 100644 index 3ce5fd022..166fde7fa --- a/mayastor/src/core/block_device.rs +++ b/mayastor/src/core/block_device.rs @@ -180,9 +180,30 @@ pub trait BlockDeviceHandle { _register_action: u8, _cptpl: u8, ) -> Result<(), CoreError> { - Err(CoreError::NvmeIoPassthruDispatch { + Err(CoreError::NotSupported { + source: Errno::EOPNOTSUPP, + }) + } + + async fn nvme_resv_acquire( + &self, + _current_key: u64, + _preempt_key: u64, + _acquire_action: u8, + _resv_type: u8, + ) -> Result<(), CoreError> { + Err(CoreError::NotSupported { + source: Errno::EOPNOTSUPP, + }) + } + + async fn nvme_resv_report( + &self, + _cdw11: u32, + _buffer: &mut DmaBuf, + ) -> Result<(), CoreError> { + Err(CoreError::NotSupported { source: Errno::EOPNOTSUPP, - opcode: 0, // FIXME }) } @@ -196,6 +217,12 @@ pub trait BlockDeviceHandle { opcode: nvme_cmd.opc(), }) } + + async fn host_id(&self) -> Result<[u8; 16], CoreError> { + Err(CoreError::NotSupported { + source: Errno::EOPNOTSUPP, + }) + } } pub trait LbaRangeController {} @@ -237,4 +264,5 @@ pub enum DeviceEventType { DeviceRemoved, DeviceResized, MediaManagement, + AdminCommandCompletionFailed, } diff --git a/mayastor/src/core/channel.rs b/mayastor/src/core/channel.rs index 8e5a6e6e3..d9a3f9753 100644 --- a/mayastor/src/core/channel.rs +++ b/mayastor/src/core/channel.rs @@ -1,9 +1,11 @@ -use std::{ - fmt::{Debug, Error, Formatter}, - os::raw::c_char, -}; +use std::fmt::{Debug, Error, Formatter}; -use spdk_sys::{spdk_io_channel, spdk_put_io_channel}; +use spdk_sys::{ + spdk_io_channel, + spdk_io_channel_get_io_device_name, + spdk_put_io_channel, + spdk_thread_get_name, +}; use std::ptr::NonNull; pub struct IoChannel(NonNull); @@ -24,13 +26,9 @@ impl IoChannel { /// actual name fn name(&self) -> &str { unsafe { - // struct is opaque - std::ffi::CStr::from_ptr( - (*(self.0.as_ptr())) - .dev - .add(std::mem::size_of::<*mut spdk_io_channel>()) - as *const c_char, - ) + std::ffi::CStr::from_ptr(spdk_io_channel_get_io_device_name( + self.0.as_ptr(), + )) .to_str() .unwrap() } @@ -38,9 +36,11 @@ impl IoChannel { fn thread_name(&self) -> &str { unsafe { - std::ffi::CStr::from_ptr(&(*self.0.as_ref().thread).name[0]) - .to_str() - .unwrap() + std::ffi::CStr::from_ptr(spdk_thread_get_name( + self.0.as_ref().thread, + )) + .to_str() + .unwrap() } } } diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 55d400bf6..8a5c2c60d 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -40,6 +40,7 @@ use crate::{ core::{ reactor::{Reactor, ReactorState, Reactors}, Cores, + MayastorFeatures, Mthread, }, grpc, @@ -133,6 +134,24 @@ pub struct MayastorCliArgs { pub nvme_ctl_io_ctx_pool_size: u64, } +/// 
Mayastor features. +impl MayastorFeatures { + fn init_features() -> MayastorFeatures { + let ana = match std::env::var("NEXUS_NVMF_ANA_ENABLE") { + Ok(s) => s == "1", + Err(_) => false, + }; + + MayastorFeatures { + asymmetric_namespace_access: ana, + } + } + + pub fn get_features() -> Self { + MAYASTOR_FEATURES.get_or_init(Self::init_features).clone() + } +} + /// Defaults are redefined here in case of using it during tests impl Default for MayastorCliArgs { fn default() -> Self { @@ -295,6 +314,7 @@ async fn do_shutdown(arg: *mut c_void) { iscsi::fini(); nexus::nexus_children_to_destroying_state().await; + crate::lvs::Lvs::export_all().await; unsafe { spdk_rpc_finish(); spdk_subsystem_fini(Some(reactors_stop), arg); @@ -344,6 +364,8 @@ struct SubsystemCtx { sender: futures::channel::oneshot::Sender, } +static MAYASTOR_FEATURES: OnceCell = OnceCell::new(); + static MAYASTOR_DEFAULT_ENV: OnceCell = OnceCell::new(); impl MayastorEnvironment { pub fn new(args: MayastorCliArgs) -> Self { @@ -386,16 +408,18 @@ impl MayastorEnvironment { /// configure signal handling fn install_signal_handlers(&self) { unsafe { - signal_hook::register(signal_hook::SIGTERM, || { - mayastor_signal_handler(1) - }) + signal_hook::low_level::register( + signal_hook::consts::SIGTERM, + || mayastor_signal_handler(1), + ) } .unwrap(); unsafe { - signal_hook::register(signal_hook::SIGINT, || { - mayastor_signal_handler(1) - }) + signal_hook::low_level::register( + signal_hook::consts::SIGINT, + || mayastor_signal_handler(1), + ) } .unwrap(); } diff --git a/mayastor/src/core/handle.rs b/mayastor/src/core/handle.rs index eaf39b7a2..3dad630e9 100644 --- a/mayastor/src/core/handle.rs +++ b/mayastor/src/core/handle.rs @@ -16,6 +16,7 @@ use spdk_sys::{ spdk_bdev_read, spdk_bdev_reset, spdk_bdev_write, + spdk_bdev_write_zeroes, spdk_io_channel, }; @@ -213,6 +214,41 @@ impl BdevHandle { } } + pub async fn write_zeroes_at( + &self, + offset: u64, + len: u64, + ) -> Result<(), CoreError> { + let (s, r) = oneshot::channel::(); + let errno = unsafe { + spdk_bdev_write_zeroes( + self.desc.as_ptr(), + self.channel.as_ptr(), + offset, + len, + Some(Self::io_completion_cb), + cb_arg(s), + ) + }; + + if errno != 0 { + return Err(CoreError::WriteZeroesDispatch { + source: Errno::from_i32(errno.abs()), + offset, + len, + }); + } + + if r.await.expect("Failed awaiting write zeroes IO") { + Ok(()) + } else { + Err(CoreError::WriteZeroesFailed { + offset, + len, + }) + } + } + /// create a snapshot, only works for nvme bdev /// returns snapshot time as u64 seconds since Unix epoch pub async fn create_snapshot(&self) -> Result { diff --git a/mayastor/src/core/io_device.rs b/mayastor/src/core/io_device.rs old mode 100755 new mode 100644 diff --git a/mayastor/src/core/mempool.rs b/mayastor/src/core/mempool.rs old mode 100755 new mode 100644 diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 3e7f4d671..68e4942a6 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -40,6 +40,10 @@ pub use io_device::IoDevice; pub use nvme::{ nvme_admin_opc, nvme_nvm_opcode, + nvme_reservation_acquire_action, + nvme_reservation_register_action, + nvme_reservation_register_cptpl, + nvme_reservation_type, GenericStatusCode, NvmeCommandStatus, NvmeStatus, @@ -136,6 +140,16 @@ pub enum CoreError { offset: u64, len: u64, }, + #[snafu(display( + "Failed to dispatch write-zeroes at offset {} length {}", + offset, + len + ))] + WriteZeroesDispatch { + source: Errno, + offset: u64, + len: u64, + }, #[snafu(display( "Failed to dispatch 
NVMe IO passthru command {:x}h: {}", opcode, @@ -157,6 +171,15 @@ pub enum CoreError { }, #[snafu(display("Reset failed"))] ResetFailed {}, + #[snafu(display( + "Write zeroes failed at offset {} length {}", + offset, + len + ))] + WriteZeroesFailed { + offset: u64, + len: u64, + }, #[snafu(display("NVMe Admin command {:x}h failed", opcode))] NvmeAdminFailed { opcode: u16, @@ -282,7 +305,7 @@ impl MayastorWorkQueue { } pub fn take(&self) -> Option { - if let Ok(elem) = self.incoming.pop() { + if let Some(elem) = self.incoming.pop() { return Some(elem); } None @@ -291,3 +314,8 @@ impl MayastorWorkQueue { pub static MWQ: once_cell::sync::Lazy> = once_cell::sync::Lazy::new(MayastorWorkQueue::new); + +#[derive(Debug, Clone)] +pub struct MayastorFeatures { + pub asymmetric_namespace_access: bool, +} diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs index 80dfd248d..976f403a0 100644 --- a/mayastor/src/core/nvme.rs +++ b/mayastor/src/core/nvme.rs @@ -237,11 +237,38 @@ pub mod nvme_nvm_opcode { // pub const WRITE_ZEROES: u8 = 0x08; // pub const DATASET_MANAGEMENT: u8 = 0x09; pub const RESERVATION_REGISTER: u8 = 0x0d; - // pub const RESERVATION_REPORT: u8 = 0x0e; - // pub const RESERVATION_ACQUIRE: u8 = 0x11; + pub const RESERVATION_REPORT: u8 = 0x0e; + pub const RESERVATION_ACQUIRE: u8 = 0x11; // pub const RESERVATION_RELEASE: u8 = 0x15; } +pub mod nvme_reservation_type { + pub const WRITE_EXCLUSIVE: u8 = 0x1; + pub const EXCLUSIVE_ACCESS: u8 = 0x2; + pub const WRITE_EXCLUSIVE_REG_ONLY: u8 = 0x3; + pub const EXCLUSIVE_ACCESS_REG_ONLY: u8 = 0x4; + pub const WRITE_EXCLUSIVE_ALL_REGS: u8 = 0x5; + pub const EXCLUSIVE_ACCESS_ALL_REGS: u8 = 0x6; +} + +pub mod nvme_reservation_register_action { + pub const REGISTER_KEY: u8 = 0x0; + pub const UNREGISTER_KEY: u8 = 0x1; + pub const REPLACE_KEY: u8 = 0x2; +} + +pub mod nvme_reservation_register_cptpl { + pub const NO_CHANGES: u8 = 0x0; + pub const CLEAR_POWER_ON: u8 = 0x2; + pub const PERSIST_POWER_LOSS: u8 = 0x3; +} + +pub mod nvme_reservation_acquire_action { + pub const ACQUIRE: u8 = 0x0; + pub const PREEMPT: u8 = 0x1; + pub const PREEMPT_ABORT: u8 = 0x2; +} + impl NvmeCommandStatus { pub fn from_command_status_raw(sct: i32, sc: i32) -> Self { match StatusCodeType::from(sct) { diff --git a/mayastor/src/core/reactor.rs b/mayastor/src/core/reactor.rs index 4ea3ad3d5..fd05fb51a 100644 --- a/mayastor/src/core/reactor.rs +++ b/mayastor/src/core/reactor.rs @@ -459,7 +459,7 @@ impl Reactor { }); drop(threads); - while let Ok(i) = self.incoming.pop() { + while let Some(i) = self.incoming.pop() { self.threads.borrow_mut().push_back(i); } } @@ -481,7 +481,7 @@ impl Reactor { self.run_futures(); drop(threads); - while let Ok(i) = self.incoming.pop() { + while let Some(i) = self.incoming.pop() { self.threads.borrow_mut().push_back(i); } } diff --git a/mayastor/src/core/share.rs b/mayastor/src/core/share.rs index a108d4f31..a786e2e17 100644 --- a/mayastor/src/core/share.rs +++ b/mayastor/src/core/share.rs @@ -55,4 +55,5 @@ pub trait Share: std::fmt::Debug { fn shared(&self) -> Option; fn share_uri(&self) -> Option; fn bdev_uri(&self) -> Option; + fn bdev_uri_original(&self) -> Option; } diff --git a/mayastor/src/core/thread.rs b/mayastor/src/core/thread.rs index 46f983112..df70c48e5 100644 --- a/mayastor/src/core/thread.rs +++ b/mayastor/src/core/thread.rs @@ -9,6 +9,8 @@ use spdk_sys::{ spdk_thread_destroy, spdk_thread_exit, spdk_thread_get_by_id, + spdk_thread_get_id, + spdk_thread_get_name, spdk_thread_is_exited, spdk_thread_poll,
spdk_thread_send_msg, @@ -65,7 +67,7 @@ impl Mthread { } pub fn id(&self) -> u64 { - unsafe { (self.0.as_ref()).id } + unsafe { spdk_thread_get_id(self.0.as_ptr()) } } /// /// # Note @@ -104,7 +106,7 @@ impl Mthread { pub fn name(&self) -> &str { unsafe { - std::ffi::CStr::from_ptr(&self.0.as_ref().name[0]) + std::ffi::CStr::from_ptr(spdk_thread_get_name(self.0.as_ptr())) .to_str() .unwrap() } @@ -214,7 +216,7 @@ impl Mthread { .expect("sender already taken") .send(result) { - error!("Failed to send with error {:?}", e); + error!("Failed to send response future result {:?}", e); } }) .detach(); diff --git a/mayastor/src/grpc/controller_grpc.rs b/mayastor/src/grpc/controller_grpc.rs old mode 100755 new mode 100644 index 0867958ae..85718a46e --- a/mayastor/src/grpc/controller_grpc.rs +++ b/mayastor/src/grpc/controller_grpc.rs @@ -5,10 +5,13 @@ use crate::{ NvmeControllerState, NVME_CONTROLLERS, }, + core::{BlockDeviceIoStats, CoreError}, + ffihelper::{cb_arg, done_cb}, grpc::{rpc_submit, GrpcResult}, }; use ::rpc::mayastor as rpc; +use futures::channel::oneshot; use std::convert::From; use tonic::{Response, Status}; @@ -48,6 +51,68 @@ impl From for rpc::NvmeControllerState { } } +impl From for rpc::NvmeControllerIoStats { + fn from(b: BlockDeviceIoStats) -> Self { + Self { + num_read_ops: b.num_read_ops, + num_write_ops: b.num_write_ops, + bytes_read: b.bytes_read, + bytes_written: b.bytes_written, + num_unmap_ops: b.num_unmap_ops, + bytes_unmapped: b.bytes_unmapped, + } + } +} + +pub async fn controller_stats() -> GrpcResult { + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + let mut res: Vec = Vec::new(); + let controllers = NVME_CONTROLLERS.controllers(); + for name in controllers.iter() { + if let Some(ctrlr) = NVME_CONTROLLERS.lookup_by_name(name) { + let (s, r) = + oneshot::channel::>(); + + { + let ctrlr = ctrlr.lock(); + + if let Err(e) = ctrlr.get_io_stats( + |stats, ch| { + done_cb(ch, stats); + }, + cb_arg(s), + ) { + error!( + "{}: failed to get stats for NVMe controller: {:?}", + name, e + ); + continue; + } + } + + let stats = r.await.expect("Failed awaiting at io_stats"); + if stats.is_err() { + error!("{}: failed to get stats for NVMe controller", name); + } else { + res.push(rpc::NvmeControllerStats { + name: name.to_string(), + stats: stats.ok().map(rpc::NvmeControllerIoStats::from), + }); + } + } + } + + Ok(rpc::StatNvmeControllersReply { + controllers: res, + }) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? 
+ .map_err(Status::from) + .map(Response::new) +} + pub async fn list_controllers() -> GrpcResult { let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { let controllers = NVME_CONTROLLERS diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 9a4697717..138eb9844 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -12,11 +12,20 @@ use crate::{ bdev::{ nexus::{instances, nexus_bdev}, nexus_create, + nexus_create_v2, Reason, }, - core::{Bdev, BlockDeviceIoStats, CoreError, Protocol, Share}, + core::{ + Bdev, + BlockDeviceIoStats, + CoreError, + MayastorFeatures, + Protocol, + Share, + }, grpc::{ - controller_grpc::list_controllers, + controller_grpc::{controller_stats, list_controllers}, + mayastor_grpc::nexus_bdev::NexusNvmeParams, nexus_grpc::{ nexus_add_child, nexus_destroy, @@ -24,6 +33,7 @@ use crate::{ uuid_to_name, }, rpc_submit, + GrpcClientContext, GrpcResult, Serializer, }, @@ -32,36 +42,76 @@ use crate::{ nexus_uri::NexusBdevError, subsys::PoolConfig, }; +use futures::FutureExt; use nix::errno::Errno; use rpc::mayastor::*; -use std::{convert::TryFrom, ops::Deref, time::Duration}; +use std::{convert::TryFrom, fmt::Debug, ops::Deref, time::Duration}; use tonic::{Request, Response, Status}; #[derive(Debug)] struct UnixStream(tokio::net::UnixStream); +use ::function_name::named; +use git_version::git_version; +use std::panic::AssertUnwindSafe; + +impl GrpcClientContext { + #[track_caller] + pub fn new(req: &Request, fid: &str) -> Self + where + T: Debug, + { + Self { + args: format!("{:?}", req.get_ref()), + id: fid.to_string(), + } + } +} + #[derive(Debug)] pub struct MayastorSvc { name: String, interval: Duration, - rw_lock: tokio::sync::RwLock, + rw_lock: tokio::sync::RwLock>, } #[async_trait::async_trait] -impl Serializer for MayastorSvc +impl Serializer for MayastorSvc where - F: core::future::Future + Send + 'static, - R: Send, + T: Send + 'static, + F: core::future::Future> + Send + 'static, { - async fn locked(&self, f: F) -> R { - let mut lock = self.rw_lock.write().await; - trace!(?self.name, "locked"); - assert!(!*lock); - *lock = true; - let output = f.await; - tokio::time::sleep(self.interval).await; - trace!(?self.name, "unlocked"); - *lock = false; - output + async fn locked(&self, ctx: GrpcClientContext, f: F) -> Result { + let mut guard = self.rw_lock.write().await; + + // Store the context as a marker to detect abnormal termination of the + // request. Even though AssertUnwindSafe() allows us to + // intercept panics in the underlying method bodies, such a + // situation can still happen when the high-level future that + // represents the gRPC call at the highest level (i.e. the one created + // by the gRPC server) gets cancelled (due to a timeout or otherwise). + // This can't be properly intercepted by the 'locked' function itself in + // the first place, so the stale state needs to be cleaned up + // upon subsequent gRPC calls. + if let Some(c) = guard.replace(ctx) { + warn!("{}: gRPC method timed out, args: {}", c.id, c.args); + } + + let fut = AssertUnwindSafe(f).catch_unwind(); + let r = fut.await; + + // Request completed, remove the marker.
+ let ctx = guard.take().expect("gRPC context disappeared"); + + match r { + Ok(r) => r, + Err(_e) => { + warn!("{}: gRPC method panicked, args: {}", ctx.id, ctx.args); + Err(Status::cancelled(format!( + "{}: gRPC method panicked", + ctx.id + ))) + } + } } } @@ -70,7 +120,7 @@ impl MayastorSvc { Self { name: String::from("CSISvc"), interval, - rw_lock: tokio::sync::RwLock::new(false), + rw_lock: tokio::sync::RwLock::new(None), } } } @@ -154,87 +204,126 @@ impl From for Replica { } } +impl From for ReplicaV2 { + fn from(l: Lvol) -> Self { + Self { + name: l.name(), + uuid: l.uuid(), + pool: l.pool(), + thin: l.is_thin(), + size: l.size(), + share: l.shared().unwrap().into(), + uri: l.share_uri().unwrap(), + } + } +} + +impl From for rpc::mayastor::MayastorFeatures { + fn from(f: MayastorFeatures) -> Self { + Self { + asymmetric_namespace_access: f.asymmetric_namespace_access, + } + } +} + #[tonic::async_trait] impl mayastor_server::Mayastor for MayastorSvc { + #[named] async fn create_pool( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - - if args.disks.is_empty() { - return Err(Status::invalid_argument("Missing devices")); - } + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); - let rx = rpc_submit::<_, _, LvsError>(async move { - let pool = Lvs::create_or_import(args).await?; - // Capture current pool config and export to file. - PoolConfig::capture().export().await; - Ok(Pool::from(pool)) - })?; + if args.disks.is_empty() { + return Err(Status::invalid_argument("Missing devices")); + } - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + let rx = rpc_submit::<_, _, LvsError>(async move { + let pool = Lvs::create_or_import(args).await?; + // Capture current pool config and export to file. + PoolConfig::capture().export().await; + Ok(Pool::from(pool)) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn destroy_pool( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - let rx = rpc_submit::<_, _, LvsError>(async move { - if let Some(pool) = Lvs::lookup(&args.name) { - // Remove pool from current config and export to file. - // Do this BEFORE we actually destroy the pool. - let mut config = PoolConfig::capture(); - config.delete(&args.name); - config.export().await; - - pool.destroy().await?; - } - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + info!("{:?}", args); + let rx = rpc_submit::<_, _, LvsError>(async move { + if let Some(pool) = Lvs::lookup(&args.name) { + // Remove pool from current config and export to file. + // Do this BEFORE we actually destroy the pool. + let mut config = PoolConfig::capture(); + config.delete(&args.name); + config.export().await; + + pool.destroy().await?; + } + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? 
+ .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn list_pools( &self, - _request: Request, + request: Request, ) -> GrpcResult { - self.locked(async move { - let rx = rpc_submit::<_, _, LvsError>(async move { - Ok(ListPoolsReply { - pools: Lvs::iter().map(|l| l.into()).collect::>(), - }) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let rx = rpc_submit::<_, _, LvsError>(async move { + Ok(ListPoolsReply { + pools: Lvs::iter() + .map(|l| l.into()) + .collect::>(), + }) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn create_replica( &self, request: Request, ) -> GrpcResult { - self.locked(async move { + self.locked(GrpcClientContext::new(&request, function_name!()), async move { let rx = rpc_submit(async move { let args = request.into_inner(); + if Lvs::lookup(&args.pool).is_none() { return Err(LvsError::Invalid { source: Errno::ENOSYS, @@ -257,7 +346,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } let p = Lvs::lookup(&args.pool).unwrap(); - match p.create_lvol(&args.uuid, args.size, false).await { + match p.create_lvol(&args.uuid, args.size, None, false).await { Ok(lvol) if Protocol::try_from(args.share)? == Protocol::Nvmf => { @@ -292,11 +381,80 @@ impl mayastor_server::Mayastor for MayastorSvc { }).await } + #[named] + async fn create_replica_v2( + &self, + request: Request, + ) -> GrpcResult { + self.locked(GrpcClientContext::new(&request, function_name!()), async move { + let rx = rpc_submit(async move { + let args = request.into_inner(); + + let lvs = match Lvs::lookup(&args.pool) { + Some(lvs) => lvs, + None => { + return Err(LvsError::Invalid { + source: Errno::ENOSYS, + msg: format!("Pool {} not found", args.pool), + }) + } + }; + + if let Some(b) = Bdev::lookup_by_name(&args.name) { + let lvol = Lvol::try_from(b)?; + return Ok(ReplicaV2::from(lvol)); + } + + if !matches!( + Protocol::try_from(args.share)?, + Protocol::Off | Protocol::Nvmf + ) { + return Err(LvsError::ReplicaShareProtocol { + value: args.share, + }); + } + + match lvs.create_lvol(&args.name, args.size, Some(&args.uuid), false).await { + Ok(lvol) + if Protocol::try_from(args.share)? == Protocol::Nvmf => + { + match lvol.share_nvmf(None).await { + Ok(s) => { + debug!("created and shared {} as {}", lvol, s); + Ok(ReplicaV2::from(lvol)) + } + Err(e) => { + debug!( + "failed to share created lvol {}: {} (destroying)", + lvol, + e.to_string() + ); + let _ = lvol.destroy().await; + Err(e) + } + } + } + Ok(lvol) => { + debug!("created lvol {}", lvol); + Ok(ReplicaV2::from(lvol)) + } + Err(e) => Err(e), + } + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? 
+ .map_err(Status::from) + .map(Response::new) + }).await + } + + #[named] async fn destroy_replica( &self, request: Request, ) -> GrpcResult { - self.locked(async { + self.locked(GrpcClientContext::new(&request, function_name!()), async { let args = request.into_inner(); let rx = rpc_submit::<_, _, LvsError>(async move { if let Some(bdev) = Bdev::lookup_by_name(&args.uuid) { @@ -314,11 +472,12 @@ impl mayastor_server::Mayastor for MayastorSvc { .await } + #[named] async fn list_replicas( &self, - _request: Request, + request: Request, ) -> GrpcResult { - self.locked(async { + self.locked(GrpcClientContext::new(&request, function_name!()), async { let rx = rpc_submit::<_, _, LvsError>(async move { let mut replicas = Vec::new(); if let Some(bdev) = Bdev::bdev_first() { @@ -342,6 +501,35 @@ impl mayastor_server::Mayastor for MayastorSvc { .await } + #[named] + async fn list_replicas_v2( + &self, + request: Request, + ) -> GrpcResult { + self.locked(GrpcClientContext::new(&request, function_name!()), async { + let rx = rpc_submit::<_, _, LvsError>(async move { + let mut replicas = Vec::new(); + if let Some(bdev) = Bdev::bdev_first() { + replicas = bdev + .into_iter() + .filter(|b| b.driver() == "lvol") + .map(|b| ReplicaV2::from(Lvol::try_from(b).unwrap())) + .collect(); + } + + Ok(ListReplicasReplyV2 { + replicas, + }) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }) + .await + } + // TODO; lost track of what this is supposed to do async fn stat_replicas( &self, @@ -380,110 +568,161 @@ impl mayastor_server::Mayastor for MayastorSvc { .map(Response::new) } + #[named] async fn share_replica( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - let rx = rpc_submit(async move { - match Bdev::lookup_by_name(&args.uuid) { - Some(bdev) => { - let lvol = Lvol::try_from(bdev)?; - - // if we are already shared ... - if lvol.shared() - == Some(Protocol::try_from(args.share)?) - { - return Ok(ShareReplicaReply { - uri: lvol.share_uri().unwrap(), - }); - } - - match Protocol::try_from(args.share)? { - Protocol::Off => { - lvol.unshare().await?; - } - Protocol::Nvmf => { - lvol.share_nvmf(None).await?; - } - Protocol::Iscsi => { - return Err(LvsError::LvolShare { - source: CoreError::NotSupported { - source: Errno::ENOSYS, - }, - name: args.uuid, + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + let rx = rpc_submit(async move { + match Bdev::lookup_by_name(&args.uuid) { + Some(bdev) => { + let lvol = Lvol::try_from(bdev)?; + + // if we are already shared ... + if lvol.shared() + == Some(Protocol::try_from(args.share)?) + { + return Ok(ShareReplicaReply { + uri: lvol.share_uri().unwrap(), }); } - } - Ok(ShareReplicaReply { - uri: lvol.share_uri().unwrap(), - }) - } + match Protocol::try_from(args.share)? { + Protocol::Off => { + lvol.unshare().await?; + } + Protocol::Nvmf => { + lvol.share_nvmf(None).await?; + } + Protocol::Iscsi => { + return Err(LvsError::LvolShare { + source: CoreError::NotSupported { + source: Errno::ENOSYS, + }, + name: args.uuid, + }); + } + } - None => Err(LvsError::InvalidBdev { - source: NexusBdevError::BdevNotFound { - name: args.uuid.clone(), - }, - name: args.uuid, - }), - } - })?; + Ok(ShareReplicaReply { + uri: lvol.share_uri().unwrap(), + }) + } - rx.await - .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) - }) + None => Err(LvsError::InvalidBdev { + source: NexusBdevError::BdevNotFound { + name: args.uuid.clone(), + }, + name: args.uuid, + }), + } + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn create_nexus( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - let uuid = args.uuid.clone(); - let name = uuid_to_name(&args.uuid)?; - nexus_create( - &name, - args.size, - Some(&args.uuid), - &args.children, - ) - .await?; - let nexus = nexus_lookup(&uuid)?; - info!("Created nexus {}", uuid); - Ok(nexus.to_grpc()) - })?; - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + let uuid = args.uuid.clone(); + let name = uuid_to_name(&args.uuid)?; + nexus_create( + &name, + args.size, + Some(&args.uuid), + &args.children, + ) + .await?; + let nexus = nexus_lookup(&uuid)?; + info!("Created nexus {}", uuid); + Ok(nexus.to_grpc()) + })?; + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] + async fn create_nexus_v2( + &self, + request: Request, + ) -> GrpcResult { + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_create_v2( + &args.name, + args.size, + &args.uuid, + NexusNvmeParams { + min_cntlid: args.min_cntl_id as u16, + max_cntlid: args.max_cntl_id as u16, + resv_key: args.resv_key, + preempt_key: match args.preempt_key { + 0 => None, + k => std::num::NonZeroU64::new(k), + }, + }, + &args.children, + ) + .await?; + let nexus = nexus_lookup(&args.name)?; + info!("Created nexus {}", &args.name); + Ok(nexus.to_grpc()) + })?; + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) + .await + } + + #[named] async fn destroy_nexus( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - let args = request.into_inner(); - trace!("{:?}", args); - nexus_destroy(&args.uuid).await?; - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + let args = request.into_inner(); + trace!("{:?}", args); + nexus_destroy(&args.uuid).await?; + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? 
+ .map_err(Status::from) + .map(Response::new) + }, + ) .await } @@ -512,6 +751,33 @@ impl mayastor_server::Mayastor for MayastorSvc { .map(Response::new) } + async fn list_nexus_v2( + &self, + request: Request, + ) -> GrpcResult { + let args = request.into_inner(); + trace!("{:?}", args); + + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + let mut nexus_list: Vec = Vec::new(); + + for n in instances() { + if n.state.lock().deref() != &nexus_bdev::NexusState::Init { + nexus_list.push(n.to_grpc_v2().await); + } + } + + Ok(ListNexusV2Reply { + nexus_list, + }) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + } + async fn add_child_nexus( &self, request: Request, @@ -691,175 +957,207 @@ impl mayastor_server::Mayastor for MayastorSvc { .map(Response::new) } + #[named] async fn child_operation( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - let args = request.into_inner(); - trace!("{:?}", args); - - let onl = match args.action { - 1 => Ok(true), - 0 => Ok(false), - _ => Err(nexus_bdev::Error::InvalidKey {}), - }?; - - let nexus = nexus_lookup(&args.uuid)?; - if onl { - nexus.online_child(&args.uri).await?; - } else { - nexus.offline_child(&args.uri).await?; - } + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + let args = request.into_inner(); + trace!("{:?}", args); + + let onl = match args.action { + 1 => Ok(true), + 0 => Ok(false), + _ => Err(nexus_bdev::Error::InvalidKey {}), + }?; + + let nexus = nexus_lookup(&args.uuid)?; + if onl { + nexus.online_child(&args.uri).await?; + } else { + nexus.offline_child(&args.uri).await?; + } - Ok(Null {}) - })?; + Ok(Null {}) + })?; - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn start_rebuild( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - trace!("{:?}", args); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&args.uuid)? - .start_rebuild(&args.uri) - .await - .map(|_| {})?; - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + trace!("{:?}", args); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&args.uuid)? + .start_rebuild(&args.uri) + .await + .map(|_| {})?; + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn stop_rebuild( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - trace!("{:?}", args); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&args.uuid)?.stop_rebuild(&args.uri).await?; - - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + trace!("{:?}", args); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&args.uuid)?.stop_rebuild(&args.uri).await?; + + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn pause_rebuild( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let msg = request.into_inner(); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&msg.uuid)?.pause_rebuild(&msg.uri).await?; - - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let msg = request.into_inner(); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&msg.uuid)?.pause_rebuild(&msg.uri).await?; + + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn resume_rebuild( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let msg = request.into_inner(); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&msg.uuid)?.resume_rebuild(&msg.uri).await?; - Ok(Null {}) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let msg = request.into_inner(); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&msg.uuid)?.resume_rebuild(&msg.uri).await?; + Ok(Null {}) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn get_rebuild_state( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - trace!("{:?}", args); - nexus_lookup(&args.uuid)?.get_rebuild_state(&args.uri).await - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + trace!("{:?}", args); + nexus_lookup(&args.uuid)?.get_rebuild_state(&args.uri).await + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn get_rebuild_stats( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - trace!("{:?}", args); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&args.uuid)?.get_rebuild_stats(&args.uri).await - })?; - rx.await - .map_err(|_| Status::cancelled("cancelled"))? 
- .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + trace!("{:?}", args); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&args.uuid)?.get_rebuild_stats(&args.uri).await + })?; + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } + #[named] async fn get_rebuild_progress( &self, request: Request, ) -> GrpcResult { - self.locked(async move { - let args = request.into_inner(); - trace!("{:?}", args); - let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { - nexus_lookup(&args.uuid)?.get_rebuild_progress(&args.uri) - })?; - - rx.await - .map_err(|_| Status::cancelled("cancelled"))? - .map_err(Status::from) - .map(Response::new) - }) + self.locked( + GrpcClientContext::new(&request, function_name!()), + async move { + let args = request.into_inner(); + trace!("{:?}", args); + let rx = rpc_submit::<_, _, nexus_bdev::Error>(async move { + nexus_lookup(&args.uuid)?.get_rebuild_progress(&args.uri) + })?; + + rx.await + .map_err(|_| Status::cancelled("cancelled"))? + .map_err(Status::from) + .map(Response::new) + }, + ) .await } @@ -913,4 +1211,29 @@ impl mayastor_server::Mayastor for MayastorSvc { ) -> GrpcResult { list_controllers().await } + + async fn stat_nvme_controllers( + &self, + _request: Request, + ) -> GrpcResult { + controller_stats().await + } + + async fn get_mayastor_info( + &self, + _request: Request, + ) -> GrpcResult { + let features = MayastorFeatures::get_features().into(); + + let reply = MayastorInfoRequest { + version: git_version!( + args = ["--tags", "--abbrev=12"], + fallback = "unknown" + ) + .to_string(), + supported_features: Some(features), + }; + + Ok(Response::new(reply)) + } } diff --git a/mayastor/src/grpc/mod.rs b/mayastor/src/grpc/mod.rs index c57353bba..71195059e 100644 --- a/mayastor/src/grpc/mod.rs +++ b/mayastor/src/grpc/mod.rs @@ -41,10 +41,16 @@ mod mayastor_grpc; mod nexus_grpc; mod server; +#[derive(Debug)] +pub(crate) struct GrpcClientContext { + pub args: String, + pub id: String, +} + #[async_trait::async_trait] /// trait to lock serialize gRPC request outstanding -pub(crate) trait Serializer { - async fn locked(&self, f: F) -> R; +pub(crate) trait Serializer { + async fn locked(&self, ctx: GrpcClientContext, f: F) -> Result; } pub type GrpcResult = std::result::Result, Status>; diff --git a/mayastor/src/grpc/nexus_grpc.rs b/mayastor/src/grpc/nexus_grpc.rs index f0e9524a8..deed71acc 100644 --- a/mayastor/src/grpc/nexus_grpc.rs +++ b/mayastor/src/grpc/nexus_grpc.rs @@ -10,6 +10,7 @@ use crate::{ nexus_bdev::{Error, Nexus, NexusStatus}, nexus_child::{ChildState, NexusChild, Reason}, }, + core::{Protocol, Share}, rebuild::RebuildJob, }; @@ -73,6 +74,32 @@ impl Nexus { rebuilds: RebuildJob::count() as u32, } } + + pub async fn to_grpc_v2(&self) -> rpc::NexusV2 { + let mut ana_state = rpc::NvmeAnaState::NvmeAnaInvalidState; + + // Get ANA state only for published nexuses. 
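// In other words: a nexus that is not published over NVMe-oF is reported with
// NvmeAnaInvalidState; when it is published, get_ana_state() is queried and any
// error from that query is swallowed, so listing nexuses never fails just
// because the ANA lookup did.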
+ if let Some(Protocol::Nvmf) = self.shared() { + if let Ok(state) = self.get_ana_state().await { + ana_state = state; + } + } + + rpc::NexusV2 { + name: name_to_uuid(&self.name).to_string(), + uuid: self.uuid().to_string(), + size: self.size, + state: rpc::NexusState::from(self.status()) as i32, + device_uri: self.get_share_uri().unwrap_or_default(), + children: self + .children + .iter() + .map(|ch| ch.to_grpc()) + .collect::>(), + rebuilds: RebuildJob::count() as u32, + ana_state: ana_state as i32, + } + } } /// Convert nexus name to uuid. @@ -99,17 +126,26 @@ pub fn uuid_to_name(uuid: &str) -> Result { } } -/// Lookup a nexus by its uuid prepending "nexus-" prefix. Return error if -/// uuid is invalid or nexus not found. +/// Look up a nexus by name first (if created by nexus_create_v2) then by its +/// uuid prepending "nexus-" prefix. +/// Return error if nexus not found. pub fn nexus_lookup(uuid: &str) -> Result<&mut Nexus, Error> { - let name = uuid_to_name(uuid)?; - - if let Some(nexus) = instances().iter_mut().find(|n| n.name == name) { + if let Some(nexus) = instances().iter_mut().find(|n| n.name == uuid) { + Ok(nexus) + } else if let Some(nexus) = instances() + .iter_mut() + .find(|n| n.uuid().to_string() == uuid) + { Ok(nexus) } else { - Err(Error::NexusNotFound { - name: uuid.to_owned(), - }) + let name = uuid_to_name(uuid)?; + if let Some(nexus) = instances().iter_mut().find(|n| n.name == name) { + Ok(nexus) + } else { + Err(Error::NexusNotFound { + name: uuid.to_owned(), + }) + } } } diff --git a/mayastor/src/lib.rs b/mayastor/src/lib.rs index bde62d35f..17b8a14a8 100644 --- a/mayastor/src/lib.rs +++ b/mayastor/src/lib.rs @@ -5,6 +5,7 @@ extern crate tracing; extern crate nix; #[macro_use] extern crate serde; +extern crate function_name; extern crate serde_json; extern crate snafu; extern crate spdk_sys; diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index bff0ebd00..a65121d95 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -161,13 +161,12 @@ where } else { Style::new() }; - let scope = self .span .and_then(|id| self.context.span(id)) .or_else(|| self.context.lookup_current()) .into_iter() - .flat_map(|span| span.from_root().chain(std::iter::once(span))); + .flat_map(|span| span.scope().from_root()); for span in scope { write!(f, ":{}", bold.paint(span.metadata().name()))?; diff --git a/mayastor/src/lvs/lvol.rs b/mayastor/src/lvs/lvol.rs index 5107208b6..2a34e2da7 100644 --- a/mayastor/src/lvs/lvol.rs +++ b/mayastor/src/lvs/lvol.rs @@ -10,7 +10,6 @@ use async_trait::async_trait; use futures::channel::oneshot; use nix::errno::Errno; use pin_utils::core_reexport::fmt::Formatter; -use tracing::instrument; use spdk_sys::{ spdk_blob_get_xattr_value, @@ -22,6 +21,8 @@ use spdk_sys::{ vbdev_lvol_create_snapshot, vbdev_lvol_destroy, vbdev_lvol_get_from_bdev, + LVS_CLEAR_WITH_UNMAP, + SPDK_BDEV_LARGE_BUF_MAX_SIZE, }; use crate::{ @@ -121,7 +122,6 @@ impl Share for Lvol { } /// share the lvol as a nvmf target - #[instrument(level = "debug", err)] async fn share_nvmf( &self, cntlid_range: Option<(u16, u16)>, @@ -140,7 +140,6 @@ impl Share for Lvol { } /// unshare the nvmf target - #[instrument(level = "debug", err)] async fn unshare(&self) -> Result { let share = self.as_bdev() @@ -176,6 +175,10 @@ impl Share for Lvol { fn bdev_uri(&self) -> Option { None } + + fn bdev_uri_original(&self) -> Option { + None + } } impl Lvol { @@ -223,6 +226,50 @@ impl Lvol { } } + // wipe the first 8MB if unmap is not supported on failure the operation + // needs to be 
repeated + pub async fn wipe_super(&self) -> Result<(), Error> { + if !unsafe { self.0.as_ref().clear_method == LVS_CLEAR_WITH_UNMAP } { + let hdl = Bdev::open(&self.as_bdev(), true) + .and_then(|desc| desc.into_handle()) + .map_err(|e| { + error!(?self, ?e, "failed to wipe lvol"); + Error::RepDestroy { + source: Errno::ENXIO, + name: self.name(), + } + })?; + + // Set the buffer size to the maximum allowed by SPDK. + let buf_size = SPDK_BDEV_LARGE_BUF_MAX_SIZE as u64; + let buf = hdl.dma_malloc(buf_size).map_err(|e| { + error!( + ?self, + ?e, + "no memory available to allocate zero buffer" + ); + Error::RepDestroy { + source: Errno::ENOMEM, + name: self.name(), + } + })?; + // write zero to the first 8MB which wipes the metadata and the + // first 4MB of the data partition + let range = + std::cmp::min(self.as_bdev().size_in_bytes(), (1 << 20) * 8); + for offset in 0 .. (range / buf_size) { + hdl.write_at(offset * buf.len(), &buf).await.map_err(|e| { + error!(?self, ?e); + Error::RepDestroy { + source: Errno::EIO, + name: self.name(), + } + })?; + } + } + Ok(()) + } + /// returns a boolean indicating if the lvol is thin provisioned pub fn is_thin(&self) -> bool { unsafe { self.0.as_ref().thin_provision } @@ -239,7 +286,6 @@ impl Lvol { } /// destroy the lvol - #[instrument(level = "debug", err)] pub async fn destroy(self) -> Result { extern "C" fn destroy_cb(sender: *mut c_void, errno: i32) { let sender = @@ -247,11 +293,11 @@ impl Lvol { sender.send(errno).unwrap(); } - let name = self.name(); - // we must always unshare before destroying bdev let _ = self.unshare().await; + let name = self.name(); + let (s, r) = pair::(); unsafe { vbdev_lvol_destroy(self.0.as_ptr(), Some(destroy_cb), cb_arg(s)) @@ -276,8 +322,6 @@ impl Lvol { } /// write the property prop on to the lvol which is stored on disk - #[allow(clippy::unit_arg)] // here to silence the Ok(()) variant - #[instrument(level = "debug", err)] pub async fn set(&self, prop: PropValue) -> Result<(), Error> { let blob = unsafe { self.0.as_ref().blob }; assert!(!blob.is_null()); @@ -325,7 +369,6 @@ impl Lvol { } /// get/read a property from this lvol from disk - #[instrument(level = "debug", err)] pub async fn get(&self, prop: PropName) -> Result { let blob = unsafe { self.0.as_ref().blob }; assert!(!blob.is_null()); diff --git a/mayastor/src/lvs/lvs_pool.rs b/mayastor/src/lvs/lvs_pool.rs index 30f8e21a2..5bb2d6eb0 100644 --- a/mayastor/src/lvs/lvs_pool.rs +++ b/mayastor/src/lvs/lvs_pool.rs @@ -3,7 +3,6 @@ use std::{convert::TryFrom, fmt::Debug, os::raw::c_void, ptr::NonNull}; use futures::channel::oneshot; use nix::errno::Errno; use pin_utils::core_reexport::fmt::Formatter; -use tracing::instrument; use rpc::mayastor::CreatePoolRequest; use spdk_sys::{ @@ -16,20 +15,21 @@ use spdk_sys::{ vbdev_get_lvol_store_by_name, vbdev_get_lvs_bdev_by_lvs, vbdev_lvol_create, + vbdev_lvol_create_with_uuid, vbdev_lvol_store_first, vbdev_lvol_store_next, vbdev_lvs_create, vbdev_lvs_destruct, vbdev_lvs_examine, vbdev_lvs_unload, + LVOL_CLEAR_WITH_NONE, LVOL_CLEAR_WITH_UNMAP, - LVOL_CLEAR_WITH_WRITE_ZEROES, LVS_CLEAR_WITH_NONE, }; use url::Url; use crate::{ - bdev::Uri, + bdev::uri, core::{Bdev, IoType, Share, Uuid}, ffihelper::{cb_arg, pair, AsStr, ErrnoResult, FfiResult, IntoCString}, lvs::{Error, Lvol, PropName, PropValue}, @@ -38,7 +38,7 @@ use crate::{ impl From<*mut spdk_lvol_store> for Lvs { fn from(p: *mut spdk_lvol_store) -> Self { - Lvs(NonNull::new(p).unwrap()) + Lvs(NonNull::new(p).expect("lvol pointer is null")) } } @@ -128,6 +128,13 @@ impl 
Lvs { LvsIterator::default() } + /// export all LVS instances + pub async fn export_all() { + for pool in Self::iter() { + let _ = pool.export().await; + } + } + /// lookup a lvol store by its name pub fn lookup(name: &str) -> Option { let name = name.into_cstring(); @@ -181,7 +188,6 @@ impl Lvs { } /// imports a pool based on its name and base bdev name - #[instrument(level = "debug")] pub async fn import(name: &str, bdev: &str) -> Result { let (sender, receiver) = pair::>(); @@ -238,7 +244,6 @@ impl Lvs { } } - #[instrument(level = "debug", err)] /// Create a pool on base bdev pub async fn create(name: &str, bdev: &str) -> Result { let pool_name = name.into_cstring(); @@ -285,7 +290,6 @@ impl Lvs { } /// imports the pool if it exists, otherwise try to create it - #[instrument(level = "debug", err)] pub async fn create_or_import( args: CreatePoolRequest, ) -> Result { @@ -313,7 +317,7 @@ impl Lvs { }) .collect::>(); - let parsed = Uri::parse(&disks[0]).map_err(|e| Error::InvalidBdev { + let parsed = uri::parse(&disks[0]).map_err(|e| Error::InvalidBdev { source: e, name: args.name.clone(), })?; @@ -352,7 +356,10 @@ impl Lvs { error!("pool name mismatch"); Err(Error::Import { source, - name, + name: format!( + "a pool currently exists on the device with name: {}", + name + ), }) } // try to create the pool @@ -378,8 +385,6 @@ impl Lvs { } /// export the given lvl - #[allow(clippy::unit_arg)] // here to silence the () argument - #[instrument(level = "debug", err)] pub async fn export(self) -> Result<(), Error> { let pool = self.name().to_string(); let base_bdev = self.base_bdev(); @@ -399,7 +404,7 @@ impl Lvs { })?; info!("pool {} exported successfully", pool); - bdev_destroy(&base_bdev.bdev_uri().unwrap()) + bdev_destroy(&base_bdev.bdev_uri_original().unwrap()) .await .map_err(|e| Error::Destroy { source: e, @@ -447,8 +452,6 @@ impl Lvs { /// destroys the given pool deleting the on disk super blob before doing so, /// un share all targets - #[allow(clippy::unit_arg)] - #[instrument(level = "debug", err)] pub async fn destroy(self) -> Result<(), Error> { let pool = self.name().to_string(); let (s, r) = pair::(); @@ -475,7 +478,7 @@ impl Lvs { info!("pool {} destroyed successfully", pool); - bdev_destroy(&base_bdev.bdev_uri().unwrap()) + bdev_destroy(&base_bdev.bdev_uri_original().unwrap()) .await .map_err(|e| Error::Destroy { source: e, @@ -504,20 +507,19 @@ impl Lvs { None } } - - #[instrument(level = "debug", err)] /// create a new lvol on this pool pub async fn create_lvol( &self, name: &str, size: u64, + uuid: Option<&str>, thin: bool, ) -> Result { let clear_method = if self.base_bdev().io_type_supported(IoType::Unmap) { LVOL_CLEAR_WITH_UNMAP } else { - LVOL_CLEAR_WITH_WRITE_ZEROES + LVOL_CLEAR_WITH_NONE }; if Bdev::lookup_by_name(name).is_some() { @@ -531,15 +533,31 @@ impl Lvs { let cname = name.into_cstring(); unsafe { - vbdev_lvol_create( - self.0.as_ptr(), - cname.as_ptr(), - size, - thin, - clear_method, - Some(Lvol::lvol_cb), - cb_arg(s), - ) + match uuid { + Some(u) => { + let cuuid = u.into_cstring(); + + vbdev_lvol_create_with_uuid( + self.0.as_ptr(), + cname.as_ptr(), + size, + thin, + clear_method, + cuuid.as_ptr(), + Some(Lvol::lvol_cb), + cb_arg(s), + ) + } + None => vbdev_lvol_create( + self.0.as_ptr(), + cname.as_ptr(), + size, + thin, + clear_method, + Some(Lvol::lvol_cb), + cb_arg(s), + ), + } } .to_result(|e| Error::RepCreate { source: Errno::from_i32(e), @@ -555,6 +573,8 @@ impl Lvs { }) .map(|lvol| Lvol(NonNull::new(lvol).unwrap()))?; + lvol.wipe_super().await?; + 
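// A minimal, std-only sketch of the chunked zero-write that wipe_super() above
// performs, so the offset arithmetic is easy to verify in isolation. The
// in-memory &mut [u8] device and the name zero_fill_prefix are hypothetical
// stand-ins for the SPDK bdev handle and are not part of the Mayastor API; only
// the arithmetic (zero min(device_size, 8 MiB) in buffer-sized chunks, leaving
// any sub-buffer remainder untouched) mirrors the patch above.
fn zero_fill_prefix(device: &mut [u8], buf_size: usize) {
    let buf = vec![0u8; buf_size];
    // Wipe at most the first 8 MiB, as wipe_super() does.
    let range = device.len().min(8 * 1024 * 1024);
    for chunk in 0..(range / buf_size) {
        let offset = chunk * buf.len();
        device[offset..offset + buf.len()].copy_from_slice(&buf);
    }
}

#[test]
fn wipes_only_the_first_8_mib() {
    let mut dev = vec![0xff_u8; 16 * 1024 * 1024];
    zero_fill_prefix(&mut dev, 64 * 1024);
    assert!(dev[..8 * 1024 * 1024].iter().all(|b| *b == 0));
    assert!(dev[8 * 1024 * 1024..].iter().all(|b| *b == 0xff));
}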
info!("created {}", lvol); Ok(lvol) } diff --git a/mayastor/src/nexus_uri.rs b/mayastor/src/nexus_uri.rs index fb1d23a36..4cbed1330 100644 --- a/mayastor/src/nexus_uri.rs +++ b/mayastor/src/nexus_uri.rs @@ -1,6 +1,6 @@ use std::{convert::TryFrom, num::ParseIntError, str::ParseBoolError}; -use crate::{bdev::Uri, core::Bdev}; +use crate::{bdev::uri, core::Bdev}; use futures::channel::oneshot::Canceled; use nix::errno::Errno; use snafu::Snafu; @@ -11,63 +11,99 @@ use url::ParseError; #[derive(Debug, Snafu, Clone)] #[snafu(visibility = "pub(crate)")] pub enum NexusBdevError { - // Generic URL parse errors - #[snafu(display("Error parsing URI \"{}\"", uri))] + // Generic URL parse errors. + #[snafu(display("Error parsing URI '{}'", uri))] UrlParseError { source: ParseError, uri: String }, + + // No matching URI error. #[snafu(display( - "No matching URI found for bdev {} in aliases {:?}", + "No matching URI found for BDEV '{}' in aliases {:?}", name, aliases ))] BdevNoUri { name: String, aliases: Vec }, - #[snafu(display("Unsupported URI scheme: {}", scheme))] + + // Unsupported URI scheme. + #[snafu(display("Unsupported URI scheme: '{}'", scheme))] UriSchemeUnsupported { scheme: String }, - // Scheme specific URI format errors - #[snafu(display("Invalid URI \"{}\": {}", uri, message))] + + // Scheme-specific URI format errors. + #[snafu(display("Invalid URI '{}': {}", uri, message))] UriInvalid { uri: String, message: String }, + + // Bad value of a boolean parameter. #[snafu(display( - "Invalid URI \"{}\": could not parse {} parameter value", + "Invalid URI '{}': could not parse value of parameter '{}': '{}' is given, \ + a boolean expected", uri, - parameter + parameter, + value ))] BoolParamParseError { source: ParseBoolError, uri: String, parameter: String, + value: String, }, + + // Bad value of an integer parameter. #[snafu(display( - "Invalid URI \"{}\": could not parse {} parameter value", + "Invalid URI '{}': could not parse value of parameter '{}': '{}' is given, \ + an integer expected", uri, - parameter + parameter, + value ))] IntParamParseError { source: ParseIntError, uri: String, parameter: String, + value: String, }, + + // Bad value of a UUID parameter. #[snafu(display( - "Invalid URI \"{}\": could not parse uuid parameter value", - uri, + "Invalid URI '{}': could not parse value of UUID parameter", + uri ))] UuidParamParseError { source: uuid::Error, uri: String }, - // Bdev create/destroy errors - #[snafu(display("bdev {} already exists", name))] + + // BDEV name already exists. + #[snafu(display( + "Failed to create a BDEV: name '{}' already exists", + name + ))] BdevExists { name: String }, + + // Creating a BDEV with a different UUID. #[snafu(display( - "bdev {} already exists with a different uuid: {}", + "Failed to create a BDEV: '{}' already exists with a different UUID: '{}'", name, uuid ))] BdevWrongUuid { name: String, uuid: String }, - #[snafu(display("bdev {} not found", name))] + + // BDEV is not found. + #[snafu(display("BDEV '{}' could not be found", name))] BdevNotFound { name: String }, - #[snafu(display("Invalid parameters for bdev create {}", name))] - InvalidParams { source: Errno, name: String }, - #[snafu(display("Failed to create bdev {}", name))] + + // Invalid creation parameters. + #[snafu(display( + "Failed to create a BDEV '{}': invalid parameters", + name + ))] + CreateBdevInvalidParams { source: Errno, name: String }, + + // Generic creation failure. 
+ #[snafu(display("Failed to create a BDEV '{}'", name))] CreateBdev { source: Errno, name: String }, - #[snafu(display("Failed to destroy bdev {}", name))] + + // Generic destruction failure. + #[snafu(display("Failed to destroy a BDEV '{}'", name))] DestroyBdev { source: Errno, name: String }, - #[snafu(display("Command canceled for bdev {}", name))] + + // Command canceled. + #[snafu(display("Command canceled for a BDEV '{}'", name))] CancelBdev { source: Canceled, name: String }, } @@ -75,22 +111,22 @@ pub enum NexusBdevError { /// Return the bdev name (which can be different from URI). pub async fn bdev_create(uri: &str) -> Result { info!(?uri, "create"); - Uri::parse(uri)?.create().await + uri::parse(uri)?.create().await } /// Parse URI and destroy bdev described in the URI. pub async fn bdev_destroy(uri: &str) -> Result<(), NexusBdevError> { info!(?uri, "destroy"); - Uri::parse(uri)?.destroy().await + uri::parse(uri)?.destroy().await } pub fn bdev_get_name(uri: &str) -> Result { - Ok(Uri::parse(uri)?.get_name()) + Ok(uri::parse(uri)?.get_name()) } impl std::cmp::PartialEq for &Bdev { fn eq(&self, uri: &url::Url) -> bool { - match Uri::parse(&uri.to_string()) { + match uri::parse(&uri.to_string()) { Ok(device) if device.get_name() == self.name() => { self.driver() == match uri.scheme() { @@ -105,7 +141,7 @@ impl std::cmp::PartialEq for &Bdev { impl std::cmp::PartialEq for Bdev { fn eq(&self, uri: &url::Url) -> bool { - match Uri::parse(&uri.to_string()) { + match uri::parse(&uri.to_string()) { Ok(device) if device.get_name() == self.name() => { self.driver() == match uri.scheme() { diff --git a/mayastor/src/persistent_store.rs b/mayastor/src/persistent_store.rs index c49bbee7f..8378c201d 100644 --- a/mayastor/src/persistent_store.rs +++ b/mayastor/src/persistent_store.rs @@ -215,7 +215,7 @@ impl PersistentStore { } /// Get the persistent store. - fn persistent_store() -> &'static Mutex { + fn new() -> &'static Mutex { PERSISTENT_STORE .get() .expect("Persistent store should have been initialised") @@ -225,19 +225,19 @@ impl PersistentStore { /// Get an instance of the backing store. fn backing_store() -> Etcd { - Self::persistent_store().lock().unwrap().store.clone() + Self::new().lock().unwrap().store.clone() } /// Get the endpoint of the backing store. fn endpoint() -> String { - Self::persistent_store().lock().unwrap().endpoint.clone() + Self::new().lock().unwrap().endpoint.clone() } /// Reconnects to the backing store and replaces the old connection with the /// new connection. 
async fn reconnect() { warn!("Attempting to reconnect to persistent store...."); - let persistent_store = Self::persistent_store(); + let persistent_store = Self::new(); let backing_store = Self::connect_to_backing_store(&PersistentStore::endpoint()).await; persistent_store.lock().unwrap().store = backing_store; diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index 934986986..06df1f44c 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -220,8 +220,10 @@ impl From for spdk_nvmf_transport_opts { pub struct NvmeBdevOpts { /// action take on timeout pub action_on_timeout: u32, - /// timeout for each command + /// timeout for IO commands pub timeout_us: u64, + /// timeout for admin commands + pub timeout_admin_us: u64, /// keep-alive timeout pub keep_alive_timeout_ms: u32, /// retry count @@ -268,6 +270,7 @@ impl Default for NvmeBdevOpts { Self { action_on_timeout: 4, timeout_us: try_from_env("NVME_TIMEOUT_US", 5_000_000), + timeout_admin_us: try_from_env("NVME_TIMEOUT_ADMIN_US", 5_000_000), keep_alive_timeout_ms: try_from_env("NVME_KATO_MS", 1_000), retry_count: try_from_env("NVME_RETRY_COUNT", 0), arbitration_burst: 0, @@ -290,6 +293,7 @@ impl From for NvmeBdevOpts { Self { action_on_timeout: o.action_on_timeout, timeout_us: o.timeout_us, + timeout_admin_us: o.timeout_admin_us, keep_alive_timeout_ms: o.keep_alive_timeout_ms, retry_count: o.retry_count, arbitration_burst: o.arbitration_burst, @@ -309,6 +313,7 @@ impl From<&NvmeBdevOpts> for spdk_bdev_nvme_opts { Self { action_on_timeout: o.action_on_timeout, timeout_us: o.timeout_us, + timeout_admin_us: o.timeout_admin_us, keep_alive_timeout_ms: o.keep_alive_timeout_ms, retry_count: o.retry_count, arbitration_burst: o.arbitration_burst, @@ -434,6 +439,12 @@ pub struct IscsiTgtOpts { max_large_data_in_per_connection: u32, /// todo max_r2t_per_connection: u32, + /// todo + pdu_pool_size: u32, + /// todo + immediate_data_pool_size: u32, + /// todo + data_out_pool_size: u32, } impl Default for IscsiTgtOpts { @@ -458,7 +469,13 @@ impl Default for IscsiTgtOpts { error_recovery_level: 0, allow_duplicate_isid: false, max_large_data_in_per_connection: 64, - max_r2t_per_connection: 64, + max_r2t_per_connection: 4, + // 2 * (MaxQueueDepth + MaxLargeDataInPerConnection + 2 * + // MaxR2TPerConnection + 8) + pdu_pool_size: 110 * (2 * (32 + 64 + 2 * 4 + 8)), + immediate_data_pool_size: 110 * 128, + // MaxSessions * MAX_DATA_OUT_PER_CONNECTION + data_out_pool_size: 110 * 16, } } } @@ -490,6 +507,9 @@ impl From<&IscsiTgtOpts> for spdk_iscsi_opts { AllowDuplicateIsid: o.allow_duplicate_isid, MaxLargeDataInPerConnection: o.max_large_data_in_per_connection, MaxR2TPerConnection: o.max_r2t_per_connection, + pdu_pool_size: o.pdu_pool_size, + immediate_data_pool_size: o.immediate_data_pool_size, + data_out_pool_size: o.data_out_pool_size, } } } @@ -531,6 +551,8 @@ pub struct PosixSocketOpts { recv_buf_size: u32, send_buf_size: u32, enable_recv_pipe: bool, + /// deprecated, use use enable_zerocopy_send_server or + /// enable_zerocopy_send_client instead enable_zero_copy_send: bool, enable_quickack: bool, enable_placement_id: u32, @@ -544,7 +566,7 @@ impl Default for PosixSocketOpts { recv_buf_size: try_from_env("SOCK_RECV_BUF_SIZE", 2097152), send_buf_size: try_from_env("SOCK_SEND_BUF_SIZE", 2097152), enable_recv_pipe: try_from_env("SOCK_ENABLE_RECV_PIPE", true), - enable_zero_copy_send: try_from_env("SOCK_ZERO_COPY_SEND", true), + enable_zero_copy_send: true, enable_quickack: 
try_from_env("SOCK_ENABLE_QUICKACK", true), enable_placement_id: try_from_env("SOCK_ENABLE_PLACEMENT_ID", 0), enable_zerocopy_send_server: try_from_env( @@ -553,7 +575,7 @@ impl Default for PosixSocketOpts { ), enable_zerocopy_send_client: try_from_env( "SOCK_ZEROCOPY_SEND_CLIENT", - true, + false, ), } } diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs index 935f91995..c6a1f298b 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -62,9 +62,9 @@ pub struct Registration { /// Configuration of the registration config: Configuration, /// Receive channel for messages and termination - rcv_chan: smol::channel::Receiver<()>, + rcv_chan: async_channel::Receiver<()>, /// Termination channel - fini_chan: smol::channel::Sender<()>, + fini_chan: async_channel::Sender<()>, } static MESSAGE_BUS_REG: OnceCell = OnceCell::new(); @@ -95,7 +95,7 @@ impl Registration { } fn new(node: &NodeId, grpc_endpoint: &str) -> Registration { - let (msg_sender, msg_receiver) = smol::channel::unbounded::<()>(); + let (msg_sender, msg_receiver) = async_channel::unbounded::<()>(); let config = Configuration { node: node.to_owned(), grpc_endpoint: grpc_endpoint.to_owned(), diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 36857262e..70cd7ce11 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -219,7 +219,7 @@ impl NvmfSubsystem { msg: "failed to add namespace ID".to_string(), }) } else { - info!("added NS ID {}", ns_id); + debug!(?bdev, ?ns_id, "added as namespace"); Ok(()) } } @@ -368,7 +368,7 @@ impl NvmfSubsystem { msg: "failed to start the subsystem".to_string(), })?; - info!("started {:?}", self.get_nqn()); + debug!(?self, "shared"); Ok(self.get_nqn()) } @@ -409,7 +409,7 @@ impl NvmfSubsystem { msg: "failed to stop the subsystem".to_string(), })?; - info!("stopped {}", self.get_nqn()); + debug!("stopped {}", self.get_nqn()); Ok(()) } diff --git a/mayastor/tests/aio_unmap.rs b/mayastor/tests/aio_unmap.rs deleted file mode 100644 index 850ecffd0..000000000 --- a/mayastor/tests/aio_unmap.rs +++ /dev/null @@ -1,571 +0,0 @@ -use common::MayastorTest; - -use mayastor::{ - core::{Bdev, BdevHandle, MayastorCliArgs, Share}, - ffihelper::cb_arg, - lvs::{Lvol, Lvs}, - nexus_uri::{bdev_create, bdev_destroy}, -}; - -use rpc::mayastor::CreatePoolRequest; - -use futures::channel::oneshot; -use once_cell::sync::Lazy; -use std::{ - ffi::{c_void, CString}, - io::{Error, ErrorKind}, - mem::MaybeUninit, -}; - -use spdk_sys::{ - spdk_bdev_free_io, - spdk_bdev_io, - spdk_bdev_unmap, - spdk_bdev_write_zeroes, -}; - -use nvmeadm::NvmeTarget; -use std::convert::TryFrom; - -use tracing::info; - -pub mod common; - -static BDEVNAME1: &str = "aio:///tmp/disk1.img"; -static DISKNAME1: &str = "/tmp/disk1.img"; - -// Get the I/O blocksize for the filesystem upon which the given file resides. -fn get_fs_blocksize(path: &str) -> Result { - Ok(stat(path)?.st_blksize as u64) -} - -// Return the number of (512-byte) blocks allocated for a given file. -// Note that the result is ALWAYS in terms of 512-byte blocks, -// regardless of the underlying filesystem I/O blocksize. 
-fn get_allocated_blocks(path: &str) -> Result { - Ok(stat(path)?.st_blocks as u64) -} - -// Obtain stat information for given file -fn stat(path: &str) -> Result { - let mut data: MaybeUninit = MaybeUninit::uninit(); - let cpath = CString::new(path).unwrap(); - - if unsafe { libc::stat64(cpath.as_ptr(), data.as_mut_ptr()) } < 0 { - return Err(Error::last_os_error()); - } - - Ok(unsafe { data.assume_init() }) -} - -extern "C" fn io_completion_cb( - io: *mut spdk_bdev_io, - success: bool, - arg: *mut c_void, -) { - let sender = - unsafe { Box::from_raw(arg as *const _ as *mut oneshot::Sender) }; - - unsafe { - spdk_bdev_free_io(io); - } - - sender - .send(success) - .expect("io completion callback - receiver side disappeared"); -} - -// Unmap a specified region. -// Partial blocks are zeroed rather than deallocated. -// This means that no blocks will be deallocated if the -// region is smaller than the filesystem I/O blocksize. -async fn unmap( - handle: &BdevHandle, - offset: u64, - nbytes: u64, -) -> Result<(), Error> { - let (sender, receiver) = oneshot::channel::(); - - let (desc, ch) = handle.io_tuple(); - let errno = unsafe { - spdk_bdev_unmap( - desc, - ch, - offset, - nbytes, - Some(io_completion_cb), - cb_arg(sender), - ) - }; - - if errno != 0 { - return Err(Error::from_raw_os_error(errno.abs())); - } - - if receiver.await.expect("failed awaiting unmap completion") { - return Ok(()); - } - - Err(Error::new(ErrorKind::Other, "unmap failed")) -} - -// Zero a specified region. -// This is performed as efficiently as possible according -// to the capabilities of the underlying bdev. -async fn write_zeroes( - handle: &BdevHandle, - offset: u64, - nbytes: u64, -) -> Result<(), Error> { - let (sender, receiver) = oneshot::channel::(); - let (desc, ch) = handle.io_tuple(); - - let errno = unsafe { - spdk_bdev_write_zeroes( - desc, - ch, - offset, - nbytes, - Some(io_completion_cb), - cb_arg(sender), - ) - }; - - if errno != 0 { - return Err(Error::from_raw_os_error(errno.abs())); - } - - if receiver - .await - .expect("failed awaiting write_zeroes completion") - { - return Ok(()); - } - - Err(Error::new(ErrorKind::Other, "write_zeroes failed")) -} - -fn setup() -> &'static MayastorTest<'static> { - static MAYASTOR: Lazy> = - Lazy::new(|| MayastorTest::new(MayastorCliArgs::default())); - &MAYASTOR -} - -#[tokio::test] -async fn zero_bdev_test() { - const FILE_SIZE: u64 = 64 * 1024; - - common::delete_file(&[DISKNAME1.into()]); - common::truncate_file(DISKNAME1, FILE_SIZE); - - let ms = setup(); - - // Create a bdev. - ms.spawn(async { - bdev_create(BDEVNAME1).await.expect("failed to create bdev"); - }) - .await; - - // Write some data to the bdev. - ms.spawn(async { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - let mut buf = handle.dma_malloc(4096).unwrap(); - buf.fill(42); - handle.write_at(2048, &buf).await.unwrap(); - handle.close(); - }) - .await; - - // Read data back from the bdev. - ms.spawn(async { - let handle = BdevHandle::open(BDEVNAME1, false, false) - .expect("failed to obtain bdev handle"); - let mut buf = handle.dma_malloc(4096).unwrap(); - handle.read_at(2048, &mut buf).await.unwrap(); - handle.close(); - - for &c in buf.as_slice() { - assert_eq!(c, 42, "each byte should have value 42"); - } - }) - .await; - - // Zero the region of the lvol. 
- ms.spawn(async { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - write_zeroes(&handle, 2048, 4096).await.unwrap(); - handle.close(); - }) - .await; - - // Re-read data from the bdev and verify that it has been zeroed. - ms.spawn(async { - let handle = BdevHandle::open(BDEVNAME1, false, false) - .expect("failed to obtain bdev handle"); - let mut buf = handle.dma_malloc(4096).unwrap(); - handle.read_at(2048, &mut buf).await.unwrap(); - handle.close(); - - for &c in buf.as_slice() { - assert_eq!(c, 0, "each byte should have value 0"); - } - }) - .await; - - // Destroy the bdev. - ms.spawn(async { - bdev_destroy(BDEVNAME1).await.unwrap(); - }) - .await; - - // Validate the state of mayastor. - ms.spawn(async { - // no bdevs - assert_eq!(Bdev::bdev_first().into_iter().count(), 0); - }) - .await; - - common::delete_file(&[DISKNAME1.into()]); -} - -#[tokio::test] -async fn unmap_bdev_test() { - const FILE_SIZE: u64 = 64 * 1024; - - common::delete_file(&[DISKNAME1.into()]); - common::truncate_file(DISKNAME1, FILE_SIZE); - - let ms = setup(); - - // Get underlying filesystem I/O blocksize. - let fs_blocksize = get_fs_blocksize(DISKNAME1).unwrap(); - info!("filesystem blocksize is {}", fs_blocksize); - - // Check that size of the file is sufficiently large to contain at least 16 - // blocks - assert!( - 16 * fs_blocksize <= FILE_SIZE * 1024, - "file {} is too small to contain 16 blocks ({} bytes)", - DISKNAME1, - 16 * fs_blocksize - ); - - // Verify that there are currently no blocks allocated - // for the sparse file that is our backing store. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap(), - 0, - "expected 0 blocks" - ); - - // Create bdev but do not perform any I/O. - ms.spawn(async { - bdev_create(BDEVNAME1).await.expect("failed to create bdev"); - }) - .await; - - // Verify that number of allocated blocks is still 0. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap(), - 0, - "expected 0 blocks" - ); - - // Write 10 blocks at an offset of 4 blocks. - let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - - let mut buf = handle.dma_malloc(10 * blocksize).unwrap(); - buf.fill(0xff); - - info!("writing 10 blocks"); - handle.write_at(4 * blocksize, &buf).await.unwrap(); - - handle.close(); - }) - .await; - - // Verify that 10 blocks have been allocated. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap() * 512, - 10 * fs_blocksize, - "expected 10 blocks ({} bytes)", - 10 * fs_blocksize - ); - - // Unmap 4 blocks at an offset of 6 blocks. - let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - - info!("unmapping 4 blocks"); - unmap(&handle, 6 * blocksize, 4 * blocksize).await.unwrap(); - - handle.close(); - }) - .await; - - // Verify that number of allocated blocks has been reduced to 6. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap() * 512, - 6 * fs_blocksize, - "expected 6 blocks ({} bytes)", - 6 * fs_blocksize - ); - - // Unmap 4 blocks at an offset of 10 blocks. 
- let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - - info!("unmapping 4 blocks"); - unmap(&handle, (6 + 4) * blocksize, 4 * blocksize) - .await - .unwrap(); - - handle.close(); - }) - .await; - - // Verify that number of allocated blocks has been reduced to 2. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap() * 512, - 2 * fs_blocksize, - "expected 2 blocks ({} bytes)", - 2 * fs_blocksize - ); - - if fs_blocksize > 1024 { - // Unmap 1024 BYTES at an offset of 4 blocks. - // This is less than the underlying filesystem allocation unit size - // (fs_blocksize), and is too small for any deallocation to occur. - // The specified region is zeroed but the number of allocated - // blocks should not change. - let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - - info!("unmapping 1024 BYTES"); - unmap(&handle, 4 * blocksize, 1024).await.unwrap(); - - handle.close(); - }) - .await; - - // Verify that "unmapped" region has been zeroed. - let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, false, false) - .expect("failed to obtain bdev handle"); - let mut buf = handle.dma_malloc(1024).unwrap(); - - info!("reading 1024 BYTES"); - handle.read_at(4 * blocksize, &mut buf).await.unwrap(); - handle.close(); - - for &c in buf.as_slice() { - assert_eq!(c, 0, "each byte should have value 0"); - } - }) - .await; - - // Verify that number of allocated blocks has not changed. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap() * 512, - 2 * fs_blocksize, - "expected 2 blocks ({} bytes)", - 2 * fs_blocksize - ); - } - - // Create bdev and unmap 2 blocks at an offset of 4 blocks. - let blocksize = fs_blocksize; - ms.spawn(async move { - let handle = BdevHandle::open(BDEVNAME1, true, false) - .expect("failed to obtain bdev handle"); - - info!("unmapping 2 blocks"); - unmap(&handle, 4 * blocksize, 2 * blocksize).await.unwrap(); - - handle.close(); - }) - .await; - - // Destroy the bdev. - ms.spawn(async move { - bdev_destroy(BDEVNAME1).await.unwrap(); - }) - .await; - - // Verify that number of allocated blocks has been reduced to 0. - assert_eq!( - get_allocated_blocks(DISKNAME1).unwrap(), - 0, - "expected 0 blocks" - ); - - // Validate the state of mayastor. - ms.spawn(async { - // no bdevs - assert_eq!(Bdev::bdev_first().into_iter().count(), 0); - }) - .await; - - common::delete_file(&[DISKNAME1.into()]); -} - -#[tokio::test] -async fn unmap_share_test() { - const NUM_VOLS: usize = 2; - const FILE_SIZE: u64 = 64 * 1024; - const VOL_SIZE: u64 = 24 * 1024; - - common::delete_file(&[DISKNAME1.into()]); - common::truncate_file(DISKNAME1, FILE_SIZE); - - let ms = setup(); - - // Create a pool. - ms.spawn(async { - Lvs::create_or_import(CreatePoolRequest { - name: "tpool".into(), - disks: vec![BDEVNAME1.into()], - }) - .await - .unwrap(); - }) - .await; - - // Check that we can find the LVS. - ms.spawn(async { - assert_eq!(Lvs::iter().count(), 1); - let pool = Lvs::lookup("tpool").unwrap(); - info!("created pool: name={} UUID={}", pool.name(), pool.uuid()); - assert_eq!(pool.name(), "tpool"); - assert_eq!(pool.used(), 0); - assert_eq!(pool.base_bdev().name(), DISKNAME1); - }) - .await; - - // Create lvols on this pool. - ms.spawn(async { - let pool = Lvs::lookup("tpool").unwrap(); - for i in 0 .. 
NUM_VOLS { - pool.create_lvol(&format!("vol-{}", i), VOL_SIZE * 1024, false) - .await - .unwrap(); - } - }) - .await; - - info!( - "{} blocks allocated for {} after creating lvols", - get_allocated_blocks(DISKNAME1).unwrap(), - DISKNAME1 - ); - - // Share all replicas. - let targets = ms - .spawn(async { - let pool = Lvs::lookup("tpool").unwrap(); - assert_eq!(pool.lvols().unwrap().count(), NUM_VOLS); - - let mut targets: Vec = Vec::new(); - - for vol in pool.lvols().unwrap() { - vol.share_nvmf(None).await.unwrap(); - let uri = vol.share_uri().unwrap(); - info!("lvol {} shared as: {}", vol.name(), uri); - targets.push(NvmeTarget::try_from(uri).unwrap()); - } - - targets - }) - .await; - - let mut devlist: Vec = Vec::new(); - - // Attach all targets. - for target in &targets { - let devices = target.connect().unwrap(); - let dev = devices[0].path.to_string(); - info!("nvmf target attached to device: {}", dev); - devlist.push(dev); - } - - // Write to all devices - for dev in &devlist { - info!("writing to {} with dd ...", dev); - common::dd_urandom_blkdev(dev); - } - - // Disconnect all targets. - for target in &targets { - info!("disconnecting target"); - target.disconnect().unwrap(); - } - - info!( - "{} blocks allocated for {} after finished writing", - get_allocated_blocks(DISKNAME1).unwrap(), - DISKNAME1 - ); - - assert!( - get_allocated_blocks(DISKNAME1).unwrap() > 0, - "number of allocated blocks should be non-zero" - ); - - // Destroy the lvols. - ms.spawn(async { - let pool = Lvs::lookup("tpool").unwrap(); - - let vols: Vec = pool.lvols().unwrap().collect(); - assert_eq!(vols.len(), NUM_VOLS); - - for vol in vols { - vol.destroy().await.unwrap(); - } - }) - .await; - - info!( - "{} blocks allocated for {} after destroying lvols", - get_allocated_blocks(DISKNAME1).unwrap(), - DISKNAME1 - ); - - // Destroy the pool - ms.spawn(async { - let pool = Lvs::lookup("tpool").unwrap(); - assert_eq!(pool.lvols().unwrap().count(), 0); - - pool.destroy().await.unwrap(); - }) - .await; - - info!( - "{} blocks allocated for {} after destroying pool", - get_allocated_blocks(DISKNAME1).unwrap(), - DISKNAME1 - ); - - // Validate the state of mayastor. - ms.spawn(async { - // pools destroyed - assert_eq!(Lvs::iter().count(), 0); - - // no bdevs - assert_eq!(Bdev::bdev_first().into_iter().count(), 0); - }) - .await; - - common::delete_file(&[DISKNAME1.into()]); -} diff --git a/mayastor/tests/block_device_nvmf.rs b/mayastor/tests/block_device_nvmf.rs index 1a518027a..7389b504e 100755 --- a/mayastor/tests/block_device_nvmf.rs +++ b/mayastor/tests/block_device_nvmf.rs @@ -537,7 +537,7 @@ async fn nvmf_device_readv_test() { let ms = get_ms(); let (_test, dev_url) = launch_instance().await; let u = Arc::new(dev_url); - let mut url = Arc::clone(&u); + let mut url = u.clone(); // Placeholder structure to let all the fields outlive API invocations. struct IoCtx { @@ -653,7 +653,7 @@ async fn nvmf_device_readv_test() { .await; // Once all handles are closed, destroy the device. 
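// Note on the change just below: Arc::clone(&u) and u.clone() are equivalent
// here; both bump the reference count of the same Arc holding the device URL,
// so the substitution is purely stylistic.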
- url = Arc::clone(&u); + url = u.clone(); ms.spawn(async move { device_destroy(&(*url)).await.unwrap(); }) @@ -668,7 +668,7 @@ async fn nvmf_device_writev_test() { let ms = get_ms(); let (_test, dev_url) = launch_instance().await; let u = Arc::new(dev_url); - let url = Arc::clone(&u); + let url = u.clone(); static DEVICE_NAME: OnceCell = OnceCell::new(); @@ -839,7 +839,7 @@ async fn nvmf_device_readv_iovs_test() { let ms = get_ms(); let (_test, dev_url) = launch_instance().await; let u = Arc::new(dev_url); - let mut url = Arc::clone(&u); + let mut url = u.clone(); static DEVICE_NAME: OnceCell = OnceCell::new(); @@ -957,7 +957,7 @@ async fn nvmf_device_readv_iovs_test() { // Check that the callback has been called. check_callback_invocation(); - url = Arc::clone(&u); + url = u.clone(); ms.spawn(async move { let ctx = unsafe { Box::::from_raw(io_ctx.into_inner()) }; @@ -998,7 +998,7 @@ async fn nvmf_device_writev_iovs_test() { let ms = get_ms(); let (_test, dev_url) = launch_instance().await; let u = Arc::new(dev_url); - let url = Arc::clone(&u); + let url = u.clone(); // Clear callback invocation flag. clear_callback_invocation_flag(); @@ -1490,7 +1490,7 @@ async fn nvmf_reset_abort_io() { let ms = get_ms(); let (_test, dev_url) = launch_instance().await; let u = Arc::new(dev_url); - let mut url = Arc::clone(&u); + let mut url = u.clone(); // Placeholder structure to let all the fields outlive API invocations. struct IoCtx { @@ -1695,7 +1695,7 @@ async fn nvmf_reset_abort_io() { .await; // Once all handles are closed, destroy the device. - url = Arc::clone(&u); + url = u.clone(); ms.spawn(async move { device_destroy(&(*url)).await.unwrap(); }) diff --git a/mayastor/tests/common/bdev_io.rs b/mayastor/tests/common/bdev_io.rs index b21be8ea8..91ee7f78d 100644 --- a/mayastor/tests/common/bdev_io.rs +++ b/mayastor/tests/common/bdev_io.rs @@ -42,3 +42,14 @@ pub async fn read_some( } Ok(()) } + +pub async fn write_zeroes_some( + nexus_name: &str, + offset: u64, + len: u64, +) -> Result<(), CoreError> { + let h = BdevHandle::open(nexus_name, true, false)?; + + h.write_zeroes_at(offset, len).await?; + Ok(()) +} diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 98928ef77..044032fa1 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -1,6 +1,6 @@ pub use composer::*; -use crossbeam::crossbeam_channel::bounded; +use crossbeam::channel::bounded; use std::future::Future; use tokio::sync::oneshot::channel; diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 8ac090ad0..8c6a2276f 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -393,12 +393,12 @@ pub fn compare_devices( stdout } -pub fn device_path_from_uri(device_uri: String) -> String { +pub fn device_path_from_uri(device_uri: &str) -> String { assert_ne!( - Url::parse(device_uri.as_str()), + Url::parse(device_uri), Err(ParseError::RelativeUrlWithoutBase) ); - let url = Url::parse(device_uri.as_str()).unwrap(); + let url = Url::parse(device_uri).unwrap(); String::from(url.path()) } diff --git a/mayastor/tests/core.rs b/mayastor/tests/core.rs index a25a71794..c72fded41 100644 --- a/mayastor/tests/core.rs +++ b/mayastor/tests/core.rs @@ -182,7 +182,7 @@ async fn core_4() { } else { nexus.add_child(BDEVNAME2, true).await.expect_err( &format!( - "Case {} - Child should have been added", + "Case {} - Child should not have been added", test_case_index ), ); @@ -230,7 +230,7 @@ async fn core_5() { .unwrap(); let nexus = 
nexus_lookup(nexus_name).unwrap(); let device = common::device_path_from_uri( - nexus + &nexus .share(ShareProtocolNexus::NexusNbd, None) .await .unwrap(), diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index 4f1ec7e2f..3506a5694 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -15,8 +15,8 @@ static DISKNAME2: &str = "/tmp/disk2.img"; #[tokio::test] async fn lvs_pool_test() { common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]); - common::truncate_file(DISKNAME1, 64 * 1024); - common::truncate_file(DISKNAME2, 64 * 1024); + common::truncate_file(DISKNAME1, 128 * 1024); + common::truncate_file(DISKNAME2, 128 * 1024); let args = MayastorCliArgs { reactor_mask: "0x3".into(), ..Default::default() @@ -123,9 +123,14 @@ async fn lvs_pool_test() { ms.spawn(async { let pool = Lvs::lookup("tpool").unwrap(); for i in 0 .. 10 { - pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) - .await - .unwrap(); + pool.create_lvol( + &format!("vol-{}", i), + 8 * 1024 * 1024, + None, + true, + ) + .await + .unwrap(); } assert_eq!(pool.lvols().unwrap().count(), 10); @@ -143,7 +148,12 @@ async fn lvs_pool_test() { for i in 0 .. 5 { pool2 - .create_lvol(&format!("pool2-vol-{}", i), 4 * 1024, false) + .create_lvol( + &format!("pool2-vol-{}", i), + 8 * 1024 * 1024, + None, + false, + ) .await .unwrap(); } @@ -204,7 +214,10 @@ async fn lvs_pool_test() { // test setting the share property that is stored on disk ms.spawn(async { let pool = Lvs::lookup("tpool").unwrap(); - let lvol = pool.create_lvol("vol-1", 1024 * 4, false).await.unwrap(); + let lvol = pool + .create_lvol("vol-1", 1024 * 1024 * 8, None, false) + .await + .unwrap(); lvol.set(PropValue::Shared(true)).await.unwrap(); assert_eq!( @@ -243,16 +256,23 @@ async fn lvs_pool_test() { let pool = Lvs::lookup("tpool").unwrap(); for i in 0 .. 
10 { - pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) - .await - .unwrap(); + pool.create_lvol( + &format!("vol-{}", i), + 8 * 1024 * 1024, + None, + true, + ) + .await + .unwrap(); } for l in pool.lvols().unwrap() { l.share_nvmf(None).await.unwrap(); } - pool.create_lvol("notshared", 4 * 1024, true).await.unwrap(); + pool.create_lvol("notshared", 8 * 1024 * 1024, None, true) + .await + .unwrap(); pool.export().await.unwrap(); }) diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs deleted file mode 100644 index 39bed762f..000000000 --- a/mayastor/tests/lvs_pool_rpc.rs +++ /dev/null @@ -1,139 +0,0 @@ -use rpc::mayastor::{ - CreatePoolRequest, - CreateReplicaRequest, - DestroyPoolRequest, - DestroyReplicaRequest, - Null, - ShareReplicaRequest, -}; - -pub mod common; -use common::compose::Builder; - -#[tokio::test] -async fn lvs_pool_rpc() { - let test = Builder::new() - .name("lvs-pool-grpc") - .with_clean(true) - .network("10.1.0.0/16") - .add_container("ms1") - .build() - .await - .unwrap(); - - // testing basic rpc methods - let mut handles = test.grpc_handles().await.unwrap(); - let gdl = handles.get_mut(0).unwrap(); - - // create a pool - gdl.mayastor - .create_pool(CreatePoolRequest { - name: "tpool".to_string(), - disks: vec!["malloc:///disk0?size_mb=64".into()], - }) - .await - .unwrap(); - - gdl.mayastor - .create_pool(CreatePoolRequest { - name: "tpool".to_string(), - disks: vec!["malloc:///disk0?size_mb=64".into()], - }) - .await - .unwrap(); - //list the pool - let list = gdl.mayastor.list_pools(Null {}).await.unwrap(); - - assert_eq!(list.into_inner().pools.len(), 1); - - // create replica not shared - gdl.mayastor - .create_replica(CreateReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - pool: "tpool".to_string(), - size: 4 * 1024, - thin: false, - share: 0, - }) - .await - .unwrap(); - - // should succeed - gdl.mayastor - .create_replica(CreateReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - pool: "tpool".to_string(), - size: 4 * 1024, - thin: false, - share: 0, - }) - .await - .unwrap(); - - // share replica - gdl.mayastor - .share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - share: 1, - }) - .await - .unwrap(); - - // share again, should succeed - gdl.mayastor - .share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - share: 1, - }) - .await - .unwrap(); - - // assert we are shared - assert!(gdl - .mayastor - .list_replicas(Null {}) - .await - .unwrap() - .into_inner() - .replicas[0] - .uri - .contains("nvmf://")); - - // unshare it - gdl.mayastor - .share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - share: 0, - }) - .await - .unwrap(); - - // assert we are not shared - assert!(gdl - .mayastor - .list_replicas(Null {}) - .await - .unwrap() - .into_inner() - .replicas[0] - .uri - .contains("bdev://")); - - // destroy the replica - gdl.mayastor - .destroy_replica(DestroyReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), - }) - .await - .unwrap(); - - // destroy the pool - gdl.mayastor - .destroy_pool(DestroyPoolRequest { - name: "tpool".to_string(), - }) - .await - .unwrap(); - - test.logs("ms1").await.unwrap(); -} diff --git a/mayastor/tests/nexus_child_location.rs b/mayastor/tests/nexus_child_location.rs index d9bf1cff2..9d7e8e81a 100644 --- a/mayastor/tests/nexus_child_location.rs +++ b/mayastor/tests/nexus_child_location.rs 
@@ -42,28 +42,11 @@ async fn child_location() { .await .unwrap(); - // Create and share a bdev over iscsi - hdls[0] - .bdev - .create(BdevUri { - uri: "malloc:///disk1?size_mb=100".into(), - }) - .await - .unwrap(); - hdls[0] - .bdev - .share(BdevShareRequest { - name: "disk1".into(), - proto: "iscsi".into(), - }) - .await - .unwrap(); - let mayastor = MayastorTest::new(MayastorCliArgs::default()); mayastor .spawn(async move { - // Create a nexus with a local child, and two remote children (one - // exported over nvmf and the other over iscsi). + // Create a nexus with a local child, and one remote child + // (exported over nvmf). nexus_create( NEXUS_NAME, 1024 * 1024 * 50, @@ -74,10 +57,6 @@ async fn child_location() { "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", hdls[0].endpoint.ip() ), - format!( - "iscsi://{}:3260/iqn.2019-05.io.openebs:disk1/0", - hdls[0].endpoint.ip() - ), ], ) .await @@ -85,10 +64,9 @@ async fn child_location() { let nexus = nexus_lookup(NEXUS_NAME).expect("Failed to find nexus"); let children = &nexus.children; - assert_eq!(children.len(), 3); + assert_eq!(children.len(), 2); assert!(children[0].is_local().unwrap()); assert!(!children[1].is_local().unwrap()); - assert!(!children[2].is_local().unwrap()); }) .await; } diff --git a/mayastor/tests/nexus_children_add_remove.rs b/mayastor/tests/nexus_children_add_remove.rs index 2ca4c7a3d..0f1dcf6d0 100644 --- a/mayastor/tests/nexus_children_add_remove.rs +++ b/mayastor/tests/nexus_children_add_remove.rs @@ -78,6 +78,33 @@ async fn remove_children_from_nexus() { .await .expect("failed to remove child from nexus"); + ms.spawn(async { + let nexus = + nexus_lookup("remove_from_nexus").expect("nexus is not found!"); + nexus.remove_child(&format!("uring:///{}", DISKNAME2)).await + }) + .await + .expect_err("cannot remove the last child from nexus"); + + // add new child but don't rebuild, so it's not healthy! + ms.spawn(async { + let nexus = + nexus_lookup("remove_from_nexus").expect("nexus is not found!"); + nexus + .add_child(&format!("uring:///{}", DISKNAME1), true) + .await + }) + .await + .expect("should be able to add a child back"); + + ms.spawn(async { + let nexus = + nexus_lookup("remove_from_nexus").expect("nexus is not found!"); + nexus.remove_child(&format!("uring:///{}", DISKNAME2)).await + }) + .await + .expect_err("cannot remove the last healthy child from nexus"); + // destroy it ms.spawn(async { let nexus = diff --git a/mayastor/tests/nexus_io.rs b/mayastor/tests/nexus_io.rs new file mode 100644 index 000000000..58a749227 --- /dev/null +++ b/mayastor/tests/nexus_io.rs @@ -0,0 +1,608 @@ +//! 
Nexus IO tests for multipath NVMf, reservation, and write-zeroes +use common::bdev_io; +use mayastor::{ + bdev::{nexus_create, nexus_create_v2, nexus_lookup, NexusNvmeParams}, + core::MayastorCliArgs, + lvs::Lvs, +}; +use once_cell::sync::OnceCell; +use rpc::mayastor::{ + CreateNexusRequest, + CreateNexusV2Request, + CreatePoolRequest, + CreateReplicaRequest, + DestroyNexusRequest, + Null, + NvmeAnaState, + PublishNexusRequest, + ShareProtocolNexus, +}; +use std::process::{Command, ExitStatus}; + +pub mod common; +use common::{compose::Builder, MayastorTest}; + +static POOL_NAME: &str = "tpool"; +static NXNAME: &str = "nexus0"; +static UUID: &str = "cdc2a7db-3ac3-403a-af80-7fadc1581c47"; +static HOSTNQN: &str = "nqn.2019-05.io.openebs"; +static HOSTID0: &str = "53b35ce9-8e71-49a9-ab9b-cba7c5670fad"; +static HOSTID1: &str = "c1affd2d-ef79-4ba4-b5cf-8eb48f9c07d0"; + +static DISKNAME1: &str = "/tmp/disk1.img"; +static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; +static DISKNAME2: &str = "/tmp/disk2.img"; +static BDEVNAME2: &str = "aio:///host/tmp/disk2.img?blk_size=512"; + +static MAYASTOR: OnceCell = OnceCell::new(); + +fn get_ms() -> &'static MayastorTest<'static> { + MAYASTOR.get_or_init(|| MayastorTest::new(MayastorCliArgs::default())) +} + +fn nvme_connect( + target_addr: &str, + nqn: &str, + must_succeed: bool, +) -> ExitStatus { + let status = Command::new("nvme") + .args(&["connect"]) + .args(&["-t", "tcp"]) + .args(&["-a", target_addr]) + .args(&["-s", "8420"]) + .args(&["-n", nqn]) + .status() + .unwrap(); + + if !status.success() { + let msg = format!( + "failed to connect to {}, nqn {}: {}", + target_addr, nqn, status, + ); + if must_succeed { + panic!("{}", msg); + } else { + eprintln!("{}", msg); + } + } else { + std::thread::sleep(std::time::Duration::from_secs(1)); + } + + status +} + +fn get_mayastor_nvme_device() -> String { + let output_list = Command::new("nvme").args(&["list"]).output().unwrap(); + assert!( + output_list.status.success(), + "failed to list nvme devices, {}", + output_list.status + ); + let sl = String::from_utf8(output_list.stdout).unwrap(); + let nvmems: Vec<&str> = sl + .lines() + .filter(|line| line.contains("Mayastor NVMe controller")) + .collect(); + assert_eq!(nvmems.len(), 1); + let ns = nvmems[0].split(' ').collect::>()[0]; + ns.to_string() +} + +fn get_nvme_resv_report(nvme_dev: &str) -> serde_json::Value { + let output_resv = Command::new("nvme") + .args(&["resv-report"]) + .args(&[nvme_dev]) + .args(&["-c", "1"]) + .args(&["-o", "json"]) + .output() + .unwrap(); + assert!( + output_resv.status.success(), + "failed to get reservation report from {}: {}", + nvme_dev, + output_resv.status + ); + let resv_rep = String::from_utf8(output_resv.stdout).unwrap(); + let v: serde_json::Value = + serde_json::from_str(&resv_rep).expect("JSON was not well-formatted"); + v +} + +fn nvme_disconnect_nqn(nqn: &str) { + let output_dis = Command::new("nvme") + .args(&["disconnect"]) + .args(&["-n", nqn]) + .output() + .unwrap(); + assert!( + output_dis.status.success(), + "failed to disconnect from {}: {}", + nqn, + output_dis.status + ); +} + +#[tokio::test] +#[ignore] +/// Create the same nexus on both nodes with a replica on 1 node as their child. 
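// In outline: the test creates a pool and an nvmf-shared replica in a remote
// container, builds a nexus around that replica both remotely and locally,
// publishes both nexuses over NVMe-oF, connects to both paths with nvme-cli,
// flips the local path to ANA non-optimized and checks `nvme list-subsys`,
// then disconnects and verifies the replica's NVMe reservation registration.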
+async fn nexus_io_multipath() { + std::env::set_var("NEXUS_NVMF_ANA_ENABLE", "1"); + std::env::set_var("NEXUS_NVMF_RESV_ENABLE", "1"); + // create a new composeTest + let test = Builder::new() + .name("nexus_shared_replica_test") + .network("10.1.0.0/16") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdls = test.grpc_handles().await.unwrap(); + + // create a pool on remote node + hdls[0] + .mayastor + .create_pool(CreatePoolRequest { + name: POOL_NAME.to_string(), + disks: vec!["malloc:///disk0?size_mb=64".into()], + }) + .await + .unwrap(); + + // create replica, shared over nvmf + hdls[0] + .mayastor + .create_replica(CreateReplicaRequest { + uuid: UUID.to_string(), + pool: POOL_NAME.to_string(), + size: 32 * 1024 * 1024, + thin: false, + share: 1, + }) + .await + .unwrap(); + + // create nexus on remote node with local replica as child + hdls[0] + .mayastor + .create_nexus(CreateNexusRequest { + uuid: UUID.to_string(), + size: 32 * 1024 * 1024, + children: [format!("loopback:///{}", UUID)].to_vec(), + }) + .await + .unwrap(); + + let mayastor = get_ms(); + let ip0 = hdls[0].endpoint.ip(); + let nexus_name = format!("nexus-{}", UUID); + let name = nexus_name.clone(); + mayastor + .spawn(async move { + // create nexus on local node with remote replica as child + nexus_create( + &name, + 32 * 1024 * 1024, + Some(UUID), + &[format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID)], + ) + .await + .unwrap(); + // publish nexus on local node over nvmf + nexus_lookup(&name) + .unwrap() + .share(ShareProtocolNexus::NexusNvmf, None) + .await + .unwrap(); + }) + .await; + + // publish nexus on other node + hdls[0] + .mayastor + .publish_nexus(PublishNexusRequest { + uuid: UUID.to_string(), + key: "".to_string(), + share: ShareProtocolNexus::NexusNvmf as i32, + }) + .await + .unwrap(); + + let nqn = format!("{}:nexus-{}", HOSTNQN, UUID); + nvme_connect("127.0.0.1", &nqn, true); + + // The first attempt will fail with "Duplicate cntlid x with y" error from + // kernel + for i in 0 .. 
2 { + let status_c0 = nvme_connect(&ip0.to_string(), &nqn, false); + if i == 0 && status_c0.success() { + break; + } + assert!( + status_c0.success() || i != 1, + "failed to connect to remote nexus, {}", + status_c0 + ); + } + + let ns = get_mayastor_nvme_device(); + + mayastor + .spawn(async move { + // set nexus on local node ANA state to non-optimized + nexus_lookup(&nexus_name) + .unwrap() + .set_ana_state(NvmeAnaState::NvmeAnaNonOptimizedState) + .await + .unwrap(); + }) + .await; + + // +- nvme0 tcp traddr=127.0.0.1 trsvcid=8420 live + let output_subsys = Command::new("nvme") + .args(&["list-subsys"]) + .args(&[ns]) + .output() + .unwrap(); + assert!( + output_subsys.status.success(), + "failed to list nvme subsystem, {}", + output_subsys.status + ); + let subsys = String::from_utf8(output_subsys.stdout).unwrap(); + let nvmec: Vec<&str> = subsys + .lines() + .filter(|line| line.contains("traddr=127.0.0.1")) + .collect(); + assert_eq!(nvmec.len(), 1); + let nv: Vec<&str> = nvmec[0].split(' ').collect(); + assert_eq!(nv[7], "non-optimized", "incorrect ANA state"); + + // NQN: disconnected 2 controller(s) + let output_dis = Command::new("nvme") + .args(&["disconnect"]) + .args(&["-n", &nqn]) + .output() + .unwrap(); + assert!( + output_dis.status.success(), + "failed to disconnect from nexuses, {}", + output_dis.status + ); + let s = String::from_utf8(output_dis.stdout).unwrap(); + let v: Vec<&str> = s.split(' ').collect(); + tracing::info!("nvme disconnected: {:?}", v); + assert_eq!(v.len(), 4); + assert_eq!(v[1], "disconnected"); + assert_eq!(v[0], format!("NQN:{}", &nqn), "mismatched NQN disconnected"); + assert_eq!(v[2], "2", "mismatched number of controllers disconnected"); + + // Connect to remote replica to check key registered + let rep_nqn = format!("{}:{}", HOSTNQN, UUID); + nvme_connect(&ip0.to_string(), &rep_nqn, true); + + let rep_dev = get_mayastor_nvme_device(); + + let v = get_nvme_resv_report(&rep_dev); + assert_eq!(v["rtype"], 0, "should have no reservation type"); + assert_eq!(v["regctl"], 1, "should have 1 registered controller"); + assert_eq!( + v["ptpls"], 0, + "should have Persist Through Power Loss State as 0" + ); + assert_eq!( + v["regctlext"][0]["cntlid"], 0xffff, + "should have dynamic controller ID" + ); + assert_eq!( + v["regctlext"][0]["rcsts"], 0, + "should have reservation status as no reservation" + ); + assert_eq!( + v["regctlext"][0]["rkey"], 0x12345678, + "should have default registered key" + ); + + nvme_disconnect_nqn(&rep_nqn); + + // destroy nexus on remote node + hdls[0] + .mayastor + .destroy_nexus(DestroyNexusRequest { + uuid: UUID.to_string(), + }) + .await + .unwrap(); + + // verify that the replica is still shared over nvmf + assert!(hdls[0] + .mayastor + .list_replicas(Null {}) + .await + .unwrap() + .into_inner() + .replicas[0] + .uri + .contains("nvmf://")); +} + +#[tokio::test] +/// Create a nexus with a remote replica on 1 node as its child. +/// Create another nexus with the same remote replica as its child, verifying +/// that the write exclusive, all registrants reservation has also been +/// registered by the new nexus. 
+async fn nexus_io_resv_acquire() { + std::env::set_var("NEXUS_NVMF_RESV_ENABLE", "1"); + std::env::set_var("MAYASTOR_NVMF_HOSTID", HOSTID0); + let test = Builder::new() + .name("nexus_resv_acquire_test") + .network("10.1.0.0/16") + .add_container_bin( + "ms2", + composer::Binary::from_dbg("mayastor") + .with_env("NEXUS_NVMF_RESV_ENABLE", "1") + .with_env("MAYASTOR_NVMF_HOSTID", HOSTID1), + ) + .add_container_bin( + "ms1", + composer::Binary::from_dbg("mayastor") + .with_env("NEXUS_NVMF_RESV_ENABLE", "1") + .with_env("MAYASTOR_NVMF_HOSTID", HOSTID1), + ) + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdls = test.grpc_handles().await.unwrap(); + + // create a pool on remote node 1 + // grpc handles can be returned in any order, we simply define the first + // as "node 1" + hdls[0] + .mayastor + .create_pool(CreatePoolRequest { + name: POOL_NAME.to_string(), + disks: vec!["malloc:///disk0?size_mb=64".into()], + }) + .await + .unwrap(); + + // create replica, shared over nvmf + hdls[0] + .mayastor + .create_replica(CreateReplicaRequest { + uuid: UUID.to_string(), + pool: POOL_NAME.to_string(), + size: 32 * 1024 * 1024, + thin: false, + share: 1, + }) + .await + .unwrap(); + + let mayastor = get_ms(); + let ip0 = hdls[0].endpoint.ip(); + let resv_key = 0xabcd_ef00_1234_5678; + mayastor + .spawn(async move { + let mut nvme_params = NexusNvmeParams::default(); + nvme_params.set_resv_key(resv_key); + // create nexus on local node with remote replica as child + nexus_create_v2( + &NXNAME.to_string(), + 32 * 1024 * 1024, + UUID, + nvme_params, + &[format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID)], + ) + .await + .unwrap(); + bdev_io::write_some(&NXNAME.to_string(), 0, 0xff) + .await + .unwrap(); + bdev_io::read_some(&NXNAME.to_string(), 0, 0xff) + .await + .unwrap(); + }) + .await; + + // Connect to remote replica to check key registered + let rep_nqn = format!("{}:{}", HOSTNQN, UUID); + nvme_connect(&ip0.to_string(), &rep_nqn, true); + + let rep_dev = get_mayastor_nvme_device(); + + let v = get_nvme_resv_report(&rep_dev); + assert_eq!( + v["rtype"], 5, + "should have write exclusive, all registrants reservation" + ); + assert_eq!(v["regctl"], 1, "should have 1 registered controller"); + assert_eq!( + v["ptpls"], 0, + "should have Persist Through Power Loss State as 0" + ); + assert_eq!( + v["regctlext"][0]["cntlid"], 0xffff, + "should have dynamic controller ID" + ); + assert_eq!( + v["regctlext"][0]["rcsts"], 1, + "should have reservation status as reserved" + ); + assert_eq!( + v["regctlext"][0]["hostid"].as_str().unwrap(), + HOSTID0.to_string().replace("-", ""), + "should match host ID of NVMe client" + ); + assert_eq!( + v["regctlext"][0]["rkey"], resv_key, + "should have configured registered key" + ); + + // create nexus on remote node 2 with replica on node 1 as child + let resv_key2 = 0xfeed_f00d_bead_5678; + hdls[1] + .mayastor + .create_nexus_v2(CreateNexusV2Request { + name: NXNAME.to_string(), + uuid: UUID.to_string(), + size: 32 * 1024 * 1024, + min_cntl_id: 1, + max_cntl_id: 0xffef, + resv_key: resv_key2, + preempt_key: 0, + children: [format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID)] + .to_vec(), + }) + .await + .unwrap(); + + // Verify that the second nexus has registered + let v2 = get_nvme_resv_report(&rep_dev); + assert_eq!( + v2["rtype"], 5, + "should have write exclusive, all registrants reservation" + ); + assert_eq!(v2["regctl"], 2, "should have 2 registered controllers"); + assert_eq!( + v2["ptpls"], 0, + "should have Persist Through Power Loss State 
as 0" + ); + assert_eq!( + v2["regctlext"][1]["cntlid"], 0xffff, + "should have dynamic controller ID" + ); + assert_eq!( + v2["regctlext"][1]["rcsts"].as_u64().unwrap() & 0x1, + 0, + "should have reservation status as not reserved" + ); + assert_eq!( + v2["regctlext"][1]["rkey"], resv_key2, + "should have configured registered key" + ); + assert_eq!( + v2["regctlext"][1]["hostid"].as_str().unwrap(), + HOSTID1.to_string().replace("-", ""), + "should match host ID of NVMe client" + ); + + mayastor + .spawn(async move { + bdev_io::write_some(&NXNAME.to_string(), 0, 0xff) + .await + .expect("writes should still succeed"); + bdev_io::read_some(&NXNAME.to_string(), 0, 0xff) + .await + .expect("reads should succeed"); + + nexus_lookup(&NXNAME.to_string()) + .unwrap() + .destroy() + .await + .unwrap(); + }) + .await; + + nvme_disconnect_nqn(&rep_nqn); +} + +#[tokio::test] +/// Create a nexus with a local and a remote replica. +/// Verify that write-zeroes does actually write zeroes. +async fn nexus_io_write_zeroes() { + common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]); + common::truncate_file(DISKNAME1, 64 * 1024); + common::truncate_file(DISKNAME2, 64 * 1024); + + let test = Builder::new() + .name("nexus_io_write_zeroes_test") + .network("10.1.0.0/16") + .add_container_bin( + "ms1", + composer::Binary::from_dbg("mayastor") + .with_bind("/tmp", "/host/tmp"), + ) + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdls = test.grpc_handles().await.unwrap(); + + // create a pool on remote node + hdls[0] + .mayastor + .create_pool(CreatePoolRequest { + name: POOL_NAME.to_string(), + disks: vec![BDEVNAME2.to_string()], + }) + .await + .unwrap(); + + // create replica, shared over nvmf + hdls[0] + .mayastor + .create_replica(CreateReplicaRequest { + uuid: UUID.to_string(), + pool: POOL_NAME.to_string(), + size: 32 * 1024 * 1024, + thin: false, + share: 1, + }) + .await + .unwrap(); + + let mayastor = get_ms(); + let ip0 = hdls[0].endpoint.ip(); + let nexus_name = format!("nexus-{}", UUID); + let name = nexus_name.clone(); + mayastor + .spawn(async move { + // Create local pool and replica + Lvs::create_or_import(CreatePoolRequest { + name: POOL_NAME.to_string(), + disks: vec![BDEVNAME1.to_string()], + }) + .await + .unwrap(); + + let pool = Lvs::lookup(POOL_NAME).unwrap(); + pool.create_lvol(&UUID.to_string(), 32 * 1024 * 1024, None, true) + .await + .unwrap(); + + // create nexus on local node with 2 children, local and remote + nexus_create( + &name, + 32 * 1024 * 1024, + Some(UUID), + &[ + format!("loopback:///{}", UUID), + format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID), + ], + ) + .await + .unwrap(); + + bdev_io::write_some(&name, 0, 0xff).await.unwrap(); + // Read twice to ensure round-robin read from both replicas + bdev_io::read_some(&name, 0, 0xff) + .await + .expect("read should return block of 0xff"); + bdev_io::read_some(&name, 0, 0xff) + .await + .expect("read should return block of 0xff"); + bdev_io::write_zeroes_some(&name, 0, 512).await.unwrap(); + bdev_io::read_some(&name, 0, 0) + .await + .expect("read should return block of 0"); + bdev_io::read_some(&name, 0, 0) + .await + .expect("read should return block of 0"); + }) + .await; +} diff --git a/mayastor/tests/nexus_multipath.rs b/mayastor/tests/nexus_multipath.rs deleted file mode 100644 index 3c8eb062d..000000000 --- a/mayastor/tests/nexus_multipath.rs +++ /dev/null @@ -1,297 +0,0 @@ -//! Multipath NVMf tests -//! Create the same nexus on both nodes with a replica on 1 node as their child. 
-use mayastor::{
-    bdev::{nexus_create, nexus_lookup},
-    core::MayastorCliArgs,
-};
-use rpc::mayastor::{
-    CreateNexusRequest,
-    CreatePoolRequest,
-    CreateReplicaRequest,
-    DestroyNexusRequest,
-    Null,
-    NvmeAnaState,
-    PublishNexusRequest,
-    ShareProtocolNexus,
-};
-use std::process::Command;
-
-pub mod common;
-use common::{compose::Builder, MayastorTest};
-
-static POOL_NAME: &str = "tpool";
-static UUID: &str = "cdc2a7db-3ac3-403a-af80-7fadc1581c47";
-static HOSTNQN: &str = "nqn.2019-05.io.openebs";
-
-fn get_mayastor_nvme_device() -> String {
-    let output_list = Command::new("nvme").args(&["list"]).output().unwrap();
-    assert!(
-        output_list.status.success(),
-        "failed to list nvme devices, {}",
-        output_list.status
-    );
-    let sl = String::from_utf8(output_list.stdout).unwrap();
-    let nvmems: Vec<&str> = sl
-        .lines()
-        .filter(|line| line.contains("Mayastor NVMe controller"))
-        .collect();
-    assert_eq!(nvmems.len(), 1);
-    let ns = nvmems[0].split(' ').collect::<Vec<&str>>()[0];
-    ns.to_string()
-}
-
-#[tokio::test]
-async fn nexus_multipath() {
-    std::env::set_var("NEXUS_NVMF_ANA_ENABLE", "1");
-    // create a new composeTest
-    let test = Builder::new()
-        .name("nexus_shared_replica_test")
-        .network("10.1.0.0/16")
-        .add_container("ms1")
-        .with_clean(true)
-        .build()
-        .await
-        .unwrap();
-
-    let mut hdls = test.grpc_handles().await.unwrap();
-
-    // create a pool on remote node
-    hdls[0]
-        .mayastor
-        .create_pool(CreatePoolRequest {
-            name: POOL_NAME.to_string(),
-            disks: vec!["malloc:///disk0?size_mb=64".into()],
-        })
-        .await
-        .unwrap();
-
-    // create replica, shared over nvmf
-    hdls[0]
-        .mayastor
-        .create_replica(CreateReplicaRequest {
-            uuid: UUID.to_string(),
-            pool: POOL_NAME.to_string(),
-            size: 32 * 1024 * 1024,
-            thin: false,
-            share: 1,
-        })
-        .await
-        .unwrap();
-
-    // create nexus on remote node with local replica as child
-    hdls[0]
-        .mayastor
-        .create_nexus(CreateNexusRequest {
-            uuid: UUID.to_string(),
-            size: 32 * 1024 * 1024,
-            children: [format!("loopback:///{}", UUID)].to_vec(),
-        })
-        .await
-        .unwrap();
-
-    let mayastor = MayastorTest::new(MayastorCliArgs::default());
-    let ip0 = hdls[0].endpoint.ip();
-    let nexus_name = format!("nexus-{}", UUID);
-    let name = nexus_name.clone();
-    mayastor
-        .spawn(async move {
-            // create nexus on local node with remote replica as child
-            nexus_create(
-                &name,
-                32 * 1024 * 1024,
-                Some(UUID),
-                &[format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID)],
-            )
-            .await
-            .unwrap();
-            // publish nexus on local node over nvmf
-            nexus_lookup(&name)
-                .unwrap()
-                .share(ShareProtocolNexus::NexusNvmf, None)
-                .await
-                .unwrap();
-        })
-        .await;
-
-    // publish nexus on other node
-    hdls[0]
-        .mayastor
-        .publish_nexus(PublishNexusRequest {
-            uuid: UUID.to_string(),
-            key: "".to_string(),
-            share: ShareProtocolNexus::NexusNvmf as i32,
-        })
-        .await
-        .unwrap();
-
-    let nqn = format!("{}:nexus-{}", HOSTNQN, UUID);
-    let status = Command::new("nvme")
-        .args(&["connect"])
-        .args(&["-t", "tcp"])
-        .args(&["-a", "127.0.0.1"])
-        .args(&["-s", "8420"])
-        .args(&["-n", &nqn])
-        .status()
-        .unwrap();
-    assert!(
-        status.success(),
-        "failed to connect to local nexus, {}",
-        status
-    );
-
-    // The first attempt will fail with "Duplicate cntlid x with y" error from
-    // kernel
-    for i in 0 .. 
2 { - let status_c0 = Command::new("nvme") - .args(&["connect"]) - .args(&["-t", "tcp"]) - .args(&["-a", &ip0.to_string()]) - .args(&["-s", "8420"]) - .args(&["-n", &nqn]) - .status() - .unwrap(); - if i == 0 && status_c0.success() { - break; - } - assert!( - status_c0.success() || i != 1, - "failed to connect to remote nexus, {}", - status_c0 - ); - } - - let ns = get_mayastor_nvme_device(); - - mayastor - .spawn(async move { - // set nexus on local node ANA state to non-optimized - nexus_lookup(&nexus_name) - .unwrap() - .set_ana_state(NvmeAnaState::NvmeAnaNonOptimizedState) - .await - .unwrap(); - }) - .await; - - // +- nvme0 tcp traddr=127.0.0.1 trsvcid=8420 live - let output_subsys = Command::new("nvme") - .args(&["list-subsys"]) - .args(&[ns]) - .output() - .unwrap(); - assert!( - output_subsys.status.success(), - "failed to list nvme subsystem, {}", - output_subsys.status - ); - let subsys = String::from_utf8(output_subsys.stdout).unwrap(); - let nvmec: Vec<&str> = subsys - .lines() - .filter(|line| line.contains("traddr=127.0.0.1")) - .collect(); - assert_eq!(nvmec.len(), 1); - let nv: Vec<&str> = nvmec[0].split(' ').collect(); - assert_eq!(nv[7], "non-optimized", "incorrect ANA state"); - - // NQN: disconnected 2 controller(s) - let output_dis = Command::new("nvme") - .args(&["disconnect"]) - .args(&["-n", &nqn]) - .output() - .unwrap(); - assert!( - output_dis.status.success(), - "failed to disconnect from nexuses, {}", - output_dis.status - ); - let s = String::from_utf8(output_dis.stdout).unwrap(); - let v: Vec<&str> = s.split(' ').collect(); - tracing::info!("nvme disconnected: {:?}", v); - assert_eq!(v.len(), 4); - assert_eq!(v[1], "disconnected"); - assert_eq!(v[0], format!("NQN:{}", &nqn), "mismatched NQN disconnected"); - assert_eq!(v[2], "2", "mismatched number of controllers disconnected"); - - // Connect to remote replica to check key registered - let rep_nqn = format!("{}:{}", HOSTNQN, UUID); - let status = Command::new("nvme") - .args(&["connect"]) - .args(&["-t", "tcp"]) - .args(&["-a", &ip0.to_string()]) - .args(&["-s", "8420"]) - .args(&["-n", &rep_nqn]) - .status() - .unwrap(); - assert!( - status.success(), - "failed to connect to remote replica, {}", - status - ); - - let rep_dev = get_mayastor_nvme_device(); - - let output_resv = Command::new("nvme") - .args(&["resv-report"]) - .args(&[rep_dev]) - .args(&["-c", "1"]) - .args(&["-o", "json"]) - .output() - .unwrap(); - assert!( - output_resv.status.success(), - "failed to get reservation report from remote replica, {}", - output_resv.status - ); - let resv_rep = String::from_utf8(output_resv.stdout).unwrap(); - let v: serde_json::Value = - serde_json::from_str(&resv_rep).expect("JSON was not well-formatted"); - assert_eq!(v["rtype"], 0, "should have no reservation type"); - assert_eq!(v["regctl"], 1, "should have 1 registered controller"); - assert_eq!( - v["ptpls"], 0, - "should have Persist Through Power Loss State as 0" - ); - assert_eq!( - v["regctlext"][0]["cntlid"], 0xffff, - "should have dynamic controller ID" - ); - assert_eq!( - v["regctlext"][0]["rcsts"], 0, - "should have reservation status as no reservation" - ); - assert_eq!( - v["regctlext"][0]["rkey"], 0x12345678, - "should have default registered key" - ); - - let output_dis2 = Command::new("nvme") - .args(&["disconnect"]) - .args(&["-n", &rep_nqn]) - .output() - .unwrap(); - assert!( - output_dis2.status.success(), - "failed to disconnect from remote replica, {}", - output_dis2.status - ); - - // destroy nexus on remote node - hdls[0] - 
.mayastor - .destroy_nexus(DestroyNexusRequest { - uuid: UUID.to_string(), - }) - .await - .unwrap(); - - // verify that the replica is still shared over nvmf - assert!(hdls[0] - .mayastor - .list_replicas(Null {}) - .await - .unwrap() - .into_inner() - .replicas[0] - .uri - .contains("nvmf://")); -} diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index ed31edbb4..ae3836095 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -103,7 +103,7 @@ async fn nexus_create(size: u64, children: u64, fill_random: bool) { async fn nexus_share() -> String { let nexus = nexus_lookup(nexus_name()).unwrap(); let device = common::device_path_from_uri( - nexus + &nexus .share(ShareProtocolNexus::NexusNbd, None) .await .unwrap(), diff --git a/mayastor/tests/persistence.rs b/mayastor/tests/persistence.rs index 9ebcb984f..af9b22889 100644 --- a/mayastor/tests/persistence.rs +++ b/mayastor/tests/persistence.rs @@ -3,6 +3,7 @@ use common::compose::Builder; use composer::{Binary, ComposeTest, ContainerSpec, RpcHandle}; use etcd_client::Client; use rpc::mayastor::{ + AddChildNexusRequest, BdevShareRequest, BdevUri, Child, @@ -14,6 +15,8 @@ use rpc::mayastor::{ NexusState, Null, PublishNexusRequest, + RebuildStateRequest, + RemoveChildNexusRequest, ShareProtocolNexus, }; @@ -27,6 +30,7 @@ pub mod common; static ETCD_ENDPOINT: &str = "0.0.0.0:2379"; static CHILD1_UUID: &str = "d61b2fdf-1be8-457a-a481-70a42d0a2223"; static CHILD2_UUID: &str = "094ae8c6-46aa-4139-b4f2-550d39645db3"; +static CHILD3_UUID: &str = "ae09c08f-8909-4024-a9ae-c21a2a0596b9"; /// This test checks that when an unexpected restart occurs, all persisted info /// remains unchanged. In particular, the clean shutdown variable must be false. @@ -147,6 +151,7 @@ async fn persist_io_failure() { let ms1 = &mut test.grpc_handle("ms1").await.unwrap(); let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); + let ms4 = &mut test.grpc_handle("ms4").await.unwrap(); // Create bdevs and share over nvmf. let child1 = create_and_share_bdevs(ms2, CHILD1_UUID).await; @@ -214,6 +219,74 @@ async fn persist_io_failure() { // Expect child2 to be faulted due to an I/O error. let child = child_info(&nexus_info, &uuid(&child2)); assert!(!child.healthy); + + // Create new child and add to nexus + let child3 = create_and_share_bdevs(ms4, CHILD3_UUID).await; + + add_child_nexus(ms1, nexus_uuid, &child3, false).await; + + // Expect child3 to be degraded + assert_eq!( + get_nexus_state(ms1, nexus_uuid).await.unwrap(), + NexusState::NexusDegraded as i32 + ); + assert_eq!( + get_child(ms1, nexus_uuid, &child3).await.state, + ChildState::ChildDegraded as i32 + ); + + let response = etcd.get(nexus_uuid, None).await.expect("No entry found"); + let value = response.kvs().first().unwrap().value(); + let nexus_info: NexusInfo = serde_json::from_slice(value).unwrap(); + let child = child_info(&nexus_info, &uuid(&child3)); + assert!(!child.healthy); + + // Wait for rebuild to complete. 
+    loop {
+        let replica_name = child3.to_string();
+        let complete = match ms1
+            .mayastor
+            .get_rebuild_state(RebuildStateRequest {
+                uuid: nexus_uuid.to_string(),
+                uri: replica_name,
+            })
+            .await
+        {
+            Err(_e) => true, // Rebuild task completed and was removed
+            Ok(r) => r.into_inner().state == "complete",
+        };
+
+        if complete {
+            break;
+        } else {
+            tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+        }
+    }
+
+    // Expect child3 to be healthy once rebuild completes
+    assert_eq!(
+        get_nexus_state(ms1, nexus_uuid).await.unwrap(),
+        NexusState::NexusDegraded as i32
+    );
+    assert_eq!(
+        get_child(ms1, nexus_uuid, &child3).await.state,
+        ChildState::ChildOnline as i32
+    );
+
+    let response = etcd.get(nexus_uuid, None).await.expect("No entry found");
+    let value = response.kvs().first().unwrap().value();
+    let nexus_info: NexusInfo = serde_json::from_slice(value).unwrap();
+    let child = child_info(&nexus_info, &uuid(&child3));
+    assert!(child.healthy);
+
+    // Remove child3 and verify that it is unhealthy
+    remove_child_nexus(ms1, nexus_uuid, &child3).await;
+
+    let response = etcd.get(nexus_uuid, None).await.expect("No entry found");
+    let value = response.kvs().first().unwrap().value();
+    let nexus_info: NexusInfo = serde_json::from_slice(value).unwrap();
+    let child = child_info(&nexus_info, &uuid(&child3));
+    assert!(!child.healthy);
 }
 
 /// This test checks the behaviour when a connection to the persistent store is
@@ -290,6 +363,10 @@ async fn start_infrastructure(test_name: &str) -> ComposeTest {
             "ms3",
             Binary::from_dbg("mayastor").with_args(vec!["-p", &etcd_endpoint]),
         )
+        .add_container_bin(
+            "ms4",
+            Binary::from_dbg("mayastor").with_args(vec!["-p", &etcd_endpoint]),
+        )
         .build()
         .await
         .unwrap();
@@ -309,6 +386,32 @@ async fn create_nexus(hdl: &mut RpcHandle, uuid: &str, children: Vec<String>) {
         .expect("Failed to create nexus.");
 }
 
+async fn add_child_nexus(
+    hdl: &mut RpcHandle,
+    uuid: &str,
+    child: &str,
+    norebuild: bool,
+) {
+    hdl.mayastor
+        .add_child_nexus(AddChildNexusRequest {
+            uuid: uuid.to_string(),
+            uri: child.to_string(),
+            norebuild,
+        })
+        .await
+        .expect("Failed to add child to nexus.");
+}
+
+async fn remove_child_nexus(hdl: &mut RpcHandle, uuid: &str, child: &str) {
+    hdl.mayastor
+        .remove_child_nexus(RemoveChildNexusRequest {
+            uuid: uuid.to_string(),
+            uri: child.to_string(),
+        })
+        .await
+        .expect("Failed to remove child from nexus.");
+}
+
+/// Publish a nexus with the given UUID over NVMf.
 async fn publish_nexus(hdl: &mut RpcHandle, uuid: &str) -> String {
     hdl.mayastor
@@ -327,7 +430,7 @@ async fn create_and_share_bdevs(hdl: &mut RpcHandle, uuid: &str) -> String {
     hdl.bdev
         .create(BdevUri {
-            uri: "malloc:///disk0?size_mb=100".into(),
+            uri: "malloc:///disk0?size_mb=64".into(),
         })
         .await
         .unwrap();
diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs
index 91b1b58e3..86358b1fa 100644
--- a/mayastor/tests/replica_snapshot.rs
+++ b/mayastor/tests/replica_snapshot.rs
@@ -79,7 +79,7 @@ async fn replica_snapshot() {
         .await
         .unwrap();
     let pool = Lvs::lookup(POOL1_NAME).unwrap();
-    pool.create_lvol(UUID1, 64 * 1024 * 1024, true)
+    pool.create_lvol(UUID1, 64 * 1024 * 1024, None, true)
         .await
         .unwrap();
     create_nexus(0, &ip0).await;
diff --git a/mbus-api/Cargo.toml b/mbus-api/Cargo.toml
index 8407d0ab2..65232cbda 100644
--- a/mbus-api/Cargo.toml
+++ b/mbus-api/Cargo.toml
@@ -5,20 +5,19 @@ authors = ["Tiago Castro "]
 edition = "2018"
 
 [dependencies]
-nats = "0.8"
-tokio = { version = "1", features = ["full"] }
-futures = "0.3"
-serde_json = "1.0"
-async-trait = "0.1.36"
+nats = "0.15.2"
+tokio = { version = "1.10.0", features = ["full"] }
+futures = "0.3.16"
+serde_json = "1.0.66"
+async-trait = "0.1.51"
 dyn-clonable = "0.9.0"
-smol = "1.0.0"
-once_cell = "1.4.1"
-snafu = "0.6"
-strum = "0.19"
-strum_macros = "0.19"
-tracing = "0.1"
+once_cell = "1.8.0"
+snafu = "0.6.10"
+strum = "0.21.0"
+strum_macros = "0.21.1"
+tracing = "0.1.26"
 percent-encoding = "2.1.0"
-uuid = { version = "0.8", features = ["v4"] }
+uuid = { version = "0.8.2", features = ["v4"] }
 
 [dev-dependencies]
 composer = { path = "../composer" }
@@ -26,4 +25,4 @@ rpc = { path = "../rpc" }
 
 [dependencies.serde]
 features = ["derive"]
-version = "1.0"
+version = "1.0.127"
diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs
index 0c711151a..0507dfc6f 100644
--- a/mbus-api/src/lib.rs
+++ b/mbus-api/src/lib.rs
@@ -22,9 +22,8 @@ pub use mbus_nats::{
 pub use receive::*;
 pub use send::*;
 use serde::{de::StdError, Deserialize, Serialize};
-use smol::io;
 use snafu::{ResultExt, Snafu};
-use std::{fmt::Debug, marker::PhantomData, str::FromStr, time::Duration};
+use std::{fmt::Debug, io, marker::PhantomData, str::FromStr, time::Duration};
 use strum_macros::{AsRefStr, ToString};
 
 /// Result wrapper for send/receive
@@ -401,7 +400,7 @@ pub type BusMessage = nats::asynk::Message;
 /// MessageBus subscription
 pub type BusSubscription = nats::asynk::Subscription;
 /// MessageBus configuration options
-pub type BusOptions = nats::Options;
+pub type BusOptions = nats::asynk::Options;
 /// Save on typing
 pub type DynBus = Box<dyn Bus>;
 
@@ -415,6 +414,11 @@ pub struct TimeoutOptions {
     pub(crate) timeout_step: std::time::Duration,
     /// max number of retries following the initial attempt's timeout
    pub(crate) max_retries: Option<u32>,
+    /// Server tcp read timeout when no messages are received.
+    /// When this timeout is triggered we attempt to send a Ping to the server.
+    /// If a Pong is not received within the same timeout the nats client
+    /// disconnects from the server.
+    tcp_read_timeout: std::time::Duration,
 }
 
 impl TimeoutOptions {
@@ -427,6 +431,14 @@ impl TimeoutOptions {
     pub(crate) fn default_max_retries() -> u32 {
         6
     }
+    /// Default Server tcp read timeout when no messages are received.
+ pub(crate) fn default_tcp_read_timeout() -> Duration { + Duration::from_secs(30) + } + /// Get the tcp read timeout + pub(crate) fn tcp_read_timeout(&self) -> Duration { + self.tcp_read_timeout + } } impl Default for TimeoutOptions { @@ -435,6 +447,7 @@ impl Default for TimeoutOptions { timeout: Self::default_timeout(), timeout_step: Self::default_timeout_step(), max_retries: Some(Self::default_max_retries()), + tcp_read_timeout: Self::default_tcp_read_timeout(), } } } diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs index 181f7eb65..6f1b3fef3 100644 --- a/mbus-api/src/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -64,7 +64,10 @@ struct NatsMessageBus { connection: Connection, } impl NatsMessageBus { - pub async fn connect(server: &str) -> Connection { + pub async fn connect( + timeout_opts: TimeoutOptions, + server: &str, + ) -> Connection { info!("Connecting to the nats server {}...", server); // We retry in a loop until successful. Once connected the nats // library will handle reconnections for us. @@ -73,6 +76,9 @@ impl NatsMessageBus { loop { match BusOptions::new() .max_reconnects(None) + .tcp_read_timeout(timeout_opts.tcp_read_timeout()) + .tcp_connect_timeout(timeout_opts.tcp_read_timeout()) + .cache_connect_urls(false) .disconnect_callback(|| { warn!("Connection to the NATS server has been lost."); }) @@ -81,7 +87,7 @@ impl NatsMessageBus { "Connection to the NATS server has been reestablished." ) }) - .connect_async(server) + .connect(server) .await { Ok(connection) => { @@ -112,8 +118,8 @@ impl NatsMessageBus { timeout_options: TimeoutOptions, ) -> Self { Self { - timeout_options, - connection: Self::connect(server).await, + timeout_options: timeout_options.clone(), + connection: Self::connect(timeout_options, server).await, } } } diff --git a/nix/lib/rust.nix b/nix/lib/rust.nix index 9c8846964..e984ad3f4 100644 --- a/nix/lib/rust.nix +++ b/nix/lib/rust.nix @@ -1,8 +1,9 @@ { sources ? 
import ../sources.nix }: let - pkgs = import sources.nixpkgs { overlays = [ (import sources.nixpkgs-mozilla) ]; }; + pkgs = + import sources.nixpkgs { overlays = [ (import sources.rust-overlay) ]; }; in -rec { - nightly = pkgs.rustChannelOf { channel = "nightly"; date = "2021-04-19"; }; - stable = pkgs.rustChannelOf { channel = "stable"; }; +with pkgs; rec { + nightly = rust-bin.nightly."2021-06-22".default; + stable = rust-bin.stable.latest.default; } diff --git a/nix/mayastor-overlay.nix b/nix/mayastor-overlay.nix index ecac7e3d4..082b2e145 100644 --- a/nix/mayastor-overlay.nix +++ b/nix/mayastor-overlay.nix @@ -9,7 +9,6 @@ self: super: { mayastor-adhoc = (super.callPackage ./pkgs/mayastor { }).adhoc; mayastor-dev = (super.callPackage ./pkgs/mayastor { }).debug; mkContainerEnv = super.callPackage ./lib/mkContainerEnv.nix { }; - moac = (import ./../csi/moac { pkgs = super; }).package; ms-buildenv = super.callPackage ./pkgs/ms-buildenv { }; nvmet-cli = super.callPackage ./pkgs/nvmet-cli { }; units = (super.callPackage ./pkgs/mayastor/units.nix { }); diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index 25beac199..5cb752bdb 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -6,31 +6,32 @@ # containerd triggered when there are too many layers: # https://github.com/containerd/containerd/issues/4684 -{ stdenv -, busybox +{ busybox , dockerTools , e2fsprogs , git , lib -, moac -, writeScriptBin -, xfsprogs , mayastor , mayastor-dev -, mayastor-adhoc +, stdenv , utillinux +, writeScriptBin +, xfsprogs }: let versionDrv = import ../../lib/version.nix { inherit lib stdenv git; }; version = builtins.readFile "${versionDrv}"; - env = lib.makeBinPath [ busybox xfsprogs e2fsprogs utillinux ]; + path = lib.makeBinPath [ "/" busybox xfsprogs e2fsprogs utillinux ]; # common props for all mayastor images mayastorImageProps = { tag = version; created = "now"; config = { - Env = [ "PATH=${env}" ]; + Env = [ + "PATH=${path}" + "RUST_BACKTRACE=1" + ]; ExposedPorts = { "10124/tcp" = { }; }; Entrypoint = [ "/bin/mayastor" ]; }; @@ -44,7 +45,10 @@ let created = "now"; config = { Entrypoint = [ "/bin/mayastor-csi" ]; - Env = [ "PATH=${env}" ]; + Env = [ + "PATH=${path}" + "RUST_BACKTRACE=1" + ]; }; extraCommands = '' mkdir tmp @@ -55,7 +59,7 @@ let tag = version; created = "now"; config = { - Env = [ "PATH=${env}" ]; + Env = [ "PATH=${path}" ]; }; extraCommands = '' mkdir tmp @@ -66,11 +70,15 @@ let #!${stdenv.shell} chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin" iscsiadm "$@" ''; + + mctl = writeScriptBin "mctl" '' + /bin/mayastor-client "$@" + ''; in { mayastor = dockerTools.buildImage (mayastorImageProps // { name = "mayadata/mayastor"; - contents = [ busybox mayastor ]; + contents = [ busybox mayastor mctl ]; }); mayastor-dev = dockerTools.buildImage (mayastorImageProps // { @@ -78,11 +86,9 @@ in contents = [ busybox mayastor-dev ]; }); - mayastor-adhoc = dockerTools.buildImage (mayastorImageProps // { - name = "mayadata/mayastor-adhoc"; - contents = [ busybox mayastor-adhoc ]; - }); - + # The algorithm for placing packages into the layers is not optimal. + # There are a couple of layers with negligible size and then there is one + # big layer with everything else. That defeats the purpose of layering. 
mayastor-csi = dockerTools.buildLayeredImage (mayastorCsiImageProps // { name = "mayadata/mayastor-csi"; contents = [ busybox mayastor mayastorIscsiadm ]; @@ -94,28 +100,6 @@ in contents = [ busybox mayastor-dev mayastorIscsiadm ]; }); - # The algorithm for placing packages into the layers is not optimal. - # There are a couple of layers with negligable size and then there is one - # big layer with everything else. That defeats the purpose of layering. - moac = dockerTools.buildLayeredImage { - name = "mayadata/moac"; - tag = version; - created = "now"; - contents = [ busybox moac ]; - config = { - Entrypoint = [ "${moac.out}/lib/node_modules/moac/moac" ]; - ExposedPorts = { "3000/tcp" = { }; }; - Env = [ "PATH=${moac.env}:${moac.out}/lib/node_modules/moac" ]; - WorkDir = "${moac.out}/lib/node_modules/moac"; - }; - extraCommands = '' - chmod u+w bin - ln -s ${moac.out}/lib/node_modules/moac/moac bin/moac - chmod u-w bin - ''; - maxLayers = 42; - }; - mayastor-client = dockerTools.buildImage (clientImageProps // { name = "mayadata/mayastor-client"; contents = [ busybox mayastor ]; diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 584f8f613..58b591788 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -5,7 +5,6 @@ , lcov , lib , libaio -, libiscsi , libbpf , libelf , liburing @@ -37,14 +36,13 @@ let # Derivation attributes for production version of libspdk drvAttrs = rec { - version = "21.04-ab79841"; + version = "21.07-8489d57e8"; src = fetchFromGitHub { owner = "openebs"; repo = "spdk"; - rev = "ab79841affa8713e68df45fcf36c286dfb3809ca"; - sha256 = "1rvnnw2n949c3kdd4rz5pc73sic2lgg36w1m25kkipzw7x1c57hm"; - #sha256 = lib.fakeSha256; + rev = "8489d57e82e95c05c794f56a47f62bfd6c459b7b"; + sha256 = "LWYEBJ8JukR24ugWQ7qmM5O6LNZad38HWfcJROlUodU="; fetchSubmodules = true; }; @@ -60,22 +58,21 @@ let buildInputs = [ binutils - fio - libtool + jansson libaio - libiscsi + libbpf + libbsd + libelf + libexecinfo + libpcap + libtool liburing libuuid nasm ncurses numactl openssl - libpcap - libbsd - jansson - libbpf - libelf - libexecinfo + (python3.withPackages (ps: with ps; [ pyelftools ])) zlib ]; @@ -94,24 +91,17 @@ let (if (targetPlatform.config != buildPlatform.config) then [ "--cross-prefix=${targetPlatform.config}" ] else [ ]) ++ [ "--without-isal" - "--with-iscsi-initiator" "--with-uring" "--disable-unit-tests" "--disable-tests" - "--with-fio=${pkgs.fio}/include" ]; - enableParallelBuilding = true; - configurePhase = '' patchShebangs ./. > /dev/null - ./configure ${builtins.concatStringsSep - " " - (builtins.filter - (opt: (builtins.match "--build=.*" opt) == null) - configureFlags) - } + ./configure ${builtins.concatStringsSep " " configureFlags} ''; + enableParallelBuilding = true; + hardeningDisable = [ "all" ]; @@ -120,10 +110,16 @@ let find . -type f -name 'libspdk_event_nvmf.a' -delete find . -type f -name 'libspdk_sock_uring.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete + find . -type f -name 'libspdk_bdev_blobfs.a' -delete + find . -type f -name 'libspdk_bdev_ftl.a' -delete + find . -type f -name 'libspdk_bdev_gpt.a' -delete + find . -type f -name 'libspdk_bdev_passthru.a' -delete + find . -type f -name 'libspdk_bdev_raid.a' -delete + find . -type f -name 'libspdk_bdev_split.a' -delete + find . 
-type f -name 'libspdk_bdev_zone_block.a' -delete $CC -shared -o libspdk.so \ - -lc -laio -liscsi -lnuma -ldl -lrt -luuid -lpthread -lcrypto \ - -luring \ + -lc -laio -lnuma -ldl -lrt -luuid -lpthread -lcrypto -luring \ -Wl,--whole-archive \ $(find build/lib -type f -name 'libspdk_*.a*' -o -name 'librte_*.a*') \ $(find dpdk/build/lib -type f -name 'librte_*.a*') \ @@ -134,7 +130,6 @@ let installPhase = '' mkdir -p $out/lib mkdir $out/bin - mkdir $out/fio pushd include find . -type f -name "*.h" -exec install -D "{}" $out/include/{} \; @@ -154,8 +149,6 @@ let echo $(find $out -type f -name '*.a*' -delete) find . -executable -type f -name 'bdevperf' -exec install -D "{}" $out/bin \; - - cp build/fio/spdk_* $out/fio ''; }; in @@ -164,22 +157,26 @@ in pname = "libspdk"; separateDebugInfo = true; dontStrip = false; - configureFlags = drvAttrs.configureFlags ++ [ - "--disable-tests" - "--disable-unit-tests" - ]; }); debug = llvmPackages_11.stdenv.mkDerivation (drvAttrs // { pname = "libspdk-dev"; separateDebugInfo = false; dontStrip = true; - buildInputs = drvAttrs.buildInputs ++ [ cunit lcov ]; - configureFlags = drvAttrs.configureFlags ++ [ - "--enable-debug" - ]; + nativeBuildInputs = drvAttrs.nativeBuildInputs ++ [ cunit lcov ]; + buildInputs = drvAttrs.buildInputs ++ [ cunit lcov fio ]; + configurePhase = '' + patchShebangs ./. > /dev/null + ./configure ${builtins.concatStringsSep " " (drvAttrs.configureFlags ++ + [ + "--enable-debug" + "--with-fio=${pkgs.fio}/include" + ])} + ''; installPhase = drvAttrs.installPhase + '' echo "Copying test files" cp -ar test $out/test + mkdir $out/fio + cp build/fio/spdk_* $out/fio ''; }); } diff --git a/nix/pkgs/mayastor/cargo-package.nix b/nix/pkgs/mayastor/cargo-package.nix index 14645a095..9ce40a2e9 100644 --- a/nix/pkgs/mayastor/cargo-package.nix +++ b/nix/pkgs/mayastor/cargo-package.nix @@ -1,15 +1,13 @@ { stdenv -, clang +, clang_11 , dockerTools , e2fsprogs , lib , libaio -, libiscsi , libspdk , libspdk-dev , libudev , liburing -, llvmPackages , makeRustPlatform , numactl , openssl @@ -28,8 +26,8 @@ let channel = import ../../lib/rust.nix { inherit sources; }; rustPlatform = makeRustPlatform { - rustc = channel.stable.rust; - cargo = channel.stable.cargo; + rustc = channel.stable; + cargo = channel.stable; }; whitelistSource = src: allowedPrefixes: builtins.filterSource @@ -43,25 +41,22 @@ let "Cargo.lock" "Cargo.toml" "cli" + "composer" "csi" "devinfo" "jsonrpc" "mayastor" + "mbus-api" "nvmeadm" "rpc" "spdk-sys" "sysfs" - "mbus-api" - "composer" ]; buildProps = rec { name = "mayastor"; - # cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0m783ckamvr9143n0ahfqq3z5klix66fxq1vh466vvgpcw7jnnw5"; - inherit version cargoBuildFlags; src = whitelistSource ../../../. 
src_list; - LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib"; + LIBCLANG_PATH = "${llvmPackages_11.libclang.lib}/lib"; PROTOC = "${protobuf}/bin/protoc"; PROTOC_INCLUDE = "${protobuf}/include"; @@ -70,20 +65,26 @@ let llvmPackages_11.libclang protobuf libaio - libiscsi libudev liburing numactl openssl utillinux ]; - verifyCargoDeps = false; + cargoLock = { + lockFile = ../../../Cargo.lock; + outputHashes = { + "h2-0.3.3" = "sha256-Y4AaBj10ZOutI37sVRY4yVUYmVWj5dwPbPhBhPWHNiQ="; + "nats-0.15.2" = "sha256:1whr0v4yv31q5zwxhcqmx4qykgn5cgzvwlaxgq847mymzajpcsln"; + }; + }; doCheck = false; meta = { platforms = lib.platforms.linux; }; }; in { release = rustPlatform.buildRustPackage (buildProps // { + cargoBuildFlags = "--bin mayastor --bin mayastor-client --bin mayastor-csi"; buildType = "release"; buildInputs = buildProps.buildInputs ++ [ libspdk ]; SPDK_PATH = "${libspdk}"; @@ -93,38 +94,4 @@ in buildInputs = buildProps.buildInputs ++ [ libspdk-dev ]; SPDK_PATH = "${libspdk-dev}"; }); - # this is for an image that does not do a build of mayastor - adhoc = stdenv.mkDerivation { - name = "mayastor-adhoc"; - inherit version; - src = [ - ../../../target/debug/mayastor - ../../../target/debug/mayastor-csi - ../../../target/debug/mayastor-client - ../../../target/debug/jsonrpc - ]; - - buildInputs = [ - libaio - libiscsi.lib - libspdk-dev - liburing - libudev - openssl - xfsprogs - e2fsprogs - ]; - - unpackPhase = '' - for srcFile in $src; do - cp $srcFile $(stripHash $srcFile) - done - ''; - dontBuild = true; - dontConfigure = true; - installPhase = '' - mkdir -p $out/bin - install * $out/bin - ''; - }; } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 209036163..2999f895b 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -1,15 +1,13 @@ { stdenv -, clang +, clang_11 , dockerTools , e2fsprogs , lib , libaio -, libiscsi , libspdk , libspdk-dev , libudev , liburing -, llvmPackages , makeRustPlatform , numactl , openssl diff --git a/nix/pkgs/mayastor/units.nix b/nix/pkgs/mayastor/units.nix index 9b5526307..af87fe940 100644 --- a/nix/pkgs/mayastor/units.nix +++ b/nix/pkgs/mayastor/units.nix @@ -1,15 +1,13 @@ { stdenv -, clang +, clang_11 , dockerTools , e2fsprogs , lib , libaio -, libiscsi , libspdk , libspdk-dev , libudev , liburing -, llvmPackages , makeRustPlatform , numactl , openssl diff --git a/nix/sources.json b/nix/sources.json index 1cfa2e6a0..25546a427 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -35,16 +35,16 @@ "url": "https://github.com/NixOS/nixpkgs/archive/3600a82711987ac1267a96fd97974437b69f6806.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, - "nixpkgs-mozilla": { + "rust-overlay": { "branch": "master", - "description": "mozilla related nixpkgs (extends nixos/nixpkgs repo)", - "homepage": "https://github.com/mozilla/nixpkgs-mozilla", - "owner": "mozilla", - "repo": "nixpkgs-mozilla", - "rev": "3f3fba4e2066f28a1ad7ac60e86a688a92eb5b5f", - "sha256": "1mrj89gzrzhci4lssvzmmk31l715cddp7l39favnfs1qaijly814", + "description": "Pure and reproducible nix overlay for binary distributed rust toolchains", + "homepage": "", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "aa5f9c64c8865966b36726787721d6dc9f4948b5", + "sha256": "0pzaiqx4k43i3vfc74d25ins4k5zvwadqmijakkfpl4l5qr30sc6", "type": "tarball", - "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/3f3fba4e2066f28a1ad7ac60e86a688a92eb5b5f.tar.gz", + "url": 
"https://github.com/oxalica/rust-overlay/archive/aa5f9c64c8865966b36726787721d6dc9f4948b5.tar.gz", "url_template": "https://github.com///archive/.tar.gz" } } diff --git a/nvmeadm/Cargo.toml b/nvmeadm/Cargo.toml index 70cf7772c..830f1c0d3 100644 --- a/nvmeadm/Cargo.toml +++ b/nvmeadm/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Jeffry Molanus "] edition = "2018" [dependencies] -derive_builder = "0.10" -enum-primitive-derive = "0.2" -glob = "*" -ioctl-gen = "0.1" -libc = "0.2" -nix = "0.20" -num-traits = "0.2" -once_cell = "1.3" -snafu = "0.6" -uuid = { version = "0.8", features = ["v4"] } -url = "2.2.0" +derive_builder = "0.10.2" +enum-primitive-derive = "0.2.1" +glob = "0.3.0" +ioctl-gen = "0.1.1" +libc = "0.2.99" +nix = "0.22.1" +num-traits = "0.2.14" +once_cell = "1.8.0" +snafu = "0.6.10" +uuid = { version = "0.8.2", features = ["v4"] } +url = "2.2.2" diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index e8efdebe0..ff7abb44e 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Jeffry Molanus "] edition = "2018" [build-dependencies] -tonic-build = "0.4" -prost-build = "0.7" +tonic-build = "0.5.2" +prost-build = "0.8.0" [dependencies] -tonic = "0.4" -bytes = "1.0" -prost = "0.7" -prost-derive = "0.7" -serde = { version = "1.0.98", features = ["derive"] } -serde_derive = "1.0.99" -serde_json = "1.0.40" +tonic = "0.5.2" +bytes = "1.0.1" +prost = "0.8.0" +prost-derive = "0.8.0" +serde = { version = "1.0.127", features = ["derive"] } +serde_derive = "1.0.127" +serde_json = "1.0.66" diff --git a/rpc/build.rs b/rpc/build.rs index 68dd39915..bbd4716d0 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -1,10 +1,26 @@ +use std::{path::Path, process::Command}; + extern crate tonic_build; fn main() { + if !Path::new("mayastor-api/.git").exists() { + let output = Command::new("git") + .args(&["submodule", "update", "--init"]) + .output() + .expect("failed to execute git command"); + dbg!(&output); + if !output.status.success() { + panic!("submodule checkout failed"); + } + } + tonic_build::configure() .build_server(true) .type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]") - .compile(&["proto/mayastor.proto"], &["proto"]) + .compile( + &["mayastor-api/protobuf/mayastor.proto"], + &["mayastor-api/protobuf"], + ) .unwrap_or_else(|e| { panic!("mayastor protobuf compilation failed: {}", e) }); diff --git a/rpc/mayastor-api b/rpc/mayastor-api new file mode 160000 index 000000000..bde7d04d3 --- /dev/null +++ b/rpc/mayastor-api @@ -0,0 +1 @@ +Subproject commit bde7d04d3155bd7bb6ac36b61f48e782904140c5 diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto deleted file mode 100644 index b3aa08579..000000000 --- a/rpc/proto/mayastor.proto +++ /dev/null @@ -1,510 +0,0 @@ -// The definition of mayastor gRPC interface. - -// This interface is supposed to be independent on particular computing -// environment (i.e. kubernetes). - -syntax = "proto3"; - -option java_multiple_files = true; -option java_package = "io.openebs.mayastor"; -option java_outer_classname = "MayastorProto"; - -package mayastor; - -// Service for managing storage pools, replicas and nexus's. -// Replica holds the actual user data. From user application the -// data are accessed indirectly through nexus object. -service Mayastor { - // Storage pool related methods. - // - // Storage pool is made up of block devices disks and provides a storage - // space for thin provisioning of replicas. 
- - rpc CreatePool (CreatePoolRequest) returns (Pool) {} - rpc DestroyPool (DestroyPoolRequest) returns (Null) {} - rpc ListPools (Null) returns (ListPoolsReply) {} - - // Replica related methods. - // - // Replica allocates space from storage pool. - - rpc CreateReplica (CreateReplicaRequest) returns (Replica) {} - rpc DestroyReplica (DestroyReplicaRequest) returns (Null) {} - rpc ListReplicas (Null) returns (ListReplicasReply) {} - rpc StatReplicas (Null) returns (StatReplicasReply) {} - rpc ShareReplica (ShareReplicaRequest) returns (ShareReplicaReply) {} - - // Nexus related methods. - // - // Nexus is a logical frontend representing a data volume taking care of - // replication and rebuild in the background. - - rpc CreateNexus (CreateNexusRequest) returns (Nexus) {} - rpc DestroyNexus (DestroyNexusRequest) returns (Null) {} - rpc ListNexus (Null) returns (ListNexusReply) {} - rpc AddChildNexus (AddChildNexusRequest) returns (Child) {} - rpc RemoveChildNexus (RemoveChildNexusRequest) returns (Null) {} - rpc FaultNexusChild (FaultNexusChildRequest) returns (Null) {} - - // This method is called by control plane to construct a block device - // (/dev/...) that will be used to connect the nexus to the OS. - rpc PublishNexus (PublishNexusRequest) returns (PublishNexusReply) {} - rpc UnpublishNexus (UnpublishNexusRequest) returns (Null) {} - - // NVMe ANA state - rpc GetNvmeAnaState (GetNvmeAnaStateRequest) returns (GetNvmeAnaStateReply) {} - rpc SetNvmeAnaState (SetNvmeAnaStateRequest) returns (Null) {} - - // Nexus child operations - rpc ChildOperation(ChildNexusRequest) returns (Null) {} - - // Rebuild operations - rpc StartRebuild (StartRebuildRequest) returns (Null) {} - rpc StopRebuild (StopRebuildRequest) returns (Null) {} - rpc PauseRebuild (PauseRebuildRequest) returns (Null) {} - rpc ResumeRebuild (ResumeRebuildRequest) returns (Null) {} - rpc GetRebuildState (RebuildStateRequest) returns (RebuildStateReply) {} - rpc GetRebuildStats (RebuildStatsRequest) returns (RebuildStatsReply) {} - rpc GetRebuildProgress (RebuildProgressRequest) returns (RebuildProgressReply) {} - - // Snapshot operations - rpc CreateSnapshot (CreateSnapshotRequest) returns (CreateSnapshotReply) {} - - // Enumerate block devices on current host - rpc ListBlockDevices (ListBlockDevicesRequest) returns (ListBlockDevicesReply) {} - - // Obtain resource usage statistics for the current process - rpc GetResourceUsage (Null) returns (GetResourceUsageReply) {} - - // NVMe controllers - rpc ListNvmeControllers (Null) returns (ListNvmeControllersReply) {} -} - -// Means no arguments or no return value. -message Null {} - -// Create pool arguments. -// Currently we support only concatenation of disks (RAID-0). -message CreatePoolRequest { - string name = 1; // name of the pool - repeated string disks = 2; // disk device paths or URIs to be claimed by the pool -} - -// State of the storage pool (terminology comes from ZFS). -enum PoolState { - POOL_UNKNOWN = 0; - POOL_ONLINE = 1; // the pool is in normal working order - POOL_DEGRADED = 2; // the pool has experienced a failure but can still function - POOL_FAULTED = 3; // the pool is completely inaccessible -} - -// Storage pool properties -message Pool { - string name = 1; // name of the pool - repeated string disks = 2; // absolute disk paths claimed by the pool - PoolState state = 3; // current state of the pool - uint64 capacity = 5; // size of the pool in bytes - uint64 used = 6; // used bytes from the pool -} - -// Destroy pool arguments. 
-message DestroyPoolRequest { - string name = 1; // name of the pool -} - -// List of pools and their properties. -message ListPoolsReply { - repeated Pool pools = 1; // list of the pools -} - -// Protocol for remote storage access which exposes a replica. -enum ShareProtocolReplica { - REPLICA_NONE = 0; // not exposed - REPLICA_NVMF = 1; // NVMe over Fabrics (TCP) - REPLICA_ISCSI = 2; // iSCSI -} - -// Note that enum values use C++ scoping rules, meaning that enum values are siblings of their type, -// not children of it. -// So cannot use NBD, NVMF, and ISCSI as symbols for ShareProtocolNexus -enum ShareProtocolNexus { - NEXUS_NBD = 0; // local - NEXUS_NVMF = 1; // NVMe over Fabrics (TCP) - NEXUS_ISCSI = 2; // iSCSI -} - -// Create replica arguments. -message CreateReplicaRequest { - string uuid = 1; // uuid of the replica - string pool = 2; // name of the pool - uint64 size = 3; // size of the replica in bytes - bool thin = 4; // thin provisioning - ShareProtocolReplica share = 5; // protocol to expose the replica over -} - -// Destroy replica arguments. -message DestroyReplicaRequest { - string uuid = 1; // name of the replica -} - -// Replica properties -message Replica { - string uuid = 1; // uuid of the replica - string pool = 2; // name of the pool - bool thin = 3; // thin provisioning - uint64 size = 4; // size of the replica in bytes - ShareProtocolReplica share = 5; // protocol used for exposing the replica - string uri = 6; // uri usable by nexus to access it -} - -// List of replicas and their properties. -message ListReplicasReply { - repeated Replica replicas = 1; // list of the replicas -} - -// NOTE: We use struct instead of more suitable map type, because JS protobuf -// lib has problem (yields garbage) when decoding maps containing u64: -// https://github.com/protobufjs/protobuf.js/issues/1203 -message Stats { - uint64 num_read_ops = 1; - uint64 num_write_ops = 2; - uint64 bytes_read = 3; - uint64 bytes_written = 4; -} - -// Replica stats -message ReplicaStats { - string uuid = 1; // uuid of the replica - string pool = 2; // name of the pool - Stats stats = 3; // stat counters -} - -// List of replicas and their properties. -message StatReplicasReply { - repeated ReplicaStats replicas = 1; // list of the replicas -} - -// Share replica request. -message ShareReplicaRequest { - string uuid = 1; // uuid of the replica - ShareProtocolReplica share = 2; // protocol used for exposing the replica - // Use "NONE" to disable remote access. -} - -// Share replica response. -message ShareReplicaReply { - string uri = 1; // uri under which the replica is accessible by nexus -} - -// Create nexus arguments. -message CreateNexusRequest { - string uuid = 1; // this UUID will be set in as the UUID - uint64 size = 2; // size of the device in bytes - // replica can be iscsi and nvmf remote targets or a local spdk bdev - // (i.e. bdev:///name-of-the-bdev). - repeated string children = 3; // uris to the targets we connect to -} - -// State of the nexus child. -enum ChildState { - CHILD_UNKNOWN = 0; - CHILD_ONLINE = 1; // healthy and contains the latest bits - CHILD_DEGRADED = 2; // rebuild is in progress (or other recoverable error) - CHILD_FAULTED = 3; // unrecoverable error (control plane must act) -} - -// represents a child device part of a nexus -message Child { - string uri = 1; // uri of the child device - ChildState state = 2; // state of the child - int32 rebuild_progress = 3; -} - -// State of the nexus (terminology inspired by ZFS). 
-enum NexusState { - NEXUS_UNKNOWN = 0; - NEXUS_ONLINE = 1; // healthy and working - NEXUS_DEGRADED = 2; // not healthy but is able to serve IO (i.e. rebuild is in progress) - NEXUS_FAULTED = 3; // broken and unable to serve IO -} - -// represents a nexus device -message Nexus { - string uuid = 1; // name of the nexus - uint64 size = 2; // size of the volume in bytes - NexusState state = 3; // current state of the nexus - repeated Child children = 4; // array of children - // URI of the device for the volume (missing if not published). - // Missing property and empty string are treated the same. - string device_uri = 5; - uint32 rebuilds = 6; // total number of rebuild tasks -} - -message ListNexusReply { - repeated Nexus nexus_list = 1; -} - -message DestroyNexusRequest { - string uuid = 1; // uuid of the nexus -} - -message AddChildNexusRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // URI of the child device to be added - bool norebuild = 3; // auto start rebuilding -} - -message RemoveChildNexusRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // URI of the child device to be removed -} - -message FaultNexusChildRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // URI of the child device to be faulted -} - -// this message will be subject to change as we will add support for remote -// storage protocols. -message PublishNexusRequest { - string uuid = 1; // uuid of the nexus which to create device for - string key = 2; // encryption key - ShareProtocolNexus share = 3; // protocol used for the front end. -} - -message PublishNexusReply { - string device_uri = 1; // i.e. file:///dev/nbd0 -} - -message UnpublishNexusRequest { - string uuid = 1; // uuid of the nexus which to destroy -} - -enum NvmeAnaState { - NVME_ANA_INVALID_STATE = 0; // invalid, do not use - NVME_ANA_OPTIMIZED_STATE = 0x1; - NVME_ANA_NON_OPTIMIZED_STATE = 0x2; - NVME_ANA_INACCESSIBLE_STATE = 0x3; - NVME_ANA_PERSISTENT_LOSS_STATE = 0x4; // not yet supported - NVME_ANA_CHANGE_STATE = 0xF; // not yet supported -} - -message GetNvmeAnaStateRequest { - string uuid = 1; // uuid of the nexus -} - -message GetNvmeAnaStateReply { - NvmeAnaState ana_state = 1; -} - -message SetNvmeAnaStateRequest { - string uuid = 1; // uuid of the nexus - NvmeAnaState ana_state = 2; -} - -enum ChildAction { - offline = 0; - online = 1; -} - -message ChildNexusRequest { - string uuid = 1; - string uri = 2; - ChildAction action = 3; -} - -message RebuildStateRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message RebuildStateReply { - string state = 1; // current rebuild state (i.e. ready/running/completed etc.) 
-} - -message RebuildStatsRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message RebuildStatsReply { - uint64 blocks_total = 1; // total number of blocks to recover - uint64 blocks_recovered = 2; // number of blocks recovered - uint64 progress = 3; // rebuild progress % - uint64 segment_size_blks = 4; // granularity of each recovery copy in blocks - uint64 block_size = 5; // size in bytes of each block - uint64 tasks_total = 6; // total number of concurrent rebuild tasks - uint64 tasks_active = 7; // number of current active tasks -} - -message StartRebuildRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the child to be rebuilt -} - -message StopRebuildRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message PauseRebuildRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message ResumeRebuildRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message RebuildProgressRequest { - string uuid = 1; // uuid of the nexus - string uri = 2; // uri of the destination child -} - -message RebuildProgressReply { - uint32 progress = 1; // progress percentage -} - -message CreateSnapshotRequest { - string uuid = 1; // uuid of the nexus -} - -message CreateSnapshotReply { - string name = 1; // name of snapshot created -} - -message BlockDevice { - message Partition { - string parent = 1; // devname of parent device to which this partition belongs - uint32 number = 2; // partition number - string name = 3; // partition name - string scheme = 4; // partition scheme: gpt, dos, ... - string typeid = 5; // partition type identifier - string uuid = 6; // UUID identifying partition - } - message Filesystem { - string fstype = 1; // filesystem type: ext3, ntfs, ... - string label = 2; // volume label - string uuid = 3; // UUID identifying the volume (filesystem) - string mountpoint = 4; // path where filesystem is currently mounted - } - string devname = 1; // entry in /dev associated with device - string devtype = 2; // currently "disk" or "partition" - uint32 devmajor = 3; // major device number - uint32 devminor = 4; // minor device number - string model = 5; // device model - useful for identifying mayastor devices - string devpath = 6; // official device path - repeated string devlinks = 7; // list of udev generated symlinks by which device may be identified - uint64 size = 8; // size of device in (512 byte) blocks - Partition partition = 9; // partition information in case where device represents a partition - Filesystem filesystem = 10; // filesystem information in case where a filesystem is present - bool available = 11; // identifies if device is available for use (ie. 
is not "currently" in use) -} - -message ListBlockDevicesRequest { - bool all = 1; // list "all" block devices found (not just "available" ones) -} - -message ListBlockDevicesReply { - repeated BlockDevice devices = 1; -} - -message ResourceUsage { - int64 soft_faults = 1; // page reclaims (soft page faults) - int64 hard_faults = 2; // hard page faults - int64 swaps = 3; // swaps - int64 in_block_ops = 4; // input block operations - int64 out_block_ops = 5; // output block operations - int64 ipc_msg_send = 6; // IPC messages sent - int64 ipc_msg_rcv = 7; // IPC messages received - int64 signals = 8; // signals received - int64 vol_csw = 9; // voluntary context switches - int64 invol_csw = 10; // involuntary context switches -} - -message GetResourceUsageReply { - ResourceUsage usage = 1; -} - -// Anything what follows here are private interfaces used for interacting with -// mayastor outside the scope of CSI. - -service BdevRpc { - rpc List(mayastor.Null) returns (Bdevs) {} - rpc Create(BdevUri) returns (CreateReply) {} - rpc Destroy(BdevUri) returns (Null) {} - rpc Share(BdevShareRequest) returns (BdevShareReply) {} - rpc Unshare(CreateReply) returns (Null) {} -} - -message BdevShareRequest { - string name = 1; - string proto = 2; -} - -message BdevShareReply { - string uri = 1; -} - -message Bdev { - string name = 1; - string uuid = 2; - uint64 num_blocks = 3; - uint32 blk_size = 5; - bool claimed = 6; - string claimed_by = 7; - string aliases = 8; - string uri = 9; - string product_name = 10; - string share_uri = 11; - -} - -message Bdevs { - repeated Bdev bdevs = 1; -} - -message BdevUri { - string uri = 1; -} - -message CreateReply { - string name = 1; -} - -enum NvmeControllerState { - NEW = 0; - INITIALIZING = 1; - RUNNING = 2; - FAULTED = 3; - UNCONFIGURING = 4; - UNCONFIGURED = 5; -} - -message NvmeController { - string name = 1; // NVMe controller name - NvmeControllerState state = 2; // Current state of the NVMe controller - uint64 size = 3; // Size of the controller's namespace (0 if no namespace attached). - uint32 blk_size = 4; // Block size of the namespace (0 if no namespace attached). -} - -message ListNvmeControllersReply { - repeated NvmeController controllers = 1; -} - -// SPDK json-rpc proxy service - -service JsonRpc { - // Call a (SPDK) json-rpc method - rpc JsonRpcCall (JsonRpcRequest) returns (JsonRpcReply) {} -} - -message JsonRpcRequest { - string method = 1; - string params = 2; -} - -message JsonRpcReply { - string result = 1; -} diff --git a/scripts/check-coredumps.sh b/scripts/check-coredumps.sh index bcd4872f4..f187c6ccb 100755 --- a/scripts/check-coredumps.sh +++ b/scripts/check-coredumps.sh @@ -53,7 +53,7 @@ fi # Iterate over new coredumps and print a summary and stack for each echo "Looking for new coredumps ..." echo -coredump_pids=$(coredumpctl list --quiet --no-legend --since="$since" | awk '{ print $5 }') +coredump_pids=$(coredumpctl list --quiet --no-legend --since="$since" | grep -v "sshd$" | awk '{ print $5 }') coredump_count=0 for pid in $coredump_pids; do coredump_count=$((coredump_count + 1)) diff --git a/scripts/generate-deploy-yamls.sh b/scripts/generate-deploy-yamls.sh index 49d51a28b..2f4aecde6 100755 --- a/scripts/generate-deploy-yamls.sh +++ b/scripts/generate-deploy-yamls.sh @@ -165,17 +165,23 @@ if [ -n "$pools" ]; then done fi +# etcd yaml files +output_dir_etcd="$output_dir/etcd" +if [ ! 
-d "$output_dir_etcd" ]; then + mkdir -p "$output_dir_etcd" +else + rm -rf "${output_dir_etcd:?}/"* +fi + # update helm dependencies ( cd "$SCRIPTDIR"/../chart && helm dependency update ) # generate the yaml helm template --set "$template_params" mayastor "$SCRIPTDIR/../chart" --output-dir="$tmpd" --namespace mayastor -f "$SCRIPTDIR/../chart/$profile/values.yaml" --set "$helm_string" -f "$helm_file" -# mayastor and nats yaml files -mv "$tmpd"/mayastor/templates/*.yaml "$output_dir/" +# our own autogenerated yaml files +mv -f "$tmpd"/mayastor/templates/* "$output_dir/" -# etcd yaml files -output_dir_etcd="$output_dir/etcd" -if [ ! -d "$output_dir_etcd" ]; then - mkdir -p "$output_dir_etcd" -fi -mv "$tmpd"/mayastor/charts/etcd/templates/*.yaml "$output_dir_etcd" +# bitnamis etcd files +mv "$tmpd"/mayastor/charts/etcd/templates/*.yaml "$output_dir_etcd/" +# trim trailing-whitespace +for file in "$output_dir_etcd/"*.yaml; do sed -i -e's/[ \t]*$//' "$file"; done diff --git a/scripts/grpc-test.sh b/scripts/grpc-test.sh index afd9c4842..de607b930 100755 --- a/scripts/grpc-test.sh +++ b/scripts/grpc-test.sh @@ -4,6 +4,7 @@ set -euxo pipefail export PATH="$PATH:${HOME}/.cargo/bin" export npm_config_jobs=$(nproc) +export MOAC=true cargo build --all cd "$(dirname "$0")/../test/grpc" diff --git a/scripts/js-check.sh b/scripts/js-check.sh index 4828927d4..ac484146a 100755 --- a/scripts/js-check.sh +++ b/scripts/js-check.sh @@ -5,27 +5,17 @@ set -euo pipefail SCRIPTDIR=$(dirname "$0") -# Decouple JS files according to the sub-project that they belong to. -MOAC_FILES= +# Filter only the files that belong to grpc tests MS_TEST_FILES= for path in "$@"; do - rel_path=`echo $path | sed 's,csi/moac/,,'` + rel_path=`echo $path | sed 's,test/grpc/,,'` if [ "$rel_path" != "$path" ]; then - MOAC_FILES="$MOAC_FILES $rel_path" - else - rel_path=`echo $path | sed 's,test/grpc/,,'` - if [ "$rel_path" != "$path" ]; then - MS_TEST_FILES="$MS_TEST_FILES $rel_path" - fi + MS_TEST_FILES="$MS_TEST_FILES $rel_path" fi done -# Run semistandard check for each subproject -if [ -n "$MOAC_FILES" ]; then - ( cd $SCRIPTDIR/../csi/moac && npx semistandard --fix $MOAC_FILES ) -fi - +# Run semistandard check if [ -n "$MS_TEST_FILES" ]; then ( cd $SCRIPTDIR/../test/grpc && npx semistandard --fix $MS_TEST_FILES ) fi diff --git a/scripts/moac-test.sh b/scripts/moac-test.sh deleted file mode 100755 index 2d881760f..000000000 --- a/scripts/moac-test.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -set -euxo pipefail - -cd "$(dirname "$0")/../csi/moac" -export npm_config_jobs=$(nproc) -npm install -npm run prepare -npm run compile - -./node_modules/mocha/bin/mocha test/index.ts \ - --reporter test/multi_reporter.js \ - --reporter-options reporters="xunit spec",output=../../moac-xunit-report.xml diff --git a/scripts/pytest-tests.sh b/scripts/pytest-tests.sh new file mode 100755 index 000000000..55faea1f0 --- /dev/null +++ b/scripts/pytest-tests.sh @@ -0,0 +1,51 @@ +#! 
/usr/bin/env bash + +set -eu -o pipefail + +function run_tests() +{ + while read name extra + do + if [[ "$name" = '#'* ]] + then + continue + fi + if [ -z "$name" ] + then + continue + fi + if [ -d "$name" ] + then + ( + set -x + python -m pytest --tc-file='test_config.ini' --docker-compose="$name" "$name" + ) + fi + done +} + +if [ "${SRCDIR:-unset}" = unset ] +then + echo "SRCDIR must be set to the root of your working tree" 2>&1 + exit 1 +fi + +cd "$SRCDIR/test/python" && source ./venv/bin/activate + +run_tests << 'END' + +tests/replica +tests/publish +tests/rebuild + +# tests/csi + +tests/ana_client +tests/cli_controller +tests/replica_uuid +# tests/rpc + +tests/nexus_multipath +tests/nexus + +END diff --git a/scripts/release.sh b/scripts/release.sh index 4a6919374..68e897ee7 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -119,9 +119,9 @@ cd $SCRIPTDIR/.. if [ -z "$IMAGES" ]; then if [ -z "$DEBUG" ]; then - IMAGES="mayastor mayastor-csi mayastor-client moac" + IMAGES="mayastor mayastor-csi mayastor-client" else - IMAGES="mayastor-dev mayastor-csi-dev mayastor-client moac" + IMAGES="mayastor-dev mayastor-csi-dev mayastor-client" fi fi diff --git a/shell.nix b/shell.nix index 67550d91d..4064731a6 100644 --- a/shell.nix +++ b/shell.nix @@ -1,11 +1,12 @@ -{ nospdk ? false, norust ? false }: +{ nospdk ? false }: let sources = import ./nix/sources.nix; pkgs = import sources.nixpkgs { overlays = [ (_: _: { inherit sources; }) (import ./nix/mayastor-overlay.nix) ]; }; -in with pkgs; +in +with pkgs; let nospdk_moth = "You have requested environment without SPDK, you should provide it!"; @@ -14,27 +15,17 @@ let channel = import ./nix/lib/rust.nix { inherit sources; }; # python environment for test/python pytest_inputs = python3.withPackages - (ps: with ps; [ virtualenv grpcio grpcio-tools asyncssh ]); -in mkShell { - + (ps: with ps; [ virtualenv grpcio grpcio-tools asyncssh black ]); +in +mkShell { + name = "mayastor-dev-shell"; # fortify does not work with -O0 which is used by spdk when --enable-debug hardeningDisable = [ "fortify" ]; buildInputs = [ clang_11 cowsay - docker - docker-compose - e2fsprogs - envsubst # for e2e tests etcd fio - gdb - git - go - gptfdisk - kind - kubectl - kubernetes-helm libaio libiscsi libudev @@ -44,19 +35,16 @@ in mkShell { nats-server ninja nodejs-16_x - numactl nvme-cli - nvmet-cli + numactl openssl pkg-config pre-commit procps - python3 pytest_inputs + python3 utillinux - xfsprogs - ] ++ (if (nospdk) then [ libspdk-dev.buildInputs ] else [ libspdk-dev ]) - ++ pkgs.lib.optional (!norust) channel.nightly.rust; + ] ++ (if (nospdk) then [ libspdk-dev.buildInputs ] else [ libspdk-dev ]); LIBCLANG_PATH = mayastor.LIBCLANG_PATH; PROTOC = mayastor.PROTOC; @@ -69,17 +57,12 @@ in mkShell { ${pkgs.lib.optionalString (nospdk) ''export RUSTFLAGS="-C link-args=-Wl,-rpath,$(pwd)/spdk-sys/spdk"''} ${pkgs.lib.optionalString (nospdk) "echo"} - ${pkgs.lib.optionalString (norust) "cowsay ${norust_moth}"} - ${pkgs.lib.optionalString (norust) "echo 'Hint: use rustup tool.'"} - ${pkgs.lib.optionalString (norust) "echo"} + + echo 'Hint: use rustup tool.' 
+ echo # SRCDIR is needed by docker-compose files as it requires absolute paths export SRCDIR=`pwd` - # python compiled proto files needed by pytest - python -m grpc_tools.protoc -I `realpath rpc/proto` --python_out=test/python --grpc_python_out=test/python mayastor.proto - virtualenv --no-setuptools test/python/venv - source test/python/venv/bin/activate - pip install -r test/python/requirements.txt pre-commit install pre-commit install --hook commit-msg ''; diff --git a/spdk-sys/Cargo.toml b/spdk-sys/Cargo.toml index 3178204bf..3206a1191 100644 --- a/spdk-sys/Cargo.toml +++ b/spdk-sys/Cargo.toml @@ -12,5 +12,5 @@ authors = [ ] [build-dependencies] -bindgen = "0.58" -cc = "1.0" +bindgen = "0.59.1" +cc = "1.0.69" diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index a99efae3d..1428f0465 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -91,6 +91,9 @@ fn main() { .allowlist_function("^nvme_qpair_.*") .allowlist_function("^nvme_ctrlr_.*") .blocklist_type("^longfunc") + .allowlist_type("^spdk_nvme_ns_flags") + .allowlist_type("^spdk_nvme_registered_ctrlr.*") + .allowlist_type("^spdk_nvme_reservation.*") .allowlist_var("^NVMF.*") .allowlist_var("^SPDK.*") .allowlist_var("^spdk.*") @@ -123,7 +126,6 @@ fn main() { println!("cargo:rustc-link-search={}/spdk", manifest_dir); println!("cargo:rustc-link-lib=spdk"); println!("cargo:rustc-link-lib=aio"); - println!("cargo:rustc-link-lib=iscsi"); println!("cargo:rustc-link-lib=dl"); println!("cargo:rustc-link-lib=uuid"); println!("cargo:rustc-link-lib=numa"); diff --git a/spdk-sys/build.sh b/spdk-sys/build.sh index a4e7a6853..49fe668c0 100755 --- a/spdk-sys/build.sh +++ b/spdk-sys/build.sh @@ -12,7 +12,6 @@ rm libspdk.so ./configure --enable-debug \ --target-arch=nehalem \ --without-isal \ - --with-iscsi-initiator \ --with-crypto \ --with-uring \ --disable-unit-tests \ @@ -25,11 +24,13 @@ make -j $(nproc) find . -type f -name 'libspdk_event_nvmf.a' -delete find . -type f -name 'libspdk_sock_uring.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete +find . -type f -name 'libspdk_bdev_blobfs.a' -delete find . -type f -name 'libspdk_bdev_ftl.a' -delete find . -type f -name 'libspdk_bdev_gpt.a' -delete +find . -type f -name 'libspdk_bdev_passthru.a' -delete find . -type f -name 'libspdk_bdev_raid.a' -delete find . -type f -name 'libspdk_bdev_split.a' -delete -find . -type f -name 'libspdk_bdev_blobfs.a' -delete +find . -type f -name 'libspdk_bdev_zone_block.a' -delete # the event libraries are the libraries that parse configuration files # we do our own config file parsing, and we setup our own targets. diff --git a/spdk-sys/wrapper.h b/spdk-sys/wrapper.h index c2d1dffba..e8f904e82 100644 --- a/spdk-sys/wrapper.h +++ b/spdk-sys/wrapper.h @@ -1,7 +1,6 @@ #include #include #include -#include #include #include #include @@ -24,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -32,8 +32,10 @@ #include #include #include +#include #include #include +#include #include "logwrapper.h" #include "nvme_helper.h" diff --git a/terraform/README.adoc b/terraform/README.adoc index 5a8ffeebd..66e6efd71 100644 --- a/terraform/README.adoc +++ b/terraform/README.adoc @@ -267,21 +267,21 @@ docker-compose up ---- Subsequently, you can push mayastor images there using docker or skopeo. -Examples below are for moac image. Similarly you should push mayastor and -mayastor-csi images. Replace "hostname" by the name of your registry host. +Example below is for mayastor image. Similarly you should push mayastor-csi +image. 
Replace "hostname" by the name of your registry host. [source,bash] ---- -nix-build '' -A images.moac +nix-build '' -A images.mayastor docker load ' -A images.moac -skopeo copy --dest-tls-verify=false docker-archive:result docker://hostname:5000/mayadata/moac:latest +nix-build '' -A images.mayastor +skopeo copy --dest-tls-verify=false docker-archive:result docker://hostname:5000/mayadata/mayastor:latest ---- Nodes in the k8s cluster will refuse to pull images from such registry @@ -289,7 +289,7 @@ because it is insecure (not using tls). To work around this problem modify `mod/k8s/repo.sh` adding insecure-registry to daemon.json and then provision your cluster. -Now edit mayastor deployment yamls and change all mayastor & moac image -names to point to your private docker registry. For moac image that would be -`image: mayadata/moac:latest` -> `image: hostname:5000/mayadata/moac:latest`. +Now edit mayastor deployment yamls and change all mayastor(-csi) image +names to point to your private docker registry. For mayastor image that would be +`image: mayadata/mayastor:latest` -> `image: hostname:5000/mayadata/mayastor:latest`. diff --git a/terraform/mod/k8s/repo.sh b/terraform/mod/k8s/repo.sh index c4a813d15..0ca78b88c 100644 --- a/terraform/mod/k8s/repo.sh +++ b/terraform/mod/k8s/repo.sh @@ -52,5 +52,17 @@ sudo sysctl --system sudo mkdir -p /etc/containerd sudo containerd config default | sudo tee /etc/containerd/config.toml +# Rebooting a node does not kill the containers they are running resulting +# in a vary long timeout before the system moves forward. +# +# There are various bugs upstream detailing this and it supposed to be fixed a few weeks ago (June) +# This is *not* that fix rather a work around. The containers will not shut down "cleanly". +sudo mkdir -p /etc/systemd/system/containerd.service.d +sudo tee /etc/sysctl.d/system/containerd.service.d/override.conf >/dev/null <=6.9.0" + } }, "node_modules/@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.0.tgz", + "integrity": "sha512-t8MH41kUQylBtu2+4IQA3atqevA2lRgqA2wyVB/YiWmsDSuylZZuXOUy9ric30hfzauEFfdsuk/eXTRrGrfd0g==", "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", + "@babel/helper-validator-identifier": "^7.15.7", "chalk": "^2.0.0", "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" } }, "node_modules/@babel/highlight/node_modules/ansi-styles": { @@ -91,6 +97,14 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/@babel/highlight/node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -110,6 +124,56 @@ "node": ">=4" } }, + "node_modules/@eslint/eslintrc": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", + "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==", + 
"dependencies": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^13.9.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/@eslint/eslintrc/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@eslint/eslintrc/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@eslint/eslintrc/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@grpc/grpc-js": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.2.tgz", @@ -138,6 +202,24 @@ "node": ">=6" } }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz", + "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==", + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==" + }, "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -201,6 +283,11 @@ "@types/node": "*" } }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=" + }, "node_modules/@types/lodash": { "version": "4.14.137", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.137.tgz", @@ -216,6 +303,11 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.21.tgz", "integrity": "sha512-CBgLNk4o3XMnqMc0rhb6lc77IwShMEglz05deDcn2lQxyXEZivfwgYJu7SMha9V5XcrP6qZuevTHV/QrN2vjKQ==" }, + "node_modules/@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==" + }, "node_modules/abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", @@ -233,9 +325,12 @@ } }, "node_modules/acorn-jsx": { - "version": "5.3.1", - "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==" + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } }, "node_modules/ajv": { "version": "6.12.6", @@ -246,35 +341,20 @@ "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, "node_modules/ansi-colors": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.3.tgz", - "integrity": "sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", "engines": { "node": ">=6" } }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-escapes/node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "engines": { - "node": ">=10" - } - }, "node_modules/ansi-regex": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", @@ -321,26 +401,58 @@ } }, "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", + "es-abstract": "^1.19.1", "get-intrinsic": "^1.1.1", - "is-string": "^1.0.5" + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": 
"sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.5.tgz", + "integrity": "sha512-08u6rVyi1Lj7oqWbS9nUxliETrtIROT4XGTA4D/LWGten6E3ocm7cy9SIrmNHOL5XVbVuckUp3X6Xyg8/zpvHA==", + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/ascli": { @@ -361,11 +473,11 @@ } }, "node_modules/astral-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/async": { @@ -437,6 +549,9 @@ "dependencies": { "function-bind": "^1.1.1", "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/callsites": { @@ -483,11 +598,6 @@ "node": ">=10" } }, - "node_modules/chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" - }, "node_modules/check-error": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", @@ -497,23 +607,23 @@ } }, "node_modules/chokidar": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.3.0.tgz", - "integrity": "sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", "dependencies": { - "anymatch": "~3.1.1", + "anymatch": "~3.1.2", "braces": "~3.0.2", - "glob-parent": "~5.1.0", + "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", - "readdirp": "~3.2.0" + "readdirp": "~3.6.0" }, "engines": { "node": ">= 8.10.0" }, "optionalDependencies": { - "fsevents": "~2.1.1" + "fsevents": "~2.3.2" } }, "node_modules/chownr": { @@ -521,25 +631,6 @@ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, - "node_modules/cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cli-width": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", - 
"integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", - "engines": { - "node": ">= 10" - } - }, "node_modules/cliui": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", @@ -600,51 +691,38 @@ "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=" }, - "node_modules/contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, "node_modules/cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dependencies": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" }, "engines": { - "node": ">=4.8" + "node": ">= 8" } }, "node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", "dependencies": { "ms": "2.1.2" }, "engines": { "node": ">=6.0" - } - }, - "node_modules/debug-log": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/debug-log/-/debug-log-1.0.1.tgz", - "integrity": "sha1-IwdjLUwEOCuN+KMvcLiVBG1SdF8=", - "engines": { - "node": ">=0.10.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, "node_modules/decamelize": { @@ -675,9 +753,9 @@ } }, "node_modules/deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" }, "node_modules/define-properties": { "version": "1.1.3", @@ -690,27 +768,6 @@ "node": ">= 0.4" } }, - "node_modules/deglob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/deglob/-/deglob-4.0.1.tgz", - "integrity": "sha512-/g+RDZ7yf2HvoW+E5Cy+K94YhgcFgr6C8LuHZD1O5HoNPkf3KY6RfXJ0DBGlB/NkLi5gml+G9zqRzk9S0mHZCg==", - "dependencies": { - "find-root": "^1.0.0", - "glob": "^7.0.5", - "ignore": "^5.0.0", - "pkg-config": "^1.1.0", - "run-parallel": "^1.1.2", - "uniq": "^1.0.1" - } - }, - "node_modules/deglob/node_modules/ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": 
"sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", - "engines": { - "node": ">= 4" - } - }, "node_modules/delegates": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", @@ -728,9 +785,9 @@ } }, "node_modules/diff": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", "engines": { "node": ">=0.3.1" } @@ -747,9 +804,20 @@ } }, "node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/enquirer": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", + "dependencies": { + "ansi-colors": "^4.1.1" + }, + "engines": { + "node": ">=8.6" + } }, "node_modules/error-ex": { "version": "1.3.2", @@ -760,43 +828,36 @@ } }, "node_modules/es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.1.tgz", + "integrity": "sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==", "dependencies": { "call-bind": "^1.0.2", "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "get-intrinsic": "^1.1.1", + "get-symbol-description": "^1.0.0", "has": "^1.0.3", "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", + "internal-slot": "^1.0.3", + "is-callable": "^1.2.4", "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", + "is-string": "^1.0.7", + "is-weakref": "^1.0.1", + "object-inspect": "^1.11.0", "object-keys": "^1.1.1", "object.assign": "^4.1.2", "string.prototype.trimend": "^1.0.4", "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" + "unbox-primitive": "^1.0.1" }, "engines": { "node": ">= 0.4" - } - }, - "node_modules/es-abstract/node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" }, - "engines": { - "node": ">= 0.4" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/es-to-primitive": { @@ -810,56 +871,73 @@ }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + 
}, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "engines": { + "node": ">=6" } }, "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "engines": { - "node": ">=0.8.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/eslint": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", - "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", + "version": "7.32.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz", + "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==", "dependencies": { - "@babel/code-frame": "^7.0.0", + "@babel/code-frame": "7.12.11", + "@eslint/eslintrc": "^0.4.3", + "@humanwhocodes/config-array": "^0.5.0", "ajv": "^6.10.0", - "chalk": "^2.1.0", - "cross-spawn": "^6.0.5", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", - "eslint-scope": "^5.0.0", - "eslint-utils": "^1.4.3", - "eslint-visitor-keys": "^1.1.0", - "espree": "^6.1.2", - "esquery": "^1.0.1", + "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.1", + "esquery": "^1.4.0", "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", - "globals": "^12.1.0", + "glob-parent": "^5.1.2", + "globals": "^13.6.0", "ignore": "^4.0.6", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", - "inquirer": "^7.0.0", "is-glob": "^4.0.0", "js-yaml": "^3.13.1", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.3.0", - "lodash": "^4.17.14", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", "minimatch": "^3.0.4", - "mkdirp": "^0.5.1", "natural-compare": "^1.4.0", - "optionator": "^0.8.3", + "optionator": "^0.9.1", "progress": "^2.0.0", - "regexpp": "^2.0.1", - "semver": "^6.1.2", - "strip-ansi": "^5.2.0", - "strip-json-comments": "^3.0.1", - "table": "^5.2.3", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^6.0.9", "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, @@ -867,52 +945,96 @@ "eslint": "bin/eslint.js" }, "engines": { - "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + "node": "^10.12.0 || >=12.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint-config-semistandard": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.0.tgz", - "integrity": "sha512-volIMnosUvzyxGkYUA5QvwkahZZLeUx7wcS0+7QumPn+MMEBbV6P7BY1yukamMst0w3Et3QZlCjQEwQ8tQ6nug==" + "version": "16.0.0", + "resolved": 
"https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-16.0.0.tgz", + "integrity": "sha512-oD8QOo4mSInRJhQb3Zi6L8HebwZaB6SI3A+NNrPdVN0nN1K45L5pXK3joY+ksWDlT3ew/M+fJk2tuMCjIpjRzQ==", + "peerDependencies": { + "eslint": ">=7.12.1", + "eslint-config-standard": ">=16.0.3", + "eslint-plugin-import": ">=2.22.1", + "eslint-plugin-node": ">=11.1.0", + "eslint-plugin-promise": ">=4.2.1" + } }, "node_modules/eslint-config-standard": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz", - "integrity": "sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg==" + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.3.tgz", + "integrity": "sha512-x4fmJL5hGqNJKGHSjnLdgA6U6h1YW/G2dW9fA+cyVur4SK6lyue8+UgNKWlZtUDTXvgKDD/Oa3GQjmB5kjtVvg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "peerDependencies": { + "eslint": "^7.12.1", + "eslint-plugin-import": "^2.22.1", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-promise": "^4.2.1 || ^5.0.0" + } }, "node_modules/eslint-config-standard-jsx": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-8.1.0.tgz", - "integrity": "sha512-ULVC8qH8qCqbU792ZOO6DaiaZyHNS/5CZt3hKqHkEhVlhPEPN3nfBqqxJCyp59XrjIBZPu1chMYe9T2DXZ7TMw==" + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", + "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "peerDependencies": { + "eslint": "^7.12.1", + "eslint-plugin-react": "^7.21.5" + } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", - "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", + "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", "dependencies": { - "debug": "^2.6.9", - "resolve": "^1.13.1" + "debug": "^3.2.7", + "resolve": "^1.20.0" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dependencies": { - "ms": "2.0.0" + "ms": "^2.1.1" } }, - "node_modules/eslint-import-resolver-node/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": 
"sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, "node_modules/eslint-module-utils": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.0.tgz", - "integrity": "sha512-6j9xxegbqe8/kZY8cYpcp0xhbK0EgJlg3g9mib3/miLaExuuwc3n5UEfSnU6hWMbT0FAYVvDbL9RrRgpUeQIvA==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.1.tgz", + "integrity": "sha512-fjoetBXQZq2tSTWZ9yWVl2KuFrTZZH3V+9iD1V1RfpDgxzJR+mPd/KZmMiA8gbPqdBzpNiEHOuT7IYEWxrH0zQ==", "dependencies": { - "debug": "^2.6.9", + "debug": "^3.2.7", + "find-up": "^2.1.0", "pkg-dir": "^2.0.0" }, "engines": { @@ -920,59 +1042,110 @@ } }, "node_modules/eslint-module-utils/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dependencies": { - "ms": "2.0.0" + "ms": "^2.1.1" } }, - "node_modules/eslint-module-utils/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "node_modules/eslint-module-utils/node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dependencies": { + "locate-path": "^2.0.0" + }, + "engines": { + "node": ">=4" + } }, - "node_modules/eslint-plugin-es": { + "node_modules/eslint-module-utils/node_modules/locate-path": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-2.0.0.tgz", - "integrity": "sha512-f6fceVtg27BR02EYnBhgWLFQfK6bN4Ll0nQFrBHOlCsAyxeZkn0NHns5O0YZOPrV1B3ramd6cgFwaoFLcSkwEQ==", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", "dependencies": { - "eslint-utils": "^1.4.2", - "regexpp": "^3.0.0" + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" }, "engines": { - "node": ">=8.10.0" + "node": ">=4" } }, - "node_modules/eslint-plugin-es/node_modules/regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", + "node_modules/eslint-module-utils/node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dependencies": { + "p-try": "^1.0.0" + }, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/eslint-plugin-import": { - "version": "2.18.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.18.2.tgz", - "integrity": "sha512-5ohpsHAiUBRNaBWAF08izwUGlbrJoJJ+W9/TBwsGoR1MnlgfwMIKrFeSjWbt6moabiXW9xNvtFz+97KHRfI4HQ==", + "node_modules/eslint-module-utils/node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", "dependencies": { - "array-includes": "^3.0.3", - "contains-path": "^0.1.0", - "debug": "^2.6.9", - 
"doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.2", - "eslint-module-utils": "^2.4.0", - "has": "^1.0.3", - "minimatch": "^3.0.4", - "object.values": "^1.1.0", - "read-pkg-up": "^2.0.0", - "resolve": "^1.11.0" + "p-limit": "^1.1.0" }, "engines": { "node": ">=4" } }, + "node_modules/eslint-module-utils/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "engines": { + "node": ">=4" + } + }, + "node_modules/eslint-plugin-es": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", + "dependencies": { + "eslint-utils": "^2.0.0", + "regexpp": "^3.0.0" + }, + "engines": { + "node": ">=8.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=4.19.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.25.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.25.2.tgz", + "integrity": "sha512-qCwQr9TYfoBHOFcVGKY9C9unq05uOxxdklmBXLVvcwo68y5Hta6/GzCZEMx2zQiu0woKNEER0LE7ZgaOfBU14g==", + "dependencies": { + "array-includes": "^3.1.4", + "array.prototype.flat": "^1.2.5", + "debug": "^2.6.9", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-module-utils": "^2.7.0", + "has": "^1.0.3", + "is-core-module": "^2.7.0", + "is-glob": "^4.0.3", + "minimatch": "^3.0.4", + "object.values": "^1.1.5", + "resolve": "^1.20.0", + "tsconfig-paths": "^3.11.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" + } + }, "node_modules/eslint-plugin-import/node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", @@ -982,12 +1155,11 @@ } }, "node_modules/eslint-plugin-import/node_modules/doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "dependencies": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" + "esutils": "^2.0.2" }, "engines": { "node": ">=0.10.0" @@ -999,12 +1171,12 @@ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "node_modules/eslint-plugin-node": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-10.0.0.tgz", - "integrity": "sha512-1CSyM/QCjs6PXaT18+zuAXsjXGIGo5Rw630rSKwokSs2jrYURQc4R5JZpoanNCqwNmepg+0eZ9L7YiRUJb8jiQ==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", "dependencies": { - "eslint-plugin-es": "^2.0.0", - "eslint-utils": "^1.4.2", + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", "ignore": "^5.1.1", "minimatch": "^3.0.4", "resolve": "^1.10.1", @@ -1012,12 +1184,15 @@ }, "engines": { "node": ">=8.10.0" + }, + "peerDependencies": { + "eslint": ">=5.16.0" } }, "node_modules/eslint-plugin-node/node_modules/ignore": { - "version": "5.1.8", - "resolved": 
"https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==", "engines": { "node": ">= 4" } @@ -1031,30 +1206,38 @@ } }, "node_modules/eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-5.1.1.tgz", + "integrity": "sha512-XgdcdyNzHfmlQyweOPTxmc7pIsS6dE4MvwhXWMQ2Dxs1XAL2GJDilUsjWen6TWik0aSI+zD/PqocZBblcm9rdA==", "engines": { - "node": ">=6" + "node": "^10.12.0 || >=12.0.0" + }, + "peerDependencies": { + "eslint": "^7.0.0" } }, "node_modules/eslint-plugin-react": { - "version": "7.14.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.14.3.tgz", - "integrity": "sha512-EzdyyBWC4Uz2hPYBiEJrKCUi2Fn+BJ9B/pJQcjw5X+x/H2Nm59S4MJIvL4O5NEE0+WbnQwEBxWY03oUk+Bc3FA==", + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.21.5.tgz", + "integrity": "sha512-8MaEggC2et0wSF6bUeywF7qQ46ER81irOdWS4QWxnnlAEsnzeBevk1sWh7fhpCghPpXb+8Ks7hvaft6L/xsR6g==", "dependencies": { - "array-includes": "^3.0.3", + "array-includes": "^3.1.1", + "array.prototype.flatmap": "^1.2.3", "doctrine": "^2.1.0", "has": "^1.0.3", - "jsx-ast-utils": "^2.1.0", - "object.entries": "^1.1.0", - "object.fromentries": "^2.0.0", - "object.values": "^1.1.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "object.entries": "^1.1.2", + "object.fromentries": "^2.0.2", + "object.values": "^1.1.1", "prop-types": "^15.7.2", - "resolve": "^1.10.1" + "resolve": "^1.18.1", + "string.prototype.matchall": "^4.0.2" }, "engines": { "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7" } }, "node_modules/eslint-plugin-react/node_modules/doctrine": { @@ -1068,11 +1251,6 @@ "node": ">=0.10.0" } }, - "node_modules/eslint-plugin-standard": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", - "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==" - }, "node_modules/eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -1086,17 +1264,20 @@ } }, "node_modules/eslint-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", - "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "dependencies": { "eslint-visitor-keys": "^1.1.0" }, "engines": { "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" } }, - "node_modules/eslint-visitor-keys": { + "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { "version": "1.3.0", "resolved": 
"https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", @@ -1104,76 +1285,65 @@ "node": ">=4" } }, - "node_modules/eslint/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "node_modules/eslint-visitor-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", "engines": { - "node": ">=6" + "node": ">=10" } }, - "node_modules/eslint/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, + "node_modules/eslint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "engines": { - "node": ">=4" + "node": ">=8" } }, - "node_modules/eslint/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "node_modules/eslint/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" + "sprintf-js": "~1.0.2" } }, - "node_modules/eslint/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "node_modules/eslint/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/eslint/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/eslint/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" } }, "node_modules/eslint/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "7.3.5", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "dependencies": { + "lru-cache": "^6.0.0" + }, "bin": { "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/eslint/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dependencies": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=6" + "node": ">=8" } }, "node_modules/eslint/node_modules/strip-json-comments": { @@ -1182,30 +1352,30 @@ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/espree": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", + "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", "dependencies": { - "has-flag": "^3.0.0" + "acorn": "^7.4.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^1.3.0" }, "engines": { - "node": ">=4" + "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/espree": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", - "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", - "dependencies": { - "acorn": "^7.1.1", - "acorn-jsx": "^5.2.0", - "eslint-visitor-keys": "^1.1.0" - }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", "engines": { - "node": ">=6.0.0" + "node": ">=4" } }, "node_modules/esprima": { @@ -1232,9 +1402,9 @@ } }, "node_modules/esquery/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "engines": { "node": ">=4.0" } @@ -1251,9 +1421,9 @@ } }, "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "engines": { "node": ">=4.0" } @@ -1274,19 +1444,6 @@ "node": ">=0.10.0" } }, - "node_modules/external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dependencies": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -1302,26 +1459,15 @@ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/file-entry-cache": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dependencies": { - "flat-cache": "^2.0.1" + "flat-cache": "^3.0.4" }, "engines": { - "node": ">=4" + "node": "^10.12.0 || >=12.0.0" } }, "node_modules/fill-range": { @@ -1348,61 +1494,59 @@ "find-process": "bin/find-process.js" } }, - "node_modules/find-root": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", - "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==" - }, "node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dependencies": { - "locate-path": "^3.0.0" + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/flat": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/flat/-/flat-4.1.1.tgz", - "integrity": "sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA==", - "dependencies": { - "is-buffer": "~2.0.3" - }, + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", "bin": { "flat": "cli.js" } }, "node_modules/flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - "integrity": 
"sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", "dependencies": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" + "flatted": "^3.1.0", + "rimraf": "^3.0.2" }, "engines": { - "node": ">=4" + "node": "^10.12.0 || >=12.0.0" } }, "node_modules/flat-cache/node_modules/rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "dependencies": { "glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.2.tgz", + "integrity": "sha512-JaTY/wtrcSyvXJl4IMFHPKyFur1sE9AUqc0QnhOaJ0CxHtAoIV8pYDzeEfAaNEtGkOfq4gr3LBFmdXW5mOQFnA==" }, "node_modules/fs-minipass": { "version": "1.2.7", @@ -1418,9 +1562,10 @@ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "node_modules/fsevents": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", - "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "hasInstallScript": true, "optional": true, "os": [ "darwin" @@ -1478,20 +1623,41 @@ "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/get-stdin": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz", - "integrity": "sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", + "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", + "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": 
"sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -1502,6 +1668,9 @@ }, "engines": { "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/glob-parent": { @@ -1516,20 +1685,23 @@ } }, "node_modules/globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "version": "13.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.12.0.tgz", + "integrity": "sha512-uS8X6lSKN2JumVoXrbUz+uG4BYG+eiawqm3qFcT7ammfbUHeCBoJMlHcec/S3krSk73/AE/f0szYFmgAA3kYZg==", "dependencies": { - "type-fest": "^0.8.1" + "type-fest": "^0.20.2" }, "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", + "integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==" }, "node_modules/growl": { "version": "1.10.5", @@ -1627,7 +1799,10 @@ "node_modules/has-bigints": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==" + "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/has-flag": { "version": "4.0.0", @@ -1643,6 +1818,23 @@ "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-unicode": { @@ -1658,11 +1850,6 @@ "he": "bin/he" } }, - "node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==" - }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -1700,6 +1887,9 @@ }, "engines": { "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/imurmurhash": { @@ -1737,72 +1927,17 @@ "node": "*" } }, - "node_modules/inquirer": { - "version": "7.3.3", - "resolved": 
"https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz", - "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==", - "dependencies": { - "ansi-escapes": "^4.2.1", - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-width": "^3.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.19", - "mute-stream": "0.0.8", - "run-async": "^2.4.0", - "rxjs": "^6.6.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/inquirer/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/inquirer/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "node_modules/internal-slot": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", + "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", "dependencies": { - "ansi-regex": "^5.0.0" + "get-intrinsic": "^1.1.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" }, "engines": { - "node": ">=8" + "node": ">= 0.4" } }, "node_modules/invert-kv": { @@ -1819,9 +1954,15 @@ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" }, "node_modules/is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/is-binary-path": { "version": "2.1.0", @@ -1835,46 +1976,54 @@ } }, "node_modules/is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": 
"sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", "dependencies": { - "call-bind": "^1.0.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" - } - }, - "node_modules/is-buffer": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", - "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", - "engines": { - "node": ">=4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz", + "integrity": "sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==", "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-core-module": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.3.0.tgz", - "integrity": "sha512-xSphU2KG9867tsYdLD4RWQ1VqdFl4HTO9Thf3I/3dLEfr0dbPTWKsuCKrgqMljg4nPE+Gq0VCnzT3gr0CyBmsw==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.0.tgz", + "integrity": "sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==", "dependencies": { "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-extglob": { @@ -1897,9 +2046,9 @@ } }, "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dependencies": { "is-extglob": "^2.1.1" }, @@ -1913,6 +2062,9 @@ "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-number": { @@ -1924,42 +2076,98 @@ } }, "node_modules/is-number-object": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.6.tgz", + "integrity": "sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "engines": { + "node": ">=8" } }, "node_modules/is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", "dependencies": { "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", "dependencies": { - "has-symbols": "^1.0.1" + "has-symbols": "^1.0.2" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-weakref": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.1.tgz", + "integrity": "sha512-b2jKc2pQZjaeFYWEf7ScFj+Be1I+PXmlu572Q8coTXZ+LD/QQZ7ShPMst8h16riVgyXTQwUsFEl74mDvc/3MHQ==", + "dependencies": { + "call-bind": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/isarray": { @@ -1978,12 +2186,11 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" @@ -2004,13 +2211,24 @@ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=" }, + "node_modules/json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, "node_modules/jsx-ast-utils": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-2.4.1.tgz", - "integrity": "sha512-z1xSldJ6imESSzOjd3NNkieVJKRlKYSOtMG8SFyCj2FIrvSaSuli/WjpBkEzCBoR9bYYYFgqJw61Xhu7Lcgk+w==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.1.tgz", + "integrity": "sha512-uP5vu8xfy2F9A6LGC22KO7e2/vGTS1MhP+18f++ZNlf0Ohaxbc9nIEwHAsejlJKyzfZzU5UIhe5ItYkitcZnZA==", "dependencies": { - "array-includes": "^3.1.1", - "object.assign": "^4.1.0" + "array-includes": "^3.1.3", + "object.assign": "^4.1.2" }, "engines": { "node": ">=4.0" @@ -2028,41 +2246,52 @@ } }, "node_modules/levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dependencies": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", + "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", "dependencies": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" + "graceful-fs": "^4.1.15", + "parse-json": "^4.0.0", + "pify": "^4.0.1", + "strip-bom": "^3.0.0", + "type-fest": "^0.3.0" }, "engines": { - "node": ">=4" + "node": ">=6" + } + }, + "node_modules/load-json-file/node_modules/type-fest": { + "version": "0.3.1", + "resolved": 
"https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", + "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", + "engines": { + "node": ">=6" } }, "node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" + "p-locate": "^5.0.0" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/lodash": { @@ -2080,71 +2309,34 @@ "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=" }, - "node_modules/log-symbols": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-3.0.0.tgz", - "integrity": "sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ==", - "dependencies": { - "chalk": "^2.4.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/log-symbols/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/log-symbols/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/log-symbols/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=" }, - "node_modules/log-symbols/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" }, - "node_modules/log-symbols/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": 
"https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=" }, - "node_modules/log-symbols/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", "dependencies": { - "has-flag": "^3.0.0" + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" }, "engines": { - "node": ">=4" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/long": { @@ -2163,14 +2355,22 @@ "loose-envify": "cli.js" } }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, "engines": { - "node": ">=6" + "node": ">=10" } }, + "node_modules/lru-cache/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, "node_modules/minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", @@ -2216,198 +2416,166 @@ } }, "node_modules/mocha": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-7.2.0.tgz", - "integrity": "sha512-O9CIypScywTVpNaRrCAgoUnJgozpIofjKUYmJhiCIJMiuYnLI6otcb1/kpW9/n/tJODHGZ7i8aLQoDVsMtOKQQ==", + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.3.tgz", + "integrity": "sha512-Xcpl9FqXOAYqI3j79pEtHBBnQgVXIhpULjGQa7DVb0Po+VzmSIK9kanAiWLHoRR/dbZ2qpdPshuXr8l1VaHCzw==", "dependencies": { - "ansi-colors": "3.2.3", + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", "browser-stdout": "1.3.1", - "chokidar": "3.3.0", - "debug": "3.2.6", - "diff": "3.5.0", - "escape-string-regexp": "1.0.5", - "find-up": "3.0.0", - "glob": "7.1.3", + "chokidar": "3.5.2", + "debug": "4.3.2", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", "growl": "1.10.5", "he": "1.2.0", - "js-yaml": "3.13.1", - "log-symbols": "3.0.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", "minimatch": "3.0.4", - "mkdirp": "0.5.5", - "ms": "2.1.1", - "node-environment-flags": "1.0.6", - "object.assign": "4.1.0", - "strip-json-comments": "2.0.1", - "supports-color": "6.0.0", - "which": "1.3.1", - "wide-align": "1.1.3", - "yargs": "13.3.2", - "yargs-parser": "13.1.2", - "yargs-unparser": "1.6.0" + "ms": "2.1.3", + "nanoid": "3.1.25", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" }, "bin": { "_mocha": "bin/_mocha", "mocha": 
"bin/mocha" }, "engines": { - "node": ">= 8.10.0" + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" } }, "node_modules/mocha/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/mocha/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/mocha/node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/mocha/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/mocha/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/mocha/node_modules/debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/mocha/node_modules/glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/mocha/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" } }, "node_modules/mocha/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/mocha/node_modules/ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/mocha/node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=6" + "node": ">=8" } }, "node_modules/mocha/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dependencies": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=6" + "node": ">=8" + } + }, + "node_modules/mocha/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/mocha/node_modules/supports-color": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.0.0.tgz", - "integrity": "sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==", + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dependencies": { - "has-flag": "^3.0.0" + "has-flag": "^4.0.0" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" } }, "node_modules/mocha/node_modules/wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, "node_modules/mocha/node_modules/y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } }, "node_modules/mocha/node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" } }, "node_modules/ms": { @@ -2425,6 +2593,17 @@ "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==" }, + "node_modules/nanoid": { + "version": "3.1.25", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.25.tgz", + "integrity": "sha512-rdwtIXaXCLFAQbnfqDRnI6jaRHp9fTcYBjtFKE8eezcZ7LuLjhUaQGNeMXf1HmRoCH32CLz6XwX0TtxEOS/A3Q==", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, "node_modules/nats": { "version": "1.4.12", "resolved": "https://registry.npmjs.org/nats/-/nats-1.4.12.tgz", @@ -2472,20 +2651,6 @@ "ms": "^2.1.1" } }, - "node_modules/nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node_modules/node-environment-flags": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/node-environment-flags/-/node-environment-flags-1.0.6.tgz", - "integrity": "sha512-5Evy2epuL+6TM0lCQGpFIj6KwiEsGh1SrHUhTbNX+sLbBtjidPZFAnVK9y5yU1+h//RitLbRHTIMyxQPtxMdHw==", - "dependencies": { - "object.getownpropertydescriptors": "^2.0.3", - "semver": "^5.7.0" - } - }, "node_modules/node-pre-gyp": { "version": "0.16.0", "resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.16.0.tgz", @@ -2518,17 +2683,6 @@ "nopt": "bin/nopt.js" } }, - "node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": 
"sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -2596,9 +2750,12 @@ } }, "node_modules/object-inspect": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.2.tgz", - "integrity": "sha512-gz58rdPpadwztRrPjZE9DZLOABUpTGdcANUgOwBFO1C+HZZhePoP83M65WGDmbpwFYJSWqavbl4SgDn4k8RYTA==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", + "integrity": "sha512-jp7ikS6Sd3GxQfZJPyH3cjcbJF6GZPClgdV+EFygjFLQ5FmW/dRUnTd9PQ9k0JhoNDabWFbpF1yCdSWCC6gexg==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/object-keys": { "version": "1.1.1", @@ -2609,72 +2766,65 @@ } }, "node_modules/object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "dependencies": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.entries": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.3.tgz", - "integrity": "sha512-ym7h7OZebNS96hn5IJeyUmaWhaSM4SVtAPPfNLQEI2MYWCO2egsITb9nab2+i/Pwibx+R0mtn+ltKJXRSeTMGg==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz", + "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==", "dependencies": { - "call-bind": "^1.0.0", + "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "has": "^1.0.3" + "es-abstract": "^1.19.1" }, "engines": { "node": ">= 0.4" } }, "node_modules/object.fromentries": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.4.tgz", - "integrity": "sha512-EsFBshs5RUUpQEY1D4q/m59kMfz4YJvxuNCJcv/jWwOJr34EaVnG11ZrZa0UHB3wnzV1wx8m58T4hQL8IuNXlQ==", + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz", + "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" + "es-abstract": "^1.19.1" }, "engines": { "node": ">= 0.4" - } - }, - "node_modules/object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": 
"sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" }, - "engines": { - "node": ">= 0.8" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" + "es-abstract": "^1.19.1" }, "engines": { "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/once": { @@ -2685,28 +2835,17 @@ "wrappy": "1" } }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", "dependencies": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" }, "engines": { "node": ">= 0.8.0" @@ -2754,33 +2893,39 @@ } }, "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dependencies": { - "p-try": "^2.0.0" + "yocto-queue": "^0.1.0" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dependencies": { - "p-limit": "^2.0.0" + "p-limit": "^3.0.2" }, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, 
"node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", "engines": { - "node": ">=6" + "node": ">=4" } }, "node_modules/parent-module": { @@ -2795,14 +2940,15 @@ } }, "node_modules/parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", "dependencies": { - "error-ex": "^1.2.0" + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" }, "engines": { - "node": ">=0.10.0" + "node": ">=4" } }, "node_modules/partial-compare": { @@ -2811,11 +2957,11 @@ "integrity": "sha1-aKwbhk5GO3E+ZZMdIIPBVgYIgzo=" }, "node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/path-is-absolute": { @@ -2827,28 +2973,17 @@ } }, "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "node_modules/path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dependencies": { - "pify": "^2.0.0" - }, - "engines": { - "node": ">=4" - } + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "node_modules/pathval": { "version": "1.1.1", @@ -2859,11 +2994,14 @@ } }, "node_modules/picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", "engines": { "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/pidof": { @@ -2875,11 +3013,11 @@ } }, "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", 
- "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", "engines": { - "node": ">=0.10.0" + "node": ">=6" } }, "node_modules/pkg-conf": { @@ -2894,60 +3032,68 @@ "node": ">=6" } }, - "node_modules/pkg-conf/node_modules/load-json-file": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", - "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", + "node_modules/pkg-conf/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", "dependencies": { - "graceful-fs": "^4.1.15", - "parse-json": "^4.0.0", - "pify": "^4.0.1", - "strip-bom": "^3.0.0", - "type-fest": "^0.3.0" + "locate-path": "^3.0.0" }, "engines": { "node": ">=6" } }, - "node_modules/pkg-conf/node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "node_modules/pkg-conf/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" }, "engines": { - "node": ">=4" + "node": ">=6" } }, - "node_modules/pkg-conf/node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "node_modules/pkg-conf/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, "engines": { "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/pkg-conf/node_modules/type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", + "node_modules/pkg-conf/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, "engines": { "node": ">=6" } }, - "node_modules/pkg-config": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pkg-config/-/pkg-config-1.1.1.tgz", - "integrity": "sha1-VX7yLXPaPIg3EHdmxS6tq94pj+Q=", - "dependencies": { - "debug-log": "^1.0.0", - "find-root": "^1.0.0", - "xtend": "^4.0.1" - }, + "node_modules/pkg-conf/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": 
"sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "engines": { - "node": ">=0.10" + "node": ">=6" + } + }, + "node_modules/pkg-conf/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "engines": { + "node": ">=4" } }, "node_modules/pkg-dir": { @@ -3006,18 +3152,18 @@ "node": ">=4" } }, - "node_modules/pkg-dir/node_modules/p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "node_modules/pkg-dir/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", "engines": { "node": ">=4" } }, "node_modules/prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "engines": { "node": ">= 0.8.0" } @@ -3078,10 +3224,13 @@ "node": ">=6" } }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } }, "node_modules/rc": { "version": "1.2.8", @@ -3113,84 +3262,6 @@ "node": ">=0.8" } }, - "node_modules/read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dependencies": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dependencies": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dependencies": { - "locate-path": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dependencies": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dependencies": { - "p-try": 
"^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dependencies": { - "p-limit": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "engines": { - "node": ">=4" - } - }, "node_modules/readable-stream": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", @@ -3206,22 +3277,40 @@ } }, "node_modules/readdirp": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.2.0.tgz", - "integrity": "sha512-crk4Qu3pmXwgxdSgGhgA/eXiJAPQiX4GMOZZMXnqKxHX7TaoL+3gQVo/WeuAiogr07DpnfjIMpXXa+PAIvwPGQ==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "dependencies": { - "picomatch": "^2.0.4" + "picomatch": "^2.2.1" }, "engines": { - "node": ">= 8" + "node": ">=8.10.0" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", + "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/regexpp": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", - "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", "engines": { - "node": ">=6.5.0" + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" } }, "node_modules/require-directory": { @@ -3232,10 +3321,13 @@ "node": ">=0.10.0" } }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } }, "node_modules/resolve": { "version": "1.20.0", @@ -3244,6 +3336,9 @@ "dependencies": { "is-core-module": "^2.2.0", "path-parse": "^1.0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/resolve-from": { @@ -3254,18 +3349,6 @@ "node": ">=4" } }, - "node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": 
"sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/rimraf": { "version": "2.7.1", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", @@ -3277,33 +3360,6 @@ "rimraf": "bin.js" } }, - "node_modules/run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, "node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", @@ -3320,26 +3376,39 @@ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" }, "node_modules/semistandard": { - "version": "14.2.3", - "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-14.2.3.tgz", - "integrity": "sha512-vSfN5ewUHz85dpnB5wf8Xo/btFNfVI8UmPYe0xpYLNEfrZQi/qLhtx12XnB/jNlsB2CrCokHRXlzl2zPoXghjw==", - "dependencies": { - "eslint": "~6.8.0", - "eslint-config-semistandard": "15.0.0", - "eslint-config-standard": "14.1.1", - "eslint-config-standard-jsx": "8.1.0", - "eslint-plugin-import": "~2.18.0", - "eslint-plugin-node": "~10.0.0", - "eslint-plugin-promise": "~4.2.1", - "eslint-plugin-react": "~7.14.2", - "eslint-plugin-standard": "~4.0.0", - "standard-engine": "^12.0.0" + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-16.0.1.tgz", + "integrity": "sha512-ApAJ9fMAIwYuk5xI2HWSCd8s5o5L95abxU4dYl6ovUX6Rcww/7oxtaSuu9wLFL/Gfj/EXx1h6S4itXy5vyL60Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "eslint": "^7.27.0", + "eslint-config-semistandard": "16.0.0", + "eslint-config-standard": "16.0.3", + "eslint-config-standard-jsx": "10.0.0", + "eslint-plugin-import": "^2.22.1", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-promise": "^5.1.0", + "eslint-plugin-react": "~7.21.5", + "standard-engine": "^14.0.0" }, "bin": { "semistandard": "bin/cmd.js" }, "engines": { - "node": ">=6" + "node": ">=10.12.0" } }, "node_modules/semver": { @@ -3350,28 +3419,49 @@ "semver": "bin/semver" } }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, "node_modules/set-blocking": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" }, "node_modules/shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dependencies": { - "shebang-regex": "^1.0.0" + "shebang-regex": "^3.0.0" }, "engines": { - "node": ">=0.10.0" + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" } }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "engines": { - "node": ">=0.10.0" + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/signal-exit": { @@ -3385,92 +3475,57 @@ "integrity": "sha1-jXlaJ+ojlT32tSuRCB5eImZZk8U=" }, "node_modules/slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", "dependencies": { - "ansi-styles": "^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" }, "engines": { - "node": ">=6" - } - }, - "node_modules/slice-ansi/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" + "node": ">=10" }, - "engines": { - "node": ">=4" - } - }, - "node_modules/slice-ansi/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, - "node_modules/slice-ansi/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "engines": { - "node": ">=4" - } - }, - "node_modules/spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==" - }, - "node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" + "node": ">=8" } }, - "node_modules/spdx-license-ids": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.7.tgz", - "integrity": "sha512-U+MTEOO0AiDzxwFvoa4JVnMV6mZlJKk2sBLt90s7G0Gd0Mlknc7kxEn3nuDPNZRta7O2uy8oLcZLVT+4sqNZHQ==" - }, "node_modules/sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" }, "node_modules/standard-engine": { - "version": "12.1.0", - "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-12.1.0.tgz", - "integrity": "sha512-DVJnWM1CGkag4ucFLGdiYWa5/kJURPONmMmk17p8FT5NE4UnPZB1vxWnXnRo2sPSL78pWJG8xEM+1Tu19z0deg==", + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", + "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], "dependencies": { - "deglob": "^4.0.1", - "get-stdin": "^7.0.0", + "get-stdin": "^8.0.0", "minimist": "^1.2.5", - "pkg-conf": "^3.1.0" + "pkg-conf": "^3.1.0", + "xdg-basedir": "^4.0.0" }, "engines": { "node": ">=8.10" @@ -3497,6 +3552,24 @@ "node": ">=0.10.0" } }, + "node_modules/string.prototype.matchall": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.6.tgz", + "integrity": "sha512-6WgDX8HmQqvEd7J+G6VtAahhsQIssiZ8zl7zKh1VDMFyL3hRTJP4FTNA3RbIp2TOQ9AYNDcc7e3fH0Qbup+DBg==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "has-symbols": "^1.0.2", + "internal-slot": "^1.0.3", + "regexp.prototype.flags": "^1.3.1", + "side-channel": "^1.0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/string.prototype.trimend": { "version": "1.0.4", "resolved": 
"https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", @@ -3504,6 +3577,9 @@ "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trimstart": { @@ -3513,6 +3589,9 @@ "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/strip-ansi": { @@ -3574,97 +3653,122 @@ } }, "node_modules/table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/table/-/table-6.7.2.tgz", + "integrity": "sha512-UFZK67uvyNivLeQbVtkiUs8Uuuxv24aSL4/Vil2PJVtMgU8Lx0CYkP12uCGa3kjyQzOSgV1+z9Wkb82fCGsO0g==", "dependencies": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" + "ajv": "^8.0.1", + "lodash.clonedeep": "^4.5.0", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=6.0.0" + "node": ">=10.0.0" + } + }, + "node_modules/table/node_modules/ajv": { + "version": "8.6.3", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.3.tgz", + "integrity": "sha512-SMJOdDP6LqTkD0Uq8qLi+gMwSt0imXLSV080qFVwJCpH9U6Mb+SUGHAXM0KNbcBPguytWyvFxcHgMLe2D2XSpw==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, "node_modules/table/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "engines": { - "node": ">=6" + "node": ">=8" } }, "node_modules/table/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "engines": { - "node": ">=4" + "node": ">=8" } }, + "node_modules/table/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, "node_modules/table/node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=6" + "node": ">=8" } }, "node_modules/table/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dependencies": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=6" + "node": ">=8" } }, "node_modules/tar": { - "version": "4.4.13", - "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz", - "integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==", - "dependencies": { - "chownr": "^1.1.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.8.6", - "minizlib": "^1.2.1", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.2", - "yallist": "^3.0.3" + "version": "4.4.19", + "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.19.tgz", + "integrity": "sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==", + "dependencies": { + "chownr": "^1.1.4", + "fs-minipass": "^1.2.7", + "minipass": "^2.9.0", + "minizlib": "^1.3.3", + "mkdirp": "^0.5.5", + "safe-buffer": "^5.2.1", + "yallist": "^3.1.1" }, "engines": { "node": ">=4.5" } }, + "node_modules/tar/node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" - }, - "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dependencies": { - "os-tmpdir": "~1.0.2" - }, - "engines": { - "node": ">=0.6.0" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -3684,10 +3788,16 @@ "tweetnacl": "^1.0.3" } }, - "node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + "node_modules/tsconfig-paths": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.11.0.tgz", + "integrity": 
"sha512-7ecdYDnIdmv639mmDwslG6KQg1Z9STTz1j7Gcz0xa+nshh/gKDAHcPxRbWOsA3SPp0tXP2leTcY9Kw+NAkfZzA==", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.0", + "strip-bom": "^3.0.0" + } }, "node_modules/tweetnacl": { "version": "1.0.3", @@ -3695,11 +3805,11 @@ "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" }, "node_modules/type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dependencies": { - "prelude-ls": "~1.1.2" + "prelude-ls": "^1.2.1" }, "engines": { "node": ">= 0.8.0" @@ -3714,11 +3824,14 @@ } }, "node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/unbox-primitive": { @@ -3730,13 +3843,11 @@ "has-bigints": "^1.0.1", "has-symbols": "^1.0.2", "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" - }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -3755,24 +3866,18 @@ "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==" }, - "node_modules/validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dependencies": { "isexe": "^2.0.0" }, "bin": { - "which": "bin/which" + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" } }, "node_modules/which-boxed-primitive": { @@ -3785,13 +3890,11 @@ "is-number-object": "^1.0.4", "is-string": "^1.0.5", "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - 
"integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, "node_modules/wide-align": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", @@ -3819,6 +3922,11 @@ "node": ">=0.10.0" } }, + "node_modules/workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==" + }, "node_modules/wrap-ansi": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", @@ -3836,17 +3944,6 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, - "node_modules/write": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "dependencies": { - "mkdirp": "^0.5.1" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/wtfnode": { "version": "0.8.4", "resolved": "https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.4.tgz", @@ -3855,12 +3952,12 @@ "wtfnode": "proxy.js" } }, - "node_modules/xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "node_modules/xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", "engines": { - "node": ">=0.4" + "node": ">=8" } }, "node_modules/y18n": { @@ -3888,165 +3985,81 @@ } }, "node_modules/yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "node_modules/yargs-parser/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", "engines": { - "node": ">=6" + "node": ">=10" } }, "node_modules/yargs-unparser": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-1.6.0.tgz", - "integrity": "sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", "dependencies": { - "flat": "^4.1.0", - "lodash": "^4.17.15", - "yargs": "^13.3.0" + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" }, "engines": { - "node": ">=6" + "node": ">=10" } }, - "node_modules/yargs-unparser/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - 
"integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "node_modules/yargs-unparser/node_modules/camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", "engines": { - "node": ">=6" - } - }, - "node_modules/yargs-unparser/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" + "node": ">=10" }, - "engines": { - "node": ">=4" - } - }, - "node_modules/yargs-unparser/node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/yargs-unparser/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/yargs-unparser/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/yargs-unparser/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "node_modules/yargs-unparser/node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", "engines": { - "node": ">=4" - } - }, - "node_modules/yargs-unparser/node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" + "node": ">=10" }, - "engines": { - "node": ">=6" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/yargs-unparser/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "engines": { - "node": ">=6" - } - }, - "node_modules/yargs-unparser/node_modules/wrap-ansi": { - "version": "5.1.0", - 
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" + "node": ">=10" }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs-unparser/node_modules/y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" - }, - "node_modules/yargs-unparser/node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } } }, "dependencies": { "@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "version": "7.12.11", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz", + "integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==", "requires": { - "@babel/highlight": "^7.12.13" + "@babel/highlight": "^7.10.4" } }, "@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==" + "version": "7.15.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz", + "integrity": "sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w==" }, "@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.0.tgz", + "integrity": "sha512-t8MH41kUQylBtu2+4IQA3atqevA2lRgqA2wyVB/YiWmsDSuylZZuXOUy9ric30hfzauEFfdsuk/eXTRrGrfd0g==", "requires": { - "@babel/helper-validator-identifier": "^7.12.11", + "@babel/helper-validator-identifier": "^7.15.7", "chalk": "^2.0.0", "js-tokens": "^4.0.0" }, @@ -4082,6 +4095,11 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + }, "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -4097,6 +4115,46 @@ } } }, + "@eslint/eslintrc": { + "version": "0.4.3", + "resolved": 
"https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", + "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==", + "requires": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^13.9.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "dependencies": { + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" + } + } + }, "@grpc/grpc-js": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.2.tgz", @@ -4121,6 +4179,21 @@ "protobufjs": "^6.8.6" } }, + "@humanwhocodes/config-array": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz", + "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==", + "requires": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + } + }, + "@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==" + }, "@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -4184,6 +4257,11 @@ "@types/node": "*" } }, + "@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=" + }, "@types/lodash": { "version": "4.14.137", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.137.tgz", @@ -4199,6 +4277,11 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.21.tgz", "integrity": "sha512-CBgLNk4o3XMnqMc0rhb6lc77IwShMEglz05deDcn2lQxyXEZivfwgYJu7SMha9V5XcrP6qZuevTHV/QrN2vjKQ==" }, + "@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==" + }, "abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", @@ -4210,9 +4293,10 @@ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==" }, "acorn-jsx": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==" + "version": 
"5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "requires": {} }, "ajv": { "version": "6.12.6", @@ -4226,24 +4310,9 @@ } }, "ansi-colors": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.3.tgz", - "integrity": "sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==" - }, - "ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "requires": { - "type-fest": "^0.21.3" - }, - "dependencies": { - "type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==" - } - } + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==" }, "ansi-regex": { "version": "2.1.1", @@ -4282,23 +4351,40 @@ } }, "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "array-includes": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", "requires": { - "sprintf-js": "~1.0.2" + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "is-string": "^1.0.7" } }, - "array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", + "array.prototype.flat": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": "sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.0" + } + }, + "array.prototype.flatmap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.5.tgz", + "integrity": "sha512-08u6rVyi1Lj7oqWbS9nUxliETrtIROT4XGTA4D/LWGten6E3ocm7cy9SIrmNHOL5XVbVuckUp3X6Xyg8/zpvHA==", "requires": { - "call-bind": "^1.0.2", + "call-bind": "^1.0.0", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "get-intrinsic": "^1.1.1", - "is-string": "^1.0.5" + "es-abstract": "^1.19.0" } }, "ascli": { @@ -4316,9 +4402,9 @@ "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==" }, "astral-regex": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==" }, "async": { "version": "3.2.0", @@ -4413,29 +4499,24 @@ "supports-color": "^7.1.0" } }, - "chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" - }, "check-error": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=" }, "chokidar": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.3.0.tgz", - "integrity": "sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", "requires": { - "anymatch": "~3.1.1", + "anymatch": "~3.1.2", "braces": "~3.0.2", - "fsevents": "~2.1.1", - "glob-parent": "~5.1.0", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", - "readdirp": "~3.2.0" + "readdirp": "~3.6.0" } }, "chownr": { @@ -4443,19 +4524,6 @@ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, - "cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "requires": { - "restore-cursor": "^3.1.0" - } - }, - "cli-width": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", - "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==" - }, "cliui": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", @@ -4504,41 +4572,29 @@ "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=" }, - "contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=" - }, "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": 
"^1.2.0", - "which": "^1.2.9" + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" } }, "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", "requires": { "ms": "2.1.2" } }, - "debug-log": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/debug-log/-/debug-log-1.0.1.tgz", - "integrity": "sha1-IwdjLUwEOCuN+KMvcLiVBG1SdF8=" - }, "decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", @@ -4558,9 +4614,9 @@ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" }, "deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" }, "define-properties": { "version": "1.1.3", @@ -4570,26 +4626,6 @@ "object-keys": "^1.0.12" } }, - "deglob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/deglob/-/deglob-4.0.1.tgz", - "integrity": "sha512-/g+RDZ7yf2HvoW+E5Cy+K94YhgcFgr6C8LuHZD1O5HoNPkf3KY6RfXJ0DBGlB/NkLi5gml+G9zqRzk9S0mHZCg==", - "requires": { - "find-root": "^1.0.0", - "glob": "^7.0.5", - "ignore": "^5.0.0", - "pkg-config": "^1.1.0", - "run-parallel": "^1.1.2", - "uniq": "^1.0.1" - }, - "dependencies": { - "ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==" - } - } - }, "delegates": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", @@ -4601,9 +4637,9 @@ "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=" }, "diff": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==" + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==" }, "doctrine": { "version": "3.0.0", @@ -4614,9 +4650,17 @@ } }, "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "enquirer": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", + "requires": { + "ansi-colors": "^4.1.1" + } }, "error-ex": { "version": "1.3.2", @@ -4627,39 +4671,30 @@ } }, "es-abstract": { - 
"version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.1.tgz", + "integrity": "sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==", "requires": { "call-bind": "^1.0.2", "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "get-intrinsic": "^1.1.1", + "get-symbol-description": "^1.0.0", "has": "^1.0.3", "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", + "internal-slot": "^1.0.3", + "is-callable": "^1.2.4", "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", + "is-string": "^1.0.7", + "is-weakref": "^1.0.1", + "object-inspect": "^1.11.0", "object-keys": "^1.1.1", "object.assign": "^4.1.2", "string.prototype.trimend": "^1.0.4", "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - }, - "dependencies": { - "object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - } - } + "unbox-primitive": "^1.0.1" } }, "es-to-primitive": { @@ -4672,219 +4707,230 @@ "is-symbol": "^1.0.2" } }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + }, "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" }, "eslint": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", - "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", + "version": "7.32.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz", + "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==", "requires": { - "@babel/code-frame": "^7.0.0", + "@babel/code-frame": "7.12.11", + "@eslint/eslintrc": "^0.4.3", + "@humanwhocodes/config-array": "^0.5.0", "ajv": "^6.10.0", - "chalk": "^2.1.0", - "cross-spawn": "^6.0.5", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", - "eslint-scope": "^5.0.0", - "eslint-utils": "^1.4.3", - "eslint-visitor-keys": "^1.1.0", - "espree": "^6.1.2", - "esquery": "^1.0.1", + "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.1", + "esquery": "^1.4.0", "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", 
- "globals": "^12.1.0", + "glob-parent": "^5.1.2", + "globals": "^13.6.0", "ignore": "^4.0.6", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", - "inquirer": "^7.0.0", "is-glob": "^4.0.0", "js-yaml": "^3.13.1", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.3.0", - "lodash": "^4.17.14", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", "minimatch": "^3.0.4", - "mkdirp": "^0.5.1", "natural-compare": "^1.4.0", - "optionator": "^0.8.3", + "optionator": "^0.9.1", "progress": "^2.0.0", - "regexpp": "^2.0.1", - "semver": "^6.1.2", - "strip-ansi": "^5.2.0", - "strip-json-comments": "^3.0.1", - "table": "^5.2.3", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^6.0.9", "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, "dependencies": { "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "requires": { - "color-convert": "^1.9.0" + "sprintf-js": "~1.0.2" } }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" + "argparse": "^1.0.7", + "esprima": "^4.0.0" } }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "semver": { + "version": "7.3.5", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", "requires": { - "color-name": "1.1.3" + "lru-cache": "^6.0.0" } }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - }, "strip-ansi": { - "version": "5.2.0", - 
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" } }, "strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } } } }, "eslint-config-semistandard": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.0.tgz", - "integrity": "sha512-volIMnosUvzyxGkYUA5QvwkahZZLeUx7wcS0+7QumPn+MMEBbV6P7BY1yukamMst0w3Et3QZlCjQEwQ8tQ6nug==" + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-16.0.0.tgz", + "integrity": "sha512-oD8QOo4mSInRJhQb3Zi6L8HebwZaB6SI3A+NNrPdVN0nN1K45L5pXK3joY+ksWDlT3ew/M+fJk2tuMCjIpjRzQ==", + "requires": {} }, "eslint-config-standard": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz", - "integrity": "sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg==" + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.3.tgz", + "integrity": "sha512-x4fmJL5hGqNJKGHSjnLdgA6U6h1YW/G2dW9fA+cyVur4SK6lyue8+UgNKWlZtUDTXvgKDD/Oa3GQjmB5kjtVvg==", + "requires": {} }, "eslint-config-standard-jsx": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-8.1.0.tgz", - "integrity": "sha512-ULVC8qH8qCqbU792ZOO6DaiaZyHNS/5CZt3hKqHkEhVlhPEPN3nfBqqxJCyp59XrjIBZPu1chMYe9T2DXZ7TMw==" + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", + "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", + "requires": {} }, "eslint-import-resolver-node": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", - "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", + "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", "requires": { - "debug": "^2.6.9", - "resolve": "^1.13.1" + "debug": "^3.2.7", + "resolve": "^1.20.0" }, "dependencies": { "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "3.2.7", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { - "ms": "2.0.0" + "ms": "^2.1.1" } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, "eslint-module-utils": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.0.tgz", - "integrity": "sha512-6j9xxegbqe8/kZY8cYpcp0xhbK0EgJlg3g9mib3/miLaExuuwc3n5UEfSnU6hWMbT0FAYVvDbL9RrRgpUeQIvA==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.1.tgz", + "integrity": "sha512-fjoetBXQZq2tSTWZ9yWVl2KuFrTZZH3V+9iD1V1RfpDgxzJR+mPd/KZmMiA8gbPqdBzpNiEHOuT7IYEWxrH0zQ==", "requires": { - "debug": "^2.6.9", + "debug": "^3.2.7", + "find-up": "^2.1.0", "pkg-dir": "^2.0.0" }, "dependencies": { "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { - "ms": "2.0.0" + "ms": "^2.1.1" } }, - "ms": { + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "requires": { + "locate-path": "^2.0.0" + } + }, + "locate-path": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "requires": { + "p-limit": "^1.1.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" } } }, "eslint-plugin-es": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-2.0.0.tgz", - "integrity": "sha512-f6fceVtg27BR02EYnBhgWLFQfK6bN4Ll0nQFrBHOlCsAyxeZkn0NHns5O0YZOPrV1B3ramd6cgFwaoFLcSkwEQ==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", "requires": { - "eslint-utils": "^1.4.2", + "eslint-utils": "^2.0.0", "regexpp": "^3.0.0" - }, - "dependencies": { - "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==" - } } }, "eslint-plugin-import": { - "version": 
"2.18.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.18.2.tgz", - "integrity": "sha512-5ohpsHAiUBRNaBWAF08izwUGlbrJoJJ+W9/TBwsGoR1MnlgfwMIKrFeSjWbt6moabiXW9xNvtFz+97KHRfI4HQ==", + "version": "2.25.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.25.2.tgz", + "integrity": "sha512-qCwQr9TYfoBHOFcVGKY9C9unq05uOxxdklmBXLVvcwo68y5Hta6/GzCZEMx2zQiu0woKNEER0LE7ZgaOfBU14g==", "requires": { - "array-includes": "^3.0.3", - "contains-path": "^0.1.0", + "array-includes": "^3.1.4", + "array.prototype.flat": "^1.2.5", "debug": "^2.6.9", - "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.2", - "eslint-module-utils": "^2.4.0", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-module-utils": "^2.7.0", "has": "^1.0.3", + "is-core-module": "^2.7.0", + "is-glob": "^4.0.3", "minimatch": "^3.0.4", - "object.values": "^1.1.0", - "read-pkg-up": "^2.0.0", - "resolve": "^1.11.0" + "object.values": "^1.1.5", + "resolve": "^1.20.0", + "tsconfig-paths": "^3.11.0" }, "dependencies": { "debug": { @@ -4896,12 +4942,11 @@ } }, "doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "requires": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" + "esutils": "^2.0.2" } }, "ms": { @@ -4912,12 +4957,12 @@ } }, "eslint-plugin-node": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-10.0.0.tgz", - "integrity": "sha512-1CSyM/QCjs6PXaT18+zuAXsjXGIGo5Rw630rSKwokSs2jrYURQc4R5JZpoanNCqwNmepg+0eZ9L7YiRUJb8jiQ==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", "requires": { - "eslint-plugin-es": "^2.0.0", - "eslint-utils": "^1.4.2", + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", "ignore": "^5.1.1", "minimatch": "^3.0.4", "resolve": "^1.10.1", @@ -4925,9 +4970,9 @@ }, "dependencies": { "ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==" + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==" }, "semver": { "version": "6.3.0", @@ -4937,24 +4982,27 @@ } }, "eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==" + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-5.1.1.tgz", + "integrity": "sha512-XgdcdyNzHfmlQyweOPTxmc7pIsS6dE4MvwhXWMQ2Dxs1XAL2GJDilUsjWen6TWik0aSI+zD/PqocZBblcm9rdA==", + "requires": {} }, "eslint-plugin-react": { - "version": "7.14.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.14.3.tgz", - "integrity": 
"sha512-EzdyyBWC4Uz2hPYBiEJrKCUi2Fn+BJ9B/pJQcjw5X+x/H2Nm59S4MJIvL4O5NEE0+WbnQwEBxWY03oUk+Bc3FA==", + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.21.5.tgz", + "integrity": "sha512-8MaEggC2et0wSF6bUeywF7qQ46ER81irOdWS4QWxnnlAEsnzeBevk1sWh7fhpCghPpXb+8Ks7hvaft6L/xsR6g==", "requires": { - "array-includes": "^3.0.3", + "array-includes": "^3.1.1", + "array.prototype.flatmap": "^1.2.3", "doctrine": "^2.1.0", "has": "^1.0.3", - "jsx-ast-utils": "^2.1.0", - "object.entries": "^1.1.0", - "object.fromentries": "^2.0.0", - "object.values": "^1.1.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "object.entries": "^1.1.2", + "object.fromentries": "^2.0.2", + "object.values": "^1.1.1", "prop-types": "^15.7.2", - "resolve": "^1.10.1" + "resolve": "^1.18.1", + "string.prototype.matchall": "^4.0.2" }, "dependencies": { "doctrine": { @@ -4967,11 +5015,6 @@ } } }, - "eslint-plugin-standard": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", - "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==" - }, "eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -4982,26 +5025,40 @@ } }, "eslint-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", - "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "requires": { "eslint-visitor-keys": "^1.1.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + } } }, "eslint-visitor-keys": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", - "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", + "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==" }, "espree": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", - "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", + "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==", "requires": { - "acorn": "^7.1.1", - "acorn-jsx": "^5.2.0", - "eslint-visitor-keys": "^1.1.0" + "acorn": "^7.4.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^1.3.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==" + } } }, "esprima": { @@ -5018,9 +5075,9 @@ }, 
"dependencies": { "estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==" + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==" } } }, @@ -5033,9 +5090,9 @@ }, "dependencies": { "estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==" + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==" } } }, @@ -5049,16 +5106,6 @@ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" }, - "external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "requires": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - } - }, "fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -5074,20 +5121,12 @@ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" }, - "figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, "file-entry-cache": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "requires": { - "flat-cache": "^2.0.1" + "flat-cache": "^3.0.4" } }, "fill-range": { @@ -5108,41 +5147,33 @@ "debug": "^4.1.1" } }, - "find-root": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", - "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==" - }, "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "requires": { - "locate-path": "^3.0.0" + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" } }, "flat": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/flat/-/flat-4.1.1.tgz", - 
"integrity": "sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA==", - "requires": { - "is-buffer": "~2.0.3" - } + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==" }, "flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", "requires": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" + "flatted": "^3.1.0", + "rimraf": "^3.0.2" }, "dependencies": { "rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "requires": { "glob": "^7.1.3" } @@ -5150,9 +5181,9 @@ } }, "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.2.tgz", + "integrity": "sha512-JaTY/wtrcSyvXJl4IMFHPKyFur1sE9AUqc0QnhOaJ0CxHtAoIV8pYDzeEfAaNEtGkOfq4gr3LBFmdXW5mOQFnA==" }, "fs-minipass": { "version": "1.2.7", @@ -5168,9 +5199,9 @@ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "fsevents": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", - "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "optional": true }, "function-bind": { @@ -5219,14 +5250,23 @@ } }, "get-stdin": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz", - "integrity": "sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==" + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", + "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==" + }, + "get-symbol-description": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", + "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "requires": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + } }, "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "version": "7.1.7", + "resolved": 
"https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -5245,17 +5285,17 @@ } }, "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "version": "13.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.12.0.tgz", + "integrity": "sha512-uS8X6lSKN2JumVoXrbUz+uG4BYG+eiawqm3qFcT7ammfbUHeCBoJMlHcec/S3krSk73/AE/f0szYFmgAA3kYZg==", "requires": { - "type-fest": "^0.8.1" + "type-fest": "^0.20.2" } }, "graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", + "integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==" }, "growl": { "version": "1.10.5", @@ -5350,6 +5390,14 @@ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==" }, + "has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "requires": { + "has-symbols": "^1.0.2" + } + }, "has-unicode": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", @@ -5360,11 +5408,6 @@ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==" }, - "hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==" - }, "iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -5424,59 +5467,14 @@ "resolved": "https://registry.npmjs.org/inpath/-/inpath-1.0.2.tgz", "integrity": "sha1-SsIZcQ7Hpy9GD/lL9CTdPvDlKBc=" }, - "inquirer": { - "version": "7.3.3", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz", - "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==", + "internal-slot": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", + "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", "requires": { - "ansi-escapes": "^4.2.1", - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-width": "^3.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.19", - "mute-stream": "0.0.8", - "run-async": "^2.4.0", - "rxjs": "^6.6.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": 
"sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - } + "get-intrinsic": "^1.1.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" } }, "invert-kv": { @@ -5490,9 +5488,12 @@ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" }, "is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "requires": { + "has-bigints": "^1.0.1" + } }, "is-binary-path": { "version": "2.1.0", @@ -5503,35 +5504,34 @@ } }, "is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": "sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", "requires": { - "call-bind": "^1.0.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" } }, - "is-buffer": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", - "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==" - }, "is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==" + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz", + "integrity": "sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==" }, "is-core-module": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.3.0.tgz", - "integrity": "sha512-xSphU2KG9867tsYdLD4RWQ1VqdFl4HTO9Thf3I/3dLEfr0dbPTWKsuCKrgqMljg4nPE+Gq0VCnzT3gr0CyBmsw==", + "version": "2.8.0", + "resolved": 
"https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.0.tgz", + "integrity": "sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==", "requires": { "has": "^1.0.3" } }, "is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-extglob": { "version": "2.1.1", @@ -5547,9 +5547,9 @@ } }, "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "requires": { "is-extglob": "^2.1.1" } @@ -5565,30 +5565,59 @@ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" }, "is-number-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.6.tgz", + "integrity": "sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==", + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==" }, "is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", "requires": { "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" + "has-tostringtag": "^1.0.0" } }, + "is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==" + }, "is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-symbol": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", "requires": { - "has-symbols": "^1.0.1" + "has-symbols": "^1.0.2" + } + }, + "is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==" + }, + "is-weakref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.1.tgz", + "integrity": "sha512-b2jKc2pQZjaeFYWEf7ScFj+Be1I+PXmlu572Q8coTXZ+LD/QQZ7ShPMst8h16riVgyXTQwUsFEl74mDvc/3MHQ==", + "requires": { + "call-bind": "^1.0.0" } }, "isarray": { @@ -5607,12 +5636,11 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "argparse": "^2.0.1" } }, "json-parse-better-errors": { @@ -5630,13 +5658,21 @@ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=" }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "requires": { + "minimist": "^1.2.0" + } + }, "jsx-ast-utils": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-2.4.1.tgz", - "integrity": "sha512-z1xSldJ6imESSzOjd3NNkieVJKRlKYSOtMG8SFyCj2FIrvSaSuli/WjpBkEzCBoR9bYYYFgqJw61Xhu7Lcgk+w==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.2.1.tgz", + "integrity": "sha512-uP5vu8xfy2F9A6LGC22KO7e2/vGTS1MhP+18f++ZNlf0Ohaxbc9nIEwHAsejlJKyzfZzU5UIhe5ItYkitcZnZA==", "requires": { - "array-includes": "^3.1.1", - "object.assign": "^4.1.0" + "array-includes": "^3.1.3", + "object.assign": "^4.1.2" } }, "lcid": { @@ -5648,32 +5684,39 @@ } }, "levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "requires": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" } }, "load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", + "version": "5.3.0", + "resolved": 
"https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", + "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" + "graceful-fs": "^4.1.15", + "parse-json": "^4.0.0", + "pify": "^4.0.1", + "strip-bom": "^3.0.0", + "type-fest": "^0.3.0" + }, + "dependencies": { + "type-fest": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", + "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==" + } } }, "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" + "p-locate": "^5.0.0" } }, "lodash": { @@ -5691,58 +5734,28 @@ "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=" }, + "lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=" + }, + "lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" + }, + "lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=" + }, "log-symbols": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-3.0.0.tgz", - "integrity": "sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", "requires": { - "chalk": "^2.4.2" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - 
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" } }, "long": { @@ -5758,10 +5771,20 @@ "js-tokens": "^3.0.0 || ^4.0.0" } }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" + "lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "requires": { + "yallist": "^4.0.0" + }, + "dependencies": { + "yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + } + } }, "minimatch": { "version": "3.0.4", @@ -5802,164 +5825,119 @@ } }, "mocha": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-7.2.0.tgz", - "integrity": "sha512-O9CIypScywTVpNaRrCAgoUnJgozpIofjKUYmJhiCIJMiuYnLI6otcb1/kpW9/n/tJODHGZ7i8aLQoDVsMtOKQQ==", + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.3.tgz", + "integrity": "sha512-Xcpl9FqXOAYqI3j79pEtHBBnQgVXIhpULjGQa7DVb0Po+VzmSIK9kanAiWLHoRR/dbZ2qpdPshuXr8l1VaHCzw==", "requires": { - "ansi-colors": "3.2.3", + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", "browser-stdout": "1.3.1", - "chokidar": "3.3.0", - "debug": "3.2.6", - "diff": "3.5.0", - "escape-string-regexp": "1.0.5", - "find-up": "3.0.0", - "glob": "7.1.3", + "chokidar": "3.5.2", + "debug": "4.3.2", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", "growl": "1.10.5", "he": "1.2.0", - "js-yaml": "3.13.1", - "log-symbols": "3.0.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", "minimatch": "3.0.4", - "mkdirp": "0.5.5", - "ms": "2.1.1", - "node-environment-flags": "1.0.6", - "object.assign": "4.1.0", - "strip-json-comments": "2.0.1", - "supports-color": "6.0.0", - "which": "1.3.1", - "wide-align": "1.1.3", - "yargs": "13.3.2", - "yargs-parser": "13.1.2", - "yargs-unparser": "1.6.0" + "ms": "2.1.3", + "nanoid": "3.1.25", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" }, "dependencies": { "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } + "version": "5.0.1", + 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" }, "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", - "requires": { - "ms": "^2.1.1" - } - }, - "glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" } }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" }, "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + 
"strip-ansi": "^6.0.1" } }, "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" } }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" + }, "supports-color": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.0.0.tgz", - "integrity": "sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==", + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "requires": { - "has-flag": "^3.0.0" + "has-flag": "^4.0.0" } }, "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" } }, "y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" }, "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", "requires": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" } } } @@ -5979,6 +5957,11 @@ "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==" }, + "nanoid": { + "version": "3.1.25", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.25.tgz", + 
"integrity": "sha512-rdwtIXaXCLFAQbnfqDRnI6jaRHp9fTcYBjtFKE8eezcZ7LuLjhUaQGNeMXf1HmRoCH32CLz6XwX0TtxEOS/A3Q==" + }, "nats": { "version": "1.4.12", "resolved": "https://registry.npmjs.org/nats/-/nats-1.4.12.tgz", @@ -6013,20 +5996,6 @@ } } }, - "nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node-environment-flags": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/node-environment-flags/-/node-environment-flags-1.0.6.tgz", - "integrity": "sha512-5Evy2epuL+6TM0lCQGpFIj6KwiEsGh1SrHUhTbNX+sLbBtjidPZFAnVK9y5yU1+h//RitLbRHTIMyxQPtxMdHw==", - "requires": { - "object.getownpropertydescriptors": "^2.0.3", - "semver": "^5.7.0" - } - }, "node-pre-gyp": { "version": "0.16.0", "resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.16.0.tgz", @@ -6053,17 +6022,6 @@ "osenv": "^0.1.4" } }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, "normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -6119,9 +6077,9 @@ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" }, "object-inspect": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.2.tgz", - "integrity": "sha512-gz58rdPpadwztRrPjZE9DZLOABUpTGdcANUgOwBFO1C+HZZhePoP83M65WGDmbpwFYJSWqavbl4SgDn4k8RYTA==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", + "integrity": "sha512-jp7ikS6Sd3GxQfZJPyH3cjcbJF6GZPClgdV+EFygjFLQ5FmW/dRUnTd9PQ9k0JhoNDabWFbpF1yCdSWCC6gexg==" }, "object-keys": { "version": "1.1.1", @@ -6129,57 +6087,44 @@ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==" }, "object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", - "requires": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" - } - }, - "object.entries": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.3.tgz", - "integrity": "sha512-ym7h7OZebNS96hn5IJeyUmaWhaSM4SVtAPPfNLQEI2MYWCO2egsITb9nab2+i/Pwibx+R0mtn+ltKJXRSeTMGg==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "requires": { "call-bind": "^1.0.0", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1", - "has": "^1.0.3" + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" } }, - "object.fromentries": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.4.tgz", - "integrity": "sha512-EsFBshs5RUUpQEY1D4q/m59kMfz4YJvxuNCJcv/jWwOJr34EaVnG11ZrZa0UHB3wnzV1wx8m58T4hQL8IuNXlQ==", + 
"object.entries": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz", + "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" + "es-abstract": "^1.19.1" } }, - "object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": "sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", + "object.fromentries": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz", + "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" + "es-abstract": "^1.19.1" } }, "object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" + "es-abstract": "^1.19.1" } }, "once": { @@ -6190,25 +6135,17 @@ "wrappy": "1" } }, - "onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "requires": { - "mimic-fn": "^2.1.0" - } - }, "optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", "requires": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" } }, "optjs": { @@ -6244,25 +6181,25 @@ } }, "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "requires": { - "p-try": "^2.0.0" + "yocto-queue": "^0.1.0" } }, "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "version": 
"5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "requires": { - "p-limit": "^2.0.0" + "p-limit": "^3.0.2" } }, "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" }, "parent-module": { "version": "1.0.1", @@ -6273,11 +6210,12 @@ } }, "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", "requires": { - "error-ex": "^1.2.0" + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" } }, "partial-compare": { @@ -6286,9 +6224,9 @@ "integrity": "sha1-aKwbhk5GO3E+ZZMdIIPBVgYIgzo=" }, "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" }, "path-is-absolute": { "version": "1.0.1", @@ -6296,22 +6234,14 @@ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" }, "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" }, "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "requires": { - "pify": "^2.0.0" - } + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "pathval": { "version": "1.1.1", @@ -6319,9 +6249,9 @@ "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==" }, "picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==" + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==" }, "pidof": { "version": "1.0.2", @@ -6329,9 +6259,9 @@ "integrity": "sha1-+6Dq4cgzWhHrgJn10PPvvEXLTpA=" }, "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - 
"integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=" + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" }, "pkg-conf": { "version": "3.1.0", @@ -6342,49 +6272,51 @@ "load-json-file": "^5.2.0" }, "dependencies": { - "load-json-file": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", - "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "requires": { - "graceful-fs": "^4.1.15", - "parse-json": "^4.0.0", - "pify": "^4.0.1", - "strip-bom": "^3.0.0", - "type-fest": "^0.3.0" + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" } }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" + "p-limit": "^2.0.0" } }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" }, - "type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==" + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" } } }, - "pkg-config": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pkg-config/-/pkg-config-1.1.1.tgz", - "integrity": "sha1-VX7yLXPaPIg3EHdmxS6tq94pj+Q=", - "requires": { - "debug-log": "^1.0.0", - "find-root": "^1.0.0", - "xtend": "^4.0.1" - } - }, "pkg-dir": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", @@ -6426,17 +6358,17 @@ "p-limit": "^1.1.0" } }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" + "path-exists": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" } } }, "prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==" }, "process-nextick-args": { "version": "2.0.1", @@ -6483,10 +6415,13 @@ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, - "queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "requires": { + "safe-buffer": "^5.1.0" + } }, "rc": { "version": "1.2.8", @@ -6512,65 +6447,6 @@ "mute-stream": "~0.0.4" } }, - "read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "requires": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - } - }, - "read-pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "requires": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - }, - "dependencies": { - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "requires": { - "locate-path": "^2.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "requires": { - "p-limit": "^1.1.0" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" - } - } - }, "readable-stream": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", @@ -6586,27 +6462,36 @@ } }, "readdirp": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.2.0.tgz", - "integrity": "sha512-crk4Qu3pmXwgxdSgGhgA/eXiJAPQiX4GMOZZMXnqKxHX7TaoL+3gQVo/WeuAiogr07DpnfjIMpXXa+PAIvwPGQ==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": 
"sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "requires": { - "picomatch": "^2.0.4" + "picomatch": "^2.2.1" + } + }, + "regexp.prototype.flags": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", + "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" } }, "regexpp": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", - "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", + "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==" }, "require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==" }, "resolve": { "version": "1.20.0", @@ -6622,15 +6507,6 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" }, - "restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, "rimraf": { "version": "2.7.1", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", @@ -6639,27 +6515,6 @@ "glob": "^7.1.3" } }, - "run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==" - }, - "run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "requires": { - "queue-microtask": "^1.2.2" - } - }, - "rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "requires": { - "tslib": "^1.9.0" - } - }, "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", @@ -6676,20 +6531,19 @@ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" }, "semistandard": { - "version": "14.2.3", - "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-14.2.3.tgz", - "integrity": 
"sha512-vSfN5ewUHz85dpnB5wf8Xo/btFNfVI8UmPYe0xpYLNEfrZQi/qLhtx12XnB/jNlsB2CrCokHRXlzl2zPoXghjw==", - "requires": { - "eslint": "~6.8.0", - "eslint-config-semistandard": "15.0.0", - "eslint-config-standard": "14.1.1", - "eslint-config-standard-jsx": "8.1.0", - "eslint-plugin-import": "~2.18.0", - "eslint-plugin-node": "~10.0.0", - "eslint-plugin-promise": "~4.2.1", - "eslint-plugin-react": "~7.14.2", - "eslint-plugin-standard": "~4.0.0", - "standard-engine": "^12.0.0" + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-16.0.1.tgz", + "integrity": "sha512-ApAJ9fMAIwYuk5xI2HWSCd8s5o5L95abxU4dYl6ovUX6Rcww/7oxtaSuu9wLFL/Gfj/EXx1h6S4itXy5vyL60Q==", + "requires": { + "eslint": "^7.27.0", + "eslint-config-semistandard": "16.0.0", + "eslint-config-standard": "16.0.3", + "eslint-config-standard-jsx": "10.0.0", + "eslint-plugin-import": "^2.22.1", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-promise": "^5.1.0", + "eslint-plugin-react": "~7.21.5", + "standard-engine": "^14.0.0" } }, "semver": { @@ -6697,23 +6551,41 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, + "serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "requires": { + "randombytes": "^2.1.0" + } + }, "set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" }, "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "requires": { - "shebang-regex": "^1.0.0" + "shebang-regex": "^3.0.0" } }, "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" + }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "requires": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + } }, "signal-exit": { "version": "3.0.3", @@ -6726,85 +6598,36 @@ "integrity": "sha1-jXlaJ+ojlT32tSuRCB5eImZZk8U=" }, "slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", "requires": { - "ansi-styles": 
"^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" }, "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" } } }, - "spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==" - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.7.tgz", - "integrity": "sha512-U+MTEOO0AiDzxwFvoa4JVnMV6mZlJKk2sBLt90s7G0Gd0Mlknc7kxEn3nuDPNZRta7O2uy8oLcZLVT+4sqNZHQ==" - }, "sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" }, "standard-engine": { - "version": "12.1.0", - "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-12.1.0.tgz", - "integrity": "sha512-DVJnWM1CGkag4ucFLGdiYWa5/kJURPONmMmk17p8FT5NE4UnPZB1vxWnXnRo2sPSL78pWJG8xEM+1Tu19z0deg==", + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", + "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", "requires": { - "deglob": "^4.0.1", - "get-stdin": "^7.0.0", + "get-stdin": "^8.0.0", "minimist": "^1.2.5", - "pkg-conf": "^3.1.0" + "pkg-conf": "^3.1.0", + "xdg-basedir": "^4.0.0" } }, "string_decoder": { @@ -6825,6 +6648,21 @@ "strip-ansi": "^3.0.0" } }, + "string.prototype.matchall": { + "version": "4.0.6", + "resolved": 
"https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.6.tgz", + "integrity": "sha512-6WgDX8HmQqvEd7J+G6VtAahhsQIssiZ8zl7zKh1VDMFyL3hRTJP4FTNA3RbIp2TOQ9AYNDcc7e3fH0Qbup+DBg==", + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3", + "es-abstract": "^1.19.1", + "get-intrinsic": "^1.1.1", + "has-symbols": "^1.0.2", + "internal-slot": "^1.0.3", + "regexp.prototype.flags": "^1.3.1", + "side-channel": "^1.0.4" + } + }, "string.prototype.trimend": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", @@ -6875,58 +6713,83 @@ "integrity": "sha512-prJAt+iS2ITeygjLt/FGtN1qsIQHrRePCUqWtP0hGv6JsS0LSQTR+y5hWAd4frUIM/sjG95jHFUK1gx244KwUA==" }, "table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", - "requires": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" - }, - "dependencies": { + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/table/-/table-6.7.2.tgz", + "integrity": "sha512-UFZK67uvyNivLeQbVtkiUs8Uuuxv24aSL4/Vil2PJVtMgU8Lx0CYkP12uCGa3kjyQzOSgV1+z9Wkb82fCGsO0g==", + "requires": { + "ajv": "^8.0.1", + "lodash.clonedeep": "^4.5.0", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "ajv": { + "version": "8.6.3", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.3.tgz", + "integrity": "sha512-SMJOdDP6LqTkD0Uq8qLi+gMwSt0imXLSV080qFVwJCpH9U6Mb+SUGHAXM0KNbcBPguytWyvFxcHgMLe2D2XSpw==", + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" }, "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { - "emoji-regex": "^7.0.1", - 
"is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" } }, "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.1" } } } }, "tar": { - "version": "4.4.13", - "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz", - "integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==", - "requires": { - "chownr": "^1.1.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.8.6", - "minizlib": "^1.2.1", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.2", - "yallist": "^3.0.3" + "version": "4.4.19", + "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.19.tgz", + "integrity": "sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==", + "requires": { + "chownr": "^1.1.4", + "fs-minipass": "^1.2.7", + "minipass": "^2.9.0", + "minizlib": "^1.3.3", + "mkdirp": "^0.5.5", + "safe-buffer": "^5.2.1", + "yallist": "^3.1.1" + }, + "dependencies": { + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + } } }, "text-table": { @@ -6934,19 +6797,6 @@ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" - }, - "tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "requires": { - "os-tmpdir": "~1.0.2" - } - }, "to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -6963,10 +6813,16 @@ "tweetnacl": "^1.0.3" } }, - "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + "tsconfig-paths": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.11.0.tgz", + "integrity": "sha512-7ecdYDnIdmv639mmDwslG6KQg1Z9STTz1j7Gcz0xa+nshh/gKDAHcPxRbWOsA3SPp0tXP2leTcY9Kw+NAkfZzA==", + "requires": { + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.0", + "strip-bom": "^3.0.0" + } }, "tweetnacl": { "version": "1.0.3", @@ -6974,11 +6830,11 @@ "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" }, "type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": 
"sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "requires": { - "prelude-ls": "~1.1.2" + "prelude-ls": "^1.2.1" } }, "type-detect": { @@ -6987,9 +6843,9 @@ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==" }, "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==" }, "unbox-primitive": { "version": "1.0.1", @@ -7002,11 +6858,6 @@ "which-boxed-primitive": "^1.0.2" } }, - "uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" - }, "uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -7025,19 +6876,10 @@ "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==" }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "requires": { "isexe": "^2.0.0" } @@ -7054,11 +6896,6 @@ "is-symbol": "^1.0.3" } }, - "which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, "wide-align": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", @@ -7077,6 +6914,11 @@ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==" }, + "workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==" + }, "wrap-ansi": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", @@ -7091,23 +6933,15 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, - "write": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "requires": { - "mkdirp": "^0.5.1" - } - }, "wtfnode": { "version": "0.8.4", "resolved": 
"https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.4.tgz", "integrity": "sha512-64GEKtMt/MUBuAm+8kHqP74ojjafzu00aT0JKsmkIwYmjRQ/odO0yhbzKLm+Z9v1gMla+8dwITRKzTAlHsB+Og==" }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" + "xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==" }, "y18n": { "version": "3.2.2", @@ -7134,123 +6968,37 @@ } }, "yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - } - } + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==" }, "yargs-unparser": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-1.6.0.tgz", - "integrity": "sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", "requires": { - "flat": "^4.1.0", - "lodash": "^4.17.15", - "yargs": "^13.3.0" + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" }, "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "requires": { - "ansi-regex": "^4.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - }, - "y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + "camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==" }, - "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "requires": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - } + "decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==" } } + }, + "yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==" } } } diff --git a/test/grpc/package.json b/test/grpc/package.json index e3021c9b9..64f25388b 100644 --- a/test/grpc/package.json +++ b/test/grpc/package.json @@ -19,11 +19,11 @@ "grpc-promise": "^1.4.0", "inpath": "^1.0.2", "lodash": "^4.17.21", - "mocha": "^7.2.0", + "mocha": "^9.1.3", "nats": "^1.4.12", "pidof": "^1.0.2", "read": "^1.0.7", - "semistandard": "^14.2.3", + "semistandard": "^16.0.1", "sleep-promise": "^8.0.1", "systeminformation": "^5.6.12", "wtfnode": "^0.8.4" diff --git a/test/grpc/test_cli.js b/test/grpc/test_cli.js index 44d599f27..78d483b7e 100644 --- a/test/grpc/test_cli.js +++ b/test/grpc/test_cli.js @@ -43,7 +43,8 @@ function runMockServer (rules) { '..', '..', 'rpc', - 'proto', + 'mayastor-api', + 'protobuf', 'mayastor.proto' ), packageName: 'mayastor', diff --git a/test/grpc/test_common.js b/test/grpc/test_common.js index f660bbc29..8db748a0f 100644 --- a/test/grpc/test_common.js +++ b/test/grpc/test_common.js @@ -352,7 +352,8 @@ function createGrpcClient 
(serviceName = 'Mayastor', endpoint = grpcEndpoint) { '..', '..', 'rpc', - 'proto', + 'mayastor-api', + 'protobuf', 'mayastor.proto' ), packageName: 'mayastor', diff --git a/test/grpc/test_csi.js b/test/grpc/test_csi.js index f9b1e6d82..c546bfbf2 100644 --- a/test/grpc/test_csi.js +++ b/test/grpc/test_csi.js @@ -335,7 +335,6 @@ describe('csi', function () { }); }); - csiProtocolTest('NBD', enums.NEXUS_NBD, 10000); csiProtocolTest('iSCSI', enums.NEXUS_ISCSI, 120000); csiProtocolTest('NVMF', enums.NEXUS_NVMF, 120000); }); diff --git a/test/grpc/test_nexus.js b/test/grpc/test_nexus.js index 5c214aee3..a95d22f82 100644 --- a/test/grpc/test_nexus.js +++ b/test/grpc/test_nexus.js @@ -1,6 +1,6 @@ // Unit tests for nexus grpc api. Nexus is basically a hub which does IO // replication to connected replicas. We test nexus operations with all -// supported replica types: nvmf, iscsi, bdev, aio and uring. aio is not used +// supported replica types: nvmf, bdev, aio and uring. aio is not used // in the product but it was part of initial implementation, so we keep it in // case it would be useful in the future. uring was added later and is also // not used in the product but kept for testing. @@ -16,6 +16,8 @@ const grpc = require('grpc'); const common = require('./test_common'); const enums = require('./grpc_enums'); const url = require('url'); +const NEXUSNAME = 'nexus0'; +const NEXUSUUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff20'; // just some UUID used for nexus ID const UUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff21'; const UUID2 = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff22'; @@ -33,10 +35,6 @@ const externIp = common.getMyIp(); // port at which iscsi replicas are available const iscsiReplicaPort = '3261'; -// NVMEoF frontends don't play nicely with iSCSI backend at the time of writing, -// so temporarily disable these tests. -const doIscsiReplica = false; - // The config just for nvmf target which cannot run in the same process as // the nvmf initiator (SPDK limitation). 
const configNvmfTarget = ` @@ -223,6 +221,15 @@ describe('nexus', function () { }); }; + const createNexusV2 = (args) => { + return new Promise((resolve, reject) => { + client.createNexusV2(args, (err, data) => { + if (err) return reject(err); + resolve(data); + }); + }); + }; + const createArgs = { uuid: UUID, size: 131072, @@ -267,7 +274,7 @@ describe('nexus', function () { }, next); }, (next) => { - common.createBdevs([`malloc:///Malloc0?size_mb=64&blk_size=4096&uuid=${TGTUUID}`], 'nvmf', '127.0.0.1:10125', next); + common.createBdevs([`malloc:///Malloc0?size_mb=64&blk_size=4096&uuid=${TGTUUID}`], 'nvmf', '127.0.0.1:10125', next); }, (next) => { fs.writeFile(aioFile, '', next); @@ -295,7 +302,7 @@ describe('nexus', function () { }, next); }, (next) => { - common.createBdevs(['malloc:///Malloc0?size_mb=64&blk_size=4096'], 'nvmf', common.grpcEndpoint, next); + common.createBdevs(['malloc:///Malloc0?size_mb=64&blk_size=4096'], 'nvmf', common.grpcEndpoint, next); } ], done @@ -341,7 +348,6 @@ describe('nexus', function () { `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${BASEDEV}` ] }; - if (doIscsiReplica) args.children.push(`iscsi://iscsi://${externIp}:${iscsiReplicaPort}/iqn.2019-05.io.openebs:disk1`); if (doUring()) args.children.push(`uring://${uringFile}?blk_size=4096`); client.createNexus(args, done); @@ -350,7 +356,7 @@ describe('nexus', function () { it('should create a nexus using all types of replicas', (done) => { createNexusWithAllTypes((err, nexus) => { if (err) return done(err); - const expectedChildren = 3 + doIscsiReplica + doUring(); + const expectedChildren = 3 + doUring(); assert.equal(nexus.uuid, UUID); assert.equal(nexus.state, 'NEXUS_ONLINE'); assert.lengthOf(nexus.children, expectedChildren); @@ -364,21 +370,13 @@ describe('nexus', function () { `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${BASEDEV}` ); assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); - if (doIscsiReplica) { - assert.equal( - nexus.children[3].uri, - `iscsi://${externIp}:${iscsiReplicaPort}/iqn.2019-05.io.openebs:disk1` - ); - assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); - } if (doUring()) { - const uringIndex = 3 + doIscsiReplica; assert.equal( - nexus.children[uringIndex].uri, + nexus.children[3].uri, `uring://${uringFile}?blk_size=4096` ); - assert.equal(nexus.children[uringIndex].state, 'CHILD_ONLINE'); + assert.equal(nexus.children[3].state, 'CHILD_ONLINE'); } done(); }); @@ -399,7 +397,7 @@ describe('nexus', function () { assert.lengthOf(res.nexus_list, 1); const nexus = res.nexus_list[0]; - const expectedChildren = 3 + doIscsiReplica + doUring(); + const expectedChildren = 3 + doUring(); assert.equal(nexus.uuid, UUID); assert.equal(nexus.state, 'NEXUS_ONLINE'); @@ -414,21 +412,13 @@ describe('nexus', function () { `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${BASEDEV}` ); assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); - if (doIscsiReplica) { - assert.equal( - nexus.children[3].uri, - `iscsi://${externIp}:${iscsiReplicaPort}/iqn.2019-05.io.openebs:disk1` - ); - assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); - } if (doUring()) { - const uringIndex = 3 + doIscsiReplica; assert.equal( - nexus.children[uringIndex].uri, + nexus.children[3].uri, `uring://${uringFile}?blk_size=4096` ); - assert.equal(nexus.children[uringIndex].state, 'CHILD_ONLINE'); + assert.equal(nexus.children[3].state, 'CHILD_ONLINE'); } done(); }); @@ -446,7 +436,7 @@ describe('nexus', function () { client.listNexus({}, (err, res) => { if (err) return done(err); const nexus = res.nexus_list[0]; 
- const expectedChildren = 2 + doIscsiReplica + doUring(); + const expectedChildren = 2 + doUring(); assert.lengthOf(nexus.children, expectedChildren); assert(!nexus.children.find((ch) => ch.uri.match(/^nvmf:/))); done(); @@ -470,7 +460,7 @@ describe('nexus', function () { client.listNexus({}, (err, res) => { if (err) return done(err); const nexus = res.nexus_list[0]; - const expectedChildren = 3 + doIscsiReplica + doUring(); + const expectedChildren = 3 + doUring(); assert.lengthOf(nexus.children, expectedChildren); assert(nexus.children.find((ch) => ch.uri.match(/^nvmf:/))); done(); @@ -808,6 +798,60 @@ describe('nexus', function () { }); }); + it('should create v2 and destroy a nexus with UUID as name', async () => { + const args = { + name: NEXUSUUID, + uuid: UUID, + size: 32768, + minCntlId: 1, + maxCntlId: 0xffef, + resvKey: 0x12345678, + children: [ + 'malloc:///malloc1?size_mb=64' + ] + }; + await createNexusV2(args); + await destroyNexus({ uuid: NEXUSUUID }); + }); + + it('should fail to create a nexus with invalid NVMe controller ID range', (done) => { + const args = { + name: NEXUSNAME, + uuid: UUID, + size: 131072, + minCntlId: 0xfff0, + maxCntlId: 1, + resvKey: 0x12345678, + children: [ + 'malloc:///malloc1?size_mb=64' + ] + }; + client.createNexusV2(args, (err) => { + if (!err) return done(new Error('Expected error')); + assert.equal(err.code, grpc.status.INTERNAL); + done(); + }); + }); + + it('should fail to create a nexus with invalid NVMe reservation key', (done) => { + const args = { + name: NEXUSNAME, + uuid: UUID, + size: 131072, + minCntlId: 1, + maxCntlId: 0xffef, + resvKey: 0, + children: [ + 'malloc:///malloc1?size_mb=64' + ] + }; + client.createNexusV2(args, (err) => { + if (!err) return done(new Error('Expected error')); + assert.equal(err.code, grpc.status.INTERNAL); + done(); + }); + }); + it('should have zero nexus devices left', (done) => { client.listNexus({}, (err, res) => { if (err) return done(err); @@ -904,7 +948,7 @@ describe('nexus', function () { common.jsonrpcCommand('/tmp/target.sock', 'nvmf_subsystem_remove_ns', args, done); }); - it.skip('should still have bdev of removed child after remove event', (done) => { + it('dummy call to get list of bdevs to allow remove event to be processed', (done) => { common.jsonrpcCommand(null, 'bdev_get_bdevs', (err, out) => { if (err) return done(err); const bdevs = JSON.parse(out); @@ -915,7 +959,7 @@ describe('nexus', function () { return done(); } } - done(new Error('bdev not found')); + done(); }); }); diff --git a/test/grpc/test_rebuild.js b/test/grpc/test_rebuild.js index 75a8aac4d..a04b621a3 100644 --- a/test/grpc/test_rebuild.js +++ b/test/grpc/test_rebuild.js @@ -50,7 +50,7 @@ const childOfflineArgs = { }; function createGrpcClient () { - const PROTO_PATH = path.join(__dirname, '/../../rpc/proto/mayastor.proto'); + const PROTO_PATH = path.join(__dirname, '/../../rpc/mayastor-api/protobuf/mayastor.proto'); // Load mayastor proto file with mayastor service const packageDefinition = protoLoader.loadSync(PROTO_PATH, { diff --git a/test/grpc/test_replica.js b/test/grpc/test_replica.js index b3b50ba34..27c512f8b 100644 --- a/test/grpc/test_replica.js +++ b/test/grpc/test_replica.js @@ -360,10 +360,6 @@ describe('replica', function () { assert.lengthOf(res, 1); res = res[0]; assert.equal(res.pool, POOL); - assert.equal(parseInt(res.stats.num_read_ops), 0); - assert.equal(parseInt(res.stats.num_write_ops), 0); - assert.equal(parseInt(res.stats.bytes_read), 0); - assert.equal(parseInt(res.stats.bytes_written), 0); 
done(); }); }); diff --git a/test/python/README.md b/test/python/README.md index f872097c1..f287a7fb7 100644 --- a/test/python/README.md +++ b/test/python/README.md @@ -1,6 +1,83 @@ -# setup virtual env +# Requirements -Not all packags are availble on nix, so one extra step is needed. +The key thing of the tests is that we want to run an "normal" workload. That is +to say, we want to run a Linux kernel target initiator against it. For this we +use VMs. Using a VM is not required but it may cause dangling devices in your +kernel that might be a hard to get rid of. -virtualenv env -pip install -r requirements.txt +Test may define their own `docker-compose.yml` to ensure it has the right +environment. Its possible however, that you are not able to run all the tests +on your local machine. You can change the `docker-compose.yml` to match your +needs. For example, to lower the amount of cores. + +# Configuring the test environment + +For configuration of a run or different runs, we make use of `pytest-testconfig`. This +file contains settings available in each run. This file can be a python script +or a plain configuration file. + +Currently, it contains a single variable. It is advised to tailor it to your +needs and use it as an argument. i.e: + +``` +pytest $test --tc-file=TESTCONFIG +``` + +or use an environment variable, for example: + +``` +export PY_TEST_CONFIG_FILE=/path/to/my/config.ini +``` + +# Converting a .feature + +A feature can automatically be converted to python code. This is not required +but avoids mismatch between the two. An advantage of using these features is +that others do not have to figure out what the test is supposed to be testing. + +``` +pytest-bdd generate xxx.feature > test_xxx.py + +``` + +When new scenarios are added the files can be updated with: + +``` +pytest --generate-missing --feature pool_create.feature test_pool.py + +``` + +# Setup virtual env + +Not all packages are available on nix, so one extra step is needed if you run +nix. + +```shell +python -m grpc_tools.protoc --proto_path=`realpath rpc/mayastor-api/protobuf` --python_out=test/python --grpc_python_out=test/python mayastor.proto +python -m grpc_tools.protoc --proto_path=`realpath csi/proto` --python_out=test/python --grpc_python_out=test/python csi.proto +virtualenv --no-setuptools test/python/venv +source test/python/venv/bin/activate +pip install -r test/python/requirements.txt +``` + +The virtual environment must be activated for every shell. Consider using something as `direnv` to automate this. +The `.proto` files, generated by the tool should never be committed and are already part of `.gitignore`. + +# Running the tests + +Running an individual test: +`python -m pytest --tc-file=test_config.ini --docker-compose=tests/replica tests/replica/test_bdd_pool.py` + +Running all tests within a directory: +`python -m pytest --tc-file=test_config.ini --docker-compose=tests/replica tests/replica` + +# Running tests with existing containers + +If you need to debug or want the environment not cleaned up you can start the containers +manually. 
For example: + +``` +docker-compose up +# different termimal +python -m pytest --tc-file=$path_to_my_config $test_name --use-running-containers --docker-compose-no-build -s +``` diff --git a/test/python/common/command.py b/test/python/common/command.py index 20c02c93c..6c17e7d22 100644 --- a/test/python/common/command.py +++ b/test/python/common/command.py @@ -2,6 +2,7 @@ from collections import namedtuple import asyncssh import subprocess + CommandReturn = namedtuple("CommandReturn", "returncode stdout stderr") @@ -12,9 +13,8 @@ def run_cmd(cmd, check=True): async def run_cmd_async(cmd): """Runs a command on the current machine.""" proc = await asyncio.create_subprocess_shell( - cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE) + cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) stdout, stderr = await proc.communicate() output_message = f"\n[{proc.pid}] Command:\n{cmd}" @@ -26,18 +26,13 @@ async def run_cmd_async(cmd): # If a non-zero return code was thrown, raise an exception if proc.returncode != 0: - output_message += \ - f"\nReturned error code: {proc.returncode}" + output_message += f"\nReturned error code: {proc.returncode}" if stderr.decode() != "": - output_message += \ - f"\nstderr:\n{stderr.decode()}" + output_message += f"\nstderr:\n{stderr.decode()}" raise ChildProcessError(output_message) - return CommandReturn( - proc.returncode, - stdout.decode(), - stderr.decode()) + return CommandReturn(proc.returncode, stdout.decode(), stderr.decode()) async def run_cmd_async_at(host, cmd): @@ -54,15 +49,10 @@ async def run_cmd_async_at(host, cmd): if result.exit_status != 0: - output_message += \ - f"\nReturned error code: {result.exit_status}" + output_message += f"\nReturned error code: {result.exit_status}" if result.stderr != "": - output_message += \ - f"\nstderr:\n{result.stderr}" + output_message += f"\nstderr:\n{result.stderr}" raise ChildProcessError(output_message) - return CommandReturn( - result.exit_status, - result.stdout, - result.stderr) + return CommandReturn(result.exit_status, result.stdout, result.stderr) diff --git a/test/python/common/csi_hdl.py b/test/python/common/csi_hdl.py new file mode 100644 index 000000000..b2bac8a7e --- /dev/null +++ b/test/python/common/csi_hdl.py @@ -0,0 +1,17 @@ +import grpc +import csi_pb2 as pb +import csi_pb2_grpc as rpc + + +class CsiHandle(object): + def __init__(self, csi_socket): + self.channel = grpc.insecure_channel(csi_socket) + # self.controller = rpc.ControllerStub(self.channel) + self.identity = rpc.IdentityStub(self.channel) + self.node = rpc.NodeStub(self.channel) + + def __del__(self): + del self.channel + + def close(self): + self.__del__() diff --git a/test/python/common/fio.py b/test/python/common/fio.py index a8656479f..89cf4caf9 100644 --- a/test/python/common/fio.py +++ b/test/python/common/fio.py @@ -1,11 +1,8 @@ import shutil -from common.command import run_cmd_async -import asyncio class Fio(object): - - def __init__(self, name, rw, device, runtime=15): + def __init__(self, name, rw, device, runtime=15, optstr=""): self.name = name self.rw = rw self.device = device @@ -13,20 +10,18 @@ def __init__(self, name, rw, device, runtime=15): self.output = {} self.success = {} self.runtime = runtime + self.optstr = optstr - def build(self) -> str: - if isinstance(self.device, str): - devs = [self.device] - else: - devs = self.device + def build(self): + devs = [self.device] if isinstance(self.device, str) else self.device - command = ("sudo fio --ioengine=linuxaio --direct=1 
--bs=4k " - "--time_based=1 --rw={} " - "--group_reporting=1 --norandommap=1 --iodepth=64 " - "--runtime={} --name={} --filename={}").format(self.rw, - self.runtime, - self.name, - " --filename=".join(map(str, - devs))) + command = ( + "sudo fio --ioengine=linuxaio --direct=1 --bs=4k " + "--time_based=1 {} --rw={} " + "--group_reporting=1 --norandommap=1 --iodepth=64 " + "--runtime={} --name={} --filename={}" + ).format( + self.optstr, self.rw, self.runtime, self.name, ":".join(map(str, devs)) + ) return command diff --git a/test/python/common/fio_spdk.py b/test/python/common/fio_spdk.py index 12ddec08a..fa04cec18 100644 --- a/test/python/common/fio_spdk.py +++ b/test/python/common/fio_spdk.py @@ -4,7 +4,6 @@ class FioSpdk(object): - def __init__(self, name, rw, uris, runtime=15): self.name = name self.rw = rw @@ -14,22 +13,30 @@ def __init__(self, name, rw, uris, runtime=15): self.filenames = [] for uri in uris: u = urlparse(uri) - self.filenames.append(("\'trtype=tcp adrfam=IPv4 traddr={} " - "trsvcid={} subnqn={} ns=1\'").format( - u.hostname, u.port, u.path[1:].replace(":", "\\:"))) + self.filenames.append( + ( + "'trtype=tcp adrfam=IPv4 traddr={} " "trsvcid={} subnqn={} ns=1'" + ).format(u.hostname, u.port, u.path[1:].replace(":", "\\:")) + ) self.cmd = shutil.which("fio") self.runtime = runtime def build(self) -> str: - spdk_path = os.environ.get('SPDK_PATH') + spdk_path = os.environ.get("SPDK_PATH") if spdk_path is None: - spdk_path = os.getcwd() + '/../../spdk-sys/spdk/build' - command = ("sudo LD_PRELOAD={}/fio/spdk_nvme fio --ioengine=spdk " - "--direct=1 --bs=4k --time_based=1 --runtime={} " - "--thread=1 --rw={} --group_reporting=1 --norandommap=1 " - "--iodepth=64 --name={} --filename={}").format( - spdk_path, self.runtime, self.rw, self.name, - " --filename=".join(map(str, self.filenames))) + spdk_path = os.getcwd() + "/../../spdk-sys/spdk/build" + command = ( + "sudo LD_PRELOAD={}/fio/spdk_nvme fio --ioengine=spdk " + "--direct=1 --bs=4k --time_based=1 --runtime={} " + "--thread=1 --rw={} --group_reporting=1 --norandommap=1 " + "--iodepth=64 --name={} --filename={}" + ).format( + spdk_path, + self.runtime, + self.rw, + self.name, + " --filename=".join(map(str, self.filenames)), + ) return command diff --git a/test/python/common/hdl.py b/test/python/common/hdl.py index 47fcf94fb..ed193d345 100644 --- a/test/python/common/hdl.py +++ b/test/python/common/hdl.py @@ -2,6 +2,8 @@ import mayastor_pb2 as pb import grpc import mayastor_pb2_grpc as rpc +from pytest_testconfig import config +from functools import partial pytest_plugins = ["docker_compose"] @@ -12,18 +14,40 @@ class MayastorHandle(object): def __init__(self, ip_v4): """Init.""" self.ip_v4 = ip_v4 + self.timeout = float(config["grpc"]["client_timeout"]) self.channel = grpc.insecure_channel(("%s:10124") % self.ip_v4) self.bdev = rpc.BdevRpcStub(self.channel) self.ms = rpc.MayastorStub(self.channel) - self.bdev_list() - self.pool_list() + self._readiness_check() + + def set_timeout(self, timeout): + self.timeout = timeout + + def install_stub(self, name): + stub = getattr(rpc, name)(self.channel) + + # Install default timeout to all functions, ignore system attributes. + for f in dir(stub): + if not f.startswith("__"): + h = getattr(stub, f) + setattr(stub, f, partial(h, timeout=self.timeout)) + return stub + + def _readiness_check(self): + try: + self.bdev_list() + self.pool_list() + except grpc._channel._InactiveRpcError: + # This is to get around a gRPC bug. 
+ # Retry once before failing + self.bdev_list() + self.pool_list() def reconnect(self): self.channel = grpc.insecure_channel(("%s:10124") % self.ip_v4) - self.bdev = rpc.BdevRpcStub(self.channel) - self.ms = rpc.MayastorStub(self.channel) - self.bdev_list() - self.pool_list() + self.bdev = self.install_stub("BdevRpcStub") + self.ms = self.install_stub("MayastorStub") + self._readiness_check() def __del__(self): del self.channel @@ -31,6 +55,9 @@ def __del__(self): def close(self): self.__del__() + def ip_address(self): + return self.ip_v4 + def as_target(self) -> str: """Returns this node as scheme which is used to designate this node to be used as the node where the nexus shall be created on.""" @@ -53,20 +80,13 @@ def bdev_create(self, uri): return self.bdev.Create(pb.BdevUri(uri=uri)) def bdev_share(self, name): - return self.bdev.Share( - pb.BdevShareRequest( - name=str(name), - proto="nvmf")).uri + return self.bdev.Share(pb.BdevShareRequest(name=str(name), proto="nvmf")).uri def bdev_unshare(self, name): - return self.bdev.Unshare( - pb.CreateReply( - name=str(name))) + return self.bdev.Unshare(pb.CreateReply(name=str(name))) def bdev_destroy(self, uri): - return self.bdev.Destroy( - pb.BdevUri( - uri=str(uri))) + return self.bdev.Destroy(pb.BdevUri(uri=str(uri))) def pool_create(self, name, bdev): """Create a pool with given name on this node using the bdev as the @@ -88,42 +108,88 @@ def replica_create(self, pool, uuid, size, share=1): ) ) + def replica_create_v2(self, pool, name, uuid, size, share=1): + """Create a replica on the pool with the specified UUID and size.""" + return self.ms.CreateReplicaV2( + pb.CreateReplicaRequestV2( + pool=pool, name=name, uuid=uuid, size=size, thin=False, share=share + ) + ) + def replica_destroy(self, uuid): """Destroy the replica by the UUID, the pool is resolved within mayastor.""" return self.ms.DestroyReplica(pb.DestroyReplicaRequest(uuid=uuid)) def replica_list(self): + """List existing replicas""" return self.ms.ListReplicas(pb.Null()) + def replica_list_v2(self): + """List existing replicas along with their UUIDs""" + return self.ms.ListReplicasV2(pb.Null()) + def nexus_create(self, uuid, size, children): - """Create a nexus with the given uuid and size. The children are - should be an array of nvmf URIs.""" + """Create a nexus with the given uuid and size. The children should + be an array of nvmf URIs.""" return self.ms.CreateNexus( pb.CreateNexusRequest(uuid=str(uuid), size=size, children=children) ) + def nexus_create_v2( + self, name, uuid, size, min_cntlid, max_cntlid, resv_key, preempt_key, children + ): + """Create a nexus with the given name, uuid, size, NVMe controller ID range, + NVMe reservation and preempt keys for children. The children should be an array + of nvmf URIs.""" + return self.ms.CreateNexusV2( + pb.CreateNexusV2Request( + name=name, + uuid=str(uuid), + size=size, + minCntlId=min_cntlid, + maxCntlId=max_cntlid, + resvKey=resv_key, + preemptKey=preempt_key, + children=children, + ) + ) + def nexus_destroy(self, uuid): """Destroy the nexus.""" return self.ms.DestroyNexus(pb.DestroyNexusRequest(uuid=uuid)) - def nexus_publish(self, uuid): + def nexus_publish(self, uuid, share=1): """Publish the nexus. 
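`nexus_create_v2` adds the NVMe controller ID range and reservation/preempt keys on top of the original `nexus_create` call. A hedged sketch of a valid invocation, reusing the constants and the malloc child URI that appear in the JavaScript tests earlier in this patch:

```python
from common.hdl import MayastorHandle

NEXUS_NAME = "nexus0"
NEXUS_UUID = "dbe4d7eb-118a-4d15-b789-a18d9af6ff20"


def create_v2_nexus(ms: MayastorHandle):
    # Mirrors the createNexusV2 gRPC tests: the controller ID range must be
    # ascending and the reservation key non-zero, otherwise the call fails.
    nexus = ms.nexus_create_v2(
        NEXUS_NAME,
        NEXUS_UUID,
        32768,
        min_cntlid=1,
        max_cntlid=0xFFEF,
        resv_key=0x12345678,
        preempt_key=0,
        children=["malloc:///malloc1?size_mb=64"],
    )
    # ListNexusV2 reports name and uuid separately.
    assert any(n.name == NEXUS_NAME for n in ms.nexus_list_v2())
    return nexus
```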
this is the same as bdev_share() but is not used by the control plane.""" return self.ms.PublishNexus( - pb.PublishNexusRequest( - uuid=str(uuid), key="", share=1)).device_uri + pb.PublishNexusRequest(uuid=str(uuid), key="", share=share) + ).device_uri def nexus_unpublish(self, uuid): """Unpublish the nexus.""" return self.ms.UnpublishNexus(pb.UnpublishNexusRequest(uuid=str(uuid))) def nexus_list(self): - """List all the the nexus devices.""" + """List all the nexus devices.""" return self.ms.ListNexus(pb.Null()).nexus_list + def nexus_list_v2(self): + """List all the nexus devices, with separate name and uuid.""" + return self.ms.ListNexusV2(pb.Null()).nexus_list + + def nexus_add_replica(self, uuid, uri, norebuild): + """Add a new replica to the nexus""" + return self.ms.AddChildNexus( + pb.AddChildNexusRequest(uuid=uuid, uri=uri, norebuild=norebuild) + ) + + def nexus_remove_replica(self, uuid, uri): + """Add a new replica to the nexus""" + return self.ms.RemoveChildNexus(pb.RemoveChildNexusRequest(uuid=uuid, uri=uri)) + def bdev_list(self): - """"List all bdevs found within the system.""" + """List all bdevs found within the system.""" return self.bdev.List(pb.Null(), wait_for_ready=True).bdevs def pool_list(self): @@ -138,3 +204,11 @@ def pools_as_uris(self): uri = "pool://{0}/{1}".format(self.ip_v4, p.name) uris.append(uri) return uris + + def stat_nvme_controllers(self): + """Statistics for all nvmx controllers""" + return self.ms.StatNvmeControllers(pb.Null()).controllers + + def mayastor_info(self): + """Get information about Mayastor instance""" + return self.ms.GetMayastorInfo(pb.Null()) diff --git a/test/python/common/mayastor.py b/test/python/common/mayastor.py new file mode 100644 index 000000000..343260875 --- /dev/null +++ b/test/python/common/mayastor.py @@ -0,0 +1,62 @@ +"Default fixtures that are considered to be reusable." +import pytest +from common.hdl import MayastorHandle +from common.command import run_cmd + +pytest_plugins = ["docker_compose"] + + +def check_size(prev, current, delta): + "Validate that replica creation consumes space on the pool." + before = prev.pools[0].used + after = current.pools[0].used + assert delta == (before - after) >> 20 + + +@pytest.fixture(scope="function") +def containers(docker_project, function_scoped_container_getter): + "Fixture to get handles to mayastor containers." + containers = {} + for name in docker_project.service_names: + containers[name] = function_scoped_container_getter.get(name) + yield containers + + +@pytest.fixture(scope="function") +def mayastors(docker_project, containers): + "Fixture to get a reference to mayastor gRPC handles" + handles = {} + for name, container in containers.items(): + handles[name] = MayastorHandle( + container.get("NetworkSettings.Networks.mayastor_net.IPAddress") + ) + yield handles + + +@pytest.fixture(scope="function") +def create_temp_files(containers): + "Create temp files for each run so we start out clean." + for name in containers.keys(): + run_cmd(f"rm -f /tmp/{name}.img", True) + for name in containers.keys(): + run_cmd(f"truncate -s 1G /tmp/{name}.img", True) + + +@pytest.fixture(scope="module") +def container_mod(docker_project, module_scoped_container_getter): + "Fixture to get handles to mayastor containers." 
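The fixtures above hand each test a dictionary of `MayastorHandle` objects keyed by docker-compose service name. A possible test built on them; the service name `ms0` and the `replicas` reply field are assumptions based on the compose files and mayastor.proto, not something this hunk pins down:

```python
# Importing the fixtures registers them for this test module.
from common.mayastor import containers, mayastors, create_temp_files  # noqa: F401

UUID = "dbe4d7eb-118a-4d15-b789-a18d9af6ff21"


def test_pool_and_replica(create_temp_files, mayastors):
    # "ms0" is a placeholder; the real keys are the service names from the
    # docker-compose.yml the test runs against, and create_temp_files has
    # already prepared /tmp/<name>.img for each of them.
    ms = mayastors["ms0"]
    ms.pool_create("tpool", "aio:///tmp/ms0.img")
    ms.replica_create("tpool", UUID, 8 * 1024 * 1024)
    # The `replicas` field name follows the mayastor.proto reply definition.
    assert UUID in [r.uuid for r in ms.replica_list().replicas]
    ms.replica_destroy(UUID)
```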
+ containers = {} + for name in docker_project.service_names: + containers[name] = module_scoped_container_getter.get(name) + yield containers + + +@pytest.fixture(scope="module") +def mayastor_mod(docker_project, container_mod): + "Fixture to get a reference to mayastor gRPC handles." + handles = {} + for name, container in container_mod.items(): + handles[name] = MayastorHandle( + container.get("NetworkSettings.Networks.mayastor_net.IPAddress") + ) + yield handles diff --git a/test/python/common/msclient.py b/test/python/common/msclient.py new file mode 100644 index 000000000..5638315dc --- /dev/null +++ b/test/python/common/msclient.py @@ -0,0 +1,73 @@ +import os +import subprocess +import json +from common.util import mayastor_target_dir + + +class MayastorClient: + """Abstraction around Mayastor command line client, which allows + flexible client configuration and supports parsing of command output + depending on requested output mode. + In order to invoke mayastor CLI just perform a call operation against + a MayastorClient instance, passing all the commands via function arguments: + mscli = get_msclient() + mscli("controller", "list") + """ + + def __init__(self, path, cfg): + assert path, "CLI command path must be provided" + self.path = path + self.verbose = cfg.get("verbose", False) + self.url = cfg.get("url", None) + self.quiet = cfg.get("quiet", False) + self.output = cfg.get("output", "default") + + def __call__(self, *args): + cmd = [self.path, "-o", self.output] + + if self.quiet: + cmd.append("-q") + if self.verbose: + cmd.append("-v") + if self.url: + cmd += ["-b", self.url] + + cmd += list(args) + + output = subprocess.check_output(cmd, shell=False) + output = output.decode("utf-8") + + if self.output == "json": + output = json.loads(output) + + return output + + def with_url(self, url): + """Set URL of the Mayastor.""" + self.url = url + return self + + def with_json_output(self): + """Set JSON output mode.""" + self.output = "json" + return self + + def with_default_output(self): + """Set default output mode (non-JSON).""" + self.output = "default" + return self + + def with_verbose(self, verbose): + """Request verbose output.""" + self.verbose = verbose + return self + + +def get_msclient(cfg=None): + """Instantiate mayastor CLI object based on user config.""" + # Check that CLI binary exists. 
+ p = "%s/mayastor-client" % mayastor_target_dir() + if not os.path.isfile(p): + raise FileNotFoundError("No Mayastor CLI command available") + + return MayastorClient(p, {} if cfg is None else cfg) diff --git a/test/python/common/nvme.py b/test/python/common/nvme.py index b7c51f739..96c0f35bb 100644 --- a/test/python/common/nvme.py +++ b/test/python/common/nvme.py @@ -5,6 +5,11 @@ from common.command import run_cmd_async_at +async def nvme_remote_connect_all(remote, host, port): + command = f"sudo nvme connect-all -t tcp -s {port} -a {host}" + await run_cmd_async_at(remote, command) + + async def nvme_remote_connect(remote, uri): """Connect to the remote nvmf target on this host.""" u = urlparse(uri) @@ -12,8 +17,7 @@ async def nvme_remote_connect(remote, uri): host = u.hostname nqn = u.path[1:] - command = "sudo nvme connect -t tcp -s {0} -a {1} -n {2}".format( - port, host, nqn) + command = "sudo nvme connect -t tcp -s {0} -a {1} -n {2}".format(port, host, nqn) await run_cmd_async_at(remote, command) time.sleep(1) @@ -22,15 +26,11 @@ async def nvme_remote_connect(remote, uri): discover = await run_cmd_async_at(remote, command) discover = json.loads(discover.stdout) - dev = list( - filter( - lambda d: nqn in d.get("SubsystemNQN"), - discover.get("Devices"))) + dev = list(filter(lambda d: nqn in d.get("SubsystemNQN"), discover.get("Devices"))) # we should only have one connection assert len(dev) == 1 - dev_path = dev[0].get('Controllers')[0].get( - 'Namespaces')[0].get('NameSpace') + dev_path = dev[0].get("Controllers")[0].get("Namespaces")[0].get("NameSpace") return f"/dev/{dev_path}" @@ -62,30 +62,48 @@ def nvme_connect(uri): host = u.hostname nqn = u.path[1:] - command = "sudo nvme connect -t tcp -s {0} -a {1} -n {2}".format( - port, host, nqn) + command = "sudo nvme connect -t tcp -s {0} -a {1} -n {2}".format(port, host, nqn) subprocess.run(command, check=True, shell=True, capture_output=False) time.sleep(1) command = "sudo nvme list -v -o json" discover = json.loads( subprocess.run( - command, - shell=True, - check=True, - text=True, - capture_output=True).stdout) + command, shell=True, check=True, text=True, capture_output=True + ).stdout + ) - dev = list( - filter( - lambda d: nqn in d.get("SubsystemNQN"), - discover.get("Devices"))) + dev = list(filter(lambda d: nqn in d.get("SubsystemNQN"), discover.get("Devices"))) # we should only have one connection assert len(dev) == 1 - device = "/dev/{}".format(dev[0].get('Namespaces')[0].get('NameSpace')) + device = "/dev/{}".format(dev[0]["Namespaces"][0].get("NameSpace")) return device +def nvme_id_ctrl(device): + """Identify controller.""" + command = "sudo nvme id-ctrl {0} -o json".format(device) + id_ctrl = json.loads( + subprocess.run( + command, shell=True, check=True, text=True, capture_output=True + ).stdout + ) + + return id_ctrl + + +def nvme_resv_report(device): + """Reservation report.""" + command = "sudo nvme resv-report {0} -c 1 -o json".format(device) + resv_report = json.loads( + subprocess.run( + command, shell=True, check=True, text=True, capture_output=True + ).stdout + ) + + return resv_report + + def nvme_discover(uri): """Discover target.""" u = urlparse(uri) @@ -94,11 +112,8 @@ def nvme_discover(uri): command = "sudo nvme discover -t tcp -s {0} -a {1}".format(port, host) output = subprocess.run( - command, - check=True, - shell=True, - capture_output=True, - encoding="utf-8") + command, check=True, shell=True, capture_output=True, encoding="utf-8" + ) if not u.path[1:] in str(output.stdout): raise ValueError("uri 
{} is not discovered".format(u.path[1:])) @@ -110,3 +125,37 @@ def nvme_disconnect(uri): command = "sudo nvme disconnect -n {0}".format(nqn) subprocess.run(command, check=True, shell=True, capture_output=True) + + +def nvme_disconnect_all(): + """Disconnect from all connected nvme subsystems""" + command = "sudo nvme disconnect-all" + subprocess.run(command, check=True, shell=True, capture_output=True) + + +def nvme_list_subsystems(device): + """Retrieve information for NVMe subsystems""" + command = "sudo nvme list-subsys {} -o json".format(device) + return json.loads( + subprocess.run( + command, check=True, shell=True, capture_output=True, encoding="utf-8" + ).stdout + ) + + +NS_PROPS = ["nguid", "eui64"] + + +def identify_namespace(device): + """Get properties of a namespace on this host""" + command = "sudo nvme id-ns {}".format(device) + output = subprocess.run( + command, check=True, shell=True, capture_output=True, encoding="utf-8" + ) + props = output.stdout.strip().split("\n")[1:] + ns = {} + for p in props: + v = [v.strip() for v in p.split(":") if p.count(":") == 1] + if len(v) == 2 and v[0] in NS_PROPS: + ns[v[0]] = v[1] + return ns diff --git a/test/python/common/util.py b/test/python/common/util.py new file mode 100644 index 000000000..5e32bf1ec --- /dev/null +++ b/test/python/common/util.py @@ -0,0 +1,13 @@ +import os + + +def mayastor_target_dir(): + """Get Mayastor target directory (absolute path) based on environment variable SRCDIR. + Raise exception if no Mayastor root is configured. + """ + if "SRCDIR" not in os.environ: + raise Exception("SRCDIR environment variable not defined") + + # For now assume only Debug builds, but we might want to consider using + # a variable to access Release binaries too. + return "%s/target/debug" % os.environ["SRCDIR"] diff --git a/test/python/conftest.py b/test/python/conftest.py deleted file mode 100644 index e0dbffc03..000000000 --- a/test/python/conftest.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Default fixtures that are considered to be reusable.
These are all function scoped.""" - -import logging - -import pytest -from common.hdl import MayastorHandle -import mayastor_pb2 as pb -import os - -pytest_plugins = ["docker_compose"] - - -@pytest.fixture -def target_vm(): - try: - return os.environ.get("TARGET_VM") - except Exception as e: - print("the environment variable TARGET_VM must be set to a valid host") - raise(e) - - -@pytest.fixture(scope="function") -def create_temp_files(containers): - """Create temp files for each run so we start out clean.""" - for name in containers: - run_cmd(f"rm -rf /tmp/{name}.img", True) - for name in containers: - run_cmd(f"truncate -s 1G /tmp/{name}.img", True) - - -def check_size(prev, current, delta): - """Validate that replica creation consumes space on the pool.""" - before = prev.pools[0].used - after = current.pools[0].used - assert delta == (before - after) >> 20 - - -@pytest.fixture(scope="function") -def mayastors(docker_project, function_scoped_container_getter): - """Fixture to get a reference to mayastor gRPC handles.""" - project = docker_project - handles = {} - for name in project.service_names: - # because we use static networks .get_service() does not work - services = function_scoped_container_getter.get(name) - ip_v4 = services.get( - "NetworkSettings.Networks.python_mayastor_net.IPAddress") - handles[name] = MayastorHandle(ip_v4) - yield handles - - -@pytest.fixture(scope="function") -def containers(docker_project, function_scoped_container_getter): - """Fixture to get handles to mayastor as well as the containers.""" - project = docker_project - containers = {} - for name in project.service_names: - containers[name] = function_scoped_container_getter.get(name) - yield containers diff --git a/test/python/k8s/fio.yaml b/test/python/k8s/fio.yaml new file mode 100644 index 000000000..d246488ef --- /dev/null +++ b/test/python/k8s/fio.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fiomap +data: + fio.conf: | + [global] + direct=1 + rw=randrw + ioengine=libaio + bs=4k + iodepth=16 + verify=crc32 + verify_fatal=1 + verify_async=2 + time_based=1 + runtime=30 + size=500mb + + [volume-1] + filename=/volume-1/vol.test + [volume-2] + filename=/volume-2/vol.test + [volume-3] + filename=/volume-3/vol.test + [volume-4] + filename=/volume-4/vol.test +--- +kind: Pod +apiVersion: v1 +metadata: + name: fio +spec: + volumes: + - name: ms-1-claim + persistentVolumeClaim: + claimName: ms-1 + - name: ms-2-claim + persistentVolumeClaim: + claimName: ms-2 + - name: ms-3-claim + persistentVolumeClaim: + claimName: ms-3 + - name: ms-4-claim + persistentVolumeClaim: + claimName: ms-4 + - name: ms-5-claim + persistentVolumeClaim: + claimName: ms-5 + - name: ms-6-claim + persistentVolumeClaim: + claimName: ms-6 + - name: config-volume + configMap: + name: fiomap + containers: + - name: fio + image: mayadata/fio + args: + - fio + - /config/fio.conf + volumeMounts: + - mountPath: /volume-1 + name: ms-1-claim + - mountPath: /volume-2 + name: ms-2-claim + - mountPath: /volume-3 + name: ms-3-claim + - mountPath: /volume-4 + name: ms-4-claim + - mountPath: /volume-5 + name: ms-5-claim + - mountPath: /volume-6 + name: ms-6-claim + - name: config-volume + mountPath: /config/fio.conf + subPath: fio.conf + imagePullPolicy: IfNotPresent + restartPolicy: Never diff --git a/test/python/k8s/test_pvc.py b/test/python/k8s/test_pvc.py new file mode 100644 index 000000000..445234183 --- /dev/null +++ b/test/python/k8s/test_pvc.py @@ -0,0 +1,252 @@ +import pytest +from kubernetes import client, 
config, dynamic, watch +from kubernetes.client import api_client +import asyncio +from kubernetes import utils + + +def get_api(api_name): + client = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config()) + ) + + _apis = { + "msp": ( + lambda: client.resources.get( + api_version="openebs.io/v1alpha1", kind="MayastorPool" + ) + ), + "msv": ( + lambda: client.resources.get( + api_version="openebs.io/v1alpha1", kind="MayastorVolume" + ) + ), + "pvc": ( + lambda: client.resources.get(api_version="v1", kind="PersistentVolumeClaim") + ), + "pod": (lambda: client.resources.get(api_version="v1", kind="Pod")), + } + + return _apis[api_name]() + + +def delete_msp(node): + get_api("msp").delete(name=f"pool-{node}", namespace="mayastor") + + +async def create_msp(node, disk): + api = get_api("msp") + name = f"pool-{node}" + pool = { + "apiVersion": "openebs.io/v1alpha1", + "kind": "MayastorPool", + "metadata": { + "name": name, + "namespace": "mayastor", + }, + "spec": { + "node": node, + "disks": [disk], + }, + } + try: + current = api.get(name=name, namespace="mayastor") + assert current.status.state == "online" + assert current.spec.disks == [disk] + print(f"Pool {name} already exists") + except: + api.create(body=pool, namespace="mayastor") + await wait_for_it(api, name, "online", "mayastor") + + +# Create a PVC and wait for it to be Bound +async def create_pvc(name, size=1, sc="mayastor-nvmf-2"): + size = f"{size}Gi" + + api = get_api("pvc") + body = { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": {"name": name}, + "spec": { + "accessModes": ["ReadWriteOnce"], + "volumeMode": "Filesystem", + "resources": {"requests": {"storage": size}}, + "storageClassName": sc, + }, + } + + try: + current = api.get(name=name, namespace="default") + assert current.status["phase"] == "Bound" + print("PVC already exists") + except: + api.create(body=body, namespace="default") + await wait_for_it(api, name, "Bound") + + +def delete_pvc(name): + get_api("pvc").delete(name=name, namespace="default") + + +# wait for a resource to reach a certain state +async def wait_for_it(api, name, phase, namespace="default", iter=1000): + while iter > 0: + try: + current = api.get(name=name, namespace=namespace) + if current["status"]["phase"] == str(phase) or current["status"][ + "state" + ] == str(phase): + return + except: + pass + iter -= 1 + if iter == 0: + raise Exception(f"timed out {api} while creating {name}") + await asyncio.sleep(0.1) + + +# wait until a resource no longer exists +async def wait_until_gone(api, name, iter=1000): + while iter > 0: + try: + api.get(name=name, namespace="default") + except: + return + iter -= 1 + if iter == 0: + raise Exception(f"timed out waiting for {name} to disappear") + await asyncio.sleep(0.1) + + +# watch events to a certain pod +async def watch_for(bail_on, pod): + w = watch.Watch() + for event in w.stream(client.CoreV1Api().list_pod_for_all_namespaces): + if event["type"] == bail_on and event["object"].metadata.name == pod: + print( + "Event: %s %s %s" + % (event["type"], event["object"].kind, event["object"].metadata.name) + ) + assert event["object"].status.phase, "Succeeded" + return + await asyncio.sleep(0) + + +async def fio_delete(name="fio"): + api = get_api("pod") + api.delete(name=name, namespace="default") + await wait_until_gone(api, "fio") + + +# This uses a YAML file and a configMap to start Fio +async def fio_from_yaml(): + config.load_kube_config() + k8s_client = client.ApiClient() + + # remove the old pod in 
case its still there but + # completed + try: + await fio_delete() + except: + pass + + try: + utils.create_from_yaml(k8s_client, "k8s/fio.yaml") + except: + pass + await wait_for_it(get_api("pod"), "fio", "Running") + print("Fio is running.....") + await wait_for_it(get_api("pod"), "fio", "Succeeded") + print("Fio completed.....") + await fio_delete() + + +# Its rather tedious to create a "large" spec by constructing the +# json object on the fly -- without using some form of templating. +# As an alternative approach we use the V1XXX models generated by +# openAPI. + + +async def create_fio_manifest(pvcs, size=1, runtime=30): + + size = f"{ ((size-1)*1000) + 800 }mb" + config.load_kube_config() + volume_mounts = [] + volumes = [] + fio_targets = [] + + for i, name in enumerate(pvcs): + volume_mounts.append( + client.V1VolumeMount(name=f"volume-{i}", mount_path=f"/volume-{i}") + ) + volumes.append( + client.V1Volume( + name=f"volume-{i}", + persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource( + claim_name=name + ), + ) + ) + fio_targets.append(f"--name={name}") + fio_targets.append(f"--filename=/volume-{i}/{name}.test") + + cmd = "fio --direct=1 --rw=randrw --ioengine=libaio" + cmd += " --bs=4k --iodepth=16 --verify=crc32 --verify_fatal=1 --verify_async=2" + cmd += f" --time_based=1 --runtime={runtime} --size={size}" + + command = cmd.split() + fio_targets + + container = client.V1Container( + name="fio-generated", + image="mayadata/fio", + image_pull_policy="IfNotPresent", + command=command, + volume_mounts=volume_mounts, + ) + + template = client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta(name="fio"), + spec=client.V1PodSpec( + containers=[container], + volumes=volumes, + restart_policy="Never", + ), + ) + + client.CoreV1Api().create_namespaced_pod(namespace="default", body=template) + await wait_for_it(get_api("pod"), "fio", "Running") + print("Fio is running.....") + await watch_for("MODIFIED", "fio") + print("Fio completed.....") + logs = client.CoreV1Api().read_namespaced_pod_log(name="fio", namespace="default") + + print(logs) + await fio_delete() + + +@pytest.mark.asyncio +async def test_msp(): + + num_vols = 4 + # size in GB + size = 4 + await asyncio.gather( + create_msp("ksnode-2", "uring:///dev/vda"), + create_msp("ksnode-3", "uring:///dev/vda"), + ) + + pvcs = [f"ms-{i}" for i in range(0, num_vols)] + futures = [] + for pvc in pvcs: + futures.append(create_pvc(pvc, size=size)) + + await asyncio.gather(*futures) + await create_fio_manifest(pvcs, size=size) + + # await fio_from_yaml() + for i in range(0, num_vols): + delete_pvc(f"ms-{i}") + + delete_msp("ksnode-2") + delete_msp("ksnode-3") diff --git a/test/python/pool/docker-compose.yml b/test/python/pool/docker-compose.yml new file mode 100644 index 000000000..441eefcaf --- /dev/null +++ b/test/python/pool/docker-compose.yml @@ -0,0 +1,28 @@ +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_RESV_ENABLE=1 + - RUST_LOG=mayastor=trace + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + privileged: true + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev:/dev/ + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/pool/test_unmap.py b/test/python/pool/test_unmap.py new file mode 100644 index 
000000000..c2ebf8a09 --- /dev/null +++ b/test/python/pool/test_unmap.py @@ -0,0 +1,61 @@ +# validate that when recreating lvols on the same pool, +# does not retain previous filesystem data + +from common.mayastor import mayastors, target_vm +import pytest +from common.nvme import nvme_remote_connect, nvme_remote_disconnect +from common.command import run_cmd_async_at +import json + + +@pytest.fixture +def create_pool(mayastors): + ms = mayastors.get("ms0") + ms.pool_create("tpool", "aio:///dev/sda3") + yield + ms.pool_destroy("tpool") + + +def create_volumes(mayastors): + ms = mayastors.get("ms0") + for i in range(0, 15): + ms.replica_create("tpool", f"replica-{i}", 4 * 1024 * 1024) + + +def delete_volumes(mayastors): + ms = mayastors.get("ms0") + for i in range(0, 15): + ms.replica_destroy(f"replica-{i}") + + +# this will fail the second time around as mkfs will fail if it finds +# a preexisting filesystem +async def mkfs_on_target(target_vm, mayastors): + host_ip = mayastors.get("ms0").ip_address() + remote_devices = [] + for i in range(0, 15): + dev = await nvme_remote_connect( + target_vm, f"nvmf://{host_ip}:8420/nqn.2019-05.io.openebs:replica-{i}" + ) + remote_devices.append(dev) + + print(await run_cmd_async_at(target_vm, "lsblk -o name,fstype -J")) + + for d in remote_devices: + await run_cmd_async_at(target_vm, f"sudo mkfs.xfs {d}") + + for i in range(0, 15): + dev = await nvme_remote_disconnect( + target_vm, f"nvmf://{host_ip}:8420/nqn.2019-05.io.openebs:replica-{i}" + ) + + +@pytest.mark.asyncio +async def test_lvol_unmap(mayastors, create_pool, target_vm): + create_volumes(mayastors) + await mkfs_on_target(target_vm, mayastors) + delete_volumes(mayastors) + + create_volumes(mayastors) + await mkfs_on_target(target_vm, mayastors) + delete_volumes(mayastors) diff --git a/test/python/pytest.ini b/test/python/pytest.ini index 9b09a5032..9cb762a0b 100644 --- a/test/python/pytest.ini +++ b/test/python/pytest.ini @@ -1,3 +1,5 @@ [pytest] -log_level = error log_cli = true +log_level = warn +console_output_style = classic + diff --git a/test/python/requirements.txt b/test/python/requirements.txt index 089195c4e..7bd95fa06 100644 --- a/test/python/requirements.txt +++ b/test/python/requirements.txt @@ -1,6 +1,11 @@ asyncio asyncssh -pytest-timeout -pytest-docker-compose +black +protobuf pytest-asyncio +pytest-bdd +pytest-black +pytest-docker-compose +pytest-testconfig +pytest-timeout pytest-variables diff --git a/test/python/setup.sh b/test/python/setup.sh new file mode 100755 index 000000000..c0200e168 --- /dev/null +++ b/test/python/setup.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -euxo pipefail + +if [ "${SRCDIR:-unset}" = unset ] +then + echo "SRCDIR must be set to the root of your working tree" 2>&1 + exit 1 +fi + +cd "$SRCDIR" + +python -m grpc_tools.protoc --proto_path=rpc/mayastor-api/protobuf --grpc_python_out=test/python --python_out=test/python mayastor.proto +python -m grpc_tools.protoc --proto_path=csi/proto --grpc_python_out=test/python --python_out=test/python csi.proto + +virtualenv --no-setuptools test/python/venv +(source ./test/python/venv/bin/activate && pip install -r test/python/requirements.txt) diff --git a/test/python/test_common.py b/test/python/test_common.py new file mode 100644 index 000000000..23de57ea2 --- /dev/null +++ b/test/python/test_common.py @@ -0,0 +1,22 @@ +from common.hdl import MayastorHandle +from common.mayastor import container_mod, mayastor_mod +import pytest + + +@pytest.mark.asyncio +async def test_mayastor_features(mayastor_mod): + 
ms1 = mayastor_mod.get("ms1") + ms3 = mayastor_mod.get("ms3") + + for replica, ms in ((True, ms1), (False, ms3)): + ms_info = ms.mayastor_info() + + assert ms_info.version.startswith("v0.") + + # Should see ANA disabled on mayastors where environment + # variable is not set. + features = ms_info.supportedFeatures + if replica: + assert features.asymmetricNamespaceAccess == False + else: + assert features.asymmetricNamespaceAccess == True diff --git a/test/python/test_config.ini b/test/python/test_config.ini new file mode 100644 index 000000000..ba24c21d5 --- /dev/null +++ b/test/python/test_config.ini @@ -0,0 +1,2 @@ +[grpc] +client_timeout = 120 diff --git a/test/python/test_multi_nexus.py b/test/python/test_multi_nexus.py deleted file mode 100644 index 54f888726..000000000 --- a/test/python/test_multi_nexus.py +++ /dev/null @@ -1,184 +0,0 @@ -from common.hdl import MayastorHandle -from common.command import run_cmd, run_cmd_async, run_cmd_async_at -from common.nvme import nvme_remote_connect, nvme_remote_disconnect -from common.fio import Fio -from common.fio_spdk import FioSpdk -import pytest -import asyncio -import uuid as guid - -UUID = "0000000-0000-0000-0000-000000000001" -NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" - - -@pytest.fixture(scope="function") -def create_temp_files(containers): - """Create temp files for each run so we start out clean.""" - for name in containers: - run_cmd(f"rm -rf /tmp/{name}.img", True) - for name in containers: - run_cmd(f"truncate -s 2G /tmp/{name}.img", True) - - -def check_size(prev, current, delta): - """Validate that replica creation consumes space on the pool.""" - before = prev.pools[0].used - after = current.pools[0].used - assert delta == (before - after) >> 20 - - -@pytest.fixture(scope="function") -def mayastors(docker_project, function_scoped_container_getter): - """Fixture to get a reference to mayastor handles.""" - project = docker_project - handles = {} - for name in project.service_names: - # because we use static networks .get_service() does not work - services = function_scoped_container_getter.get(name) - ip_v4 = services.get( - "NetworkSettings.Networks.python_mayastor_net.IPAddress") - handles[name] = MayastorHandle(ip_v4) - yield handles - - -@pytest.fixture(scope="function") -def containers(docker_project, function_scoped_container_getter): - """Fixture to get handles to mayastor as well as the containers.""" - project = docker_project - containers = {} - for name in project.service_names: - containers[name] = function_scoped_container_getter.get(name) - yield containers - - -@pytest.fixture -def create_pool_on_all_nodes(create_temp_files, containers, mayastors): - """Create a pool on each node.""" - uuids = [] - - for name, h in mayastors.items(): - h.pool_create(f"{name}", f"aio:///tmp/{name}.img") - # validate we have zero replicas - assert len(h.replica_list().replicas) == 0 - - for i in range(30): - uuid = guid.uuid4() - for name, h in mayastors.items(): - before = h.pool_list() - h.replica_create(name, uuid, 64 * 1024 * 1024) - after = h.pool_list() - check_size(before, after, -64) - # ensure our replica count goes up as expected - assert len(h.replica_list().replicas) == i + 1 - - uuids.append(uuid) - return uuids - - -@pytest.mark.skip -@pytest.mark.parametrize("times", range(2)) -def test_restart( - times, - create_pool_on_all_nodes, - containers, - mayastors): - """ - Test that when we create replicas and destroy them the count is as expected - At this point we have 3 nodes each with 15 replica's. 
- """ - - node = containers.get("ms1") - ms1 = mayastors.get("ms1") - - # kill one of the nodes and validate we indeed have 15 replica's - node.kill() - node.start() - # we must reconnect grpc here.. - ms1.reconnect() - # create does import here if found - ms1.pool_create("ms1", "aio:///tmp/ms1.img") - - # check the list has 15 replica's - replicas = ms1.replica_list().replicas - assert 15 == len(replicas) - - # destroy a few - for i in range(7): - ms1.replica_destroy(replicas[i].uuid) - - # kill and reconnect - node.kill() - node.start() - ms1.reconnect() - - # validate we have 8 replicas left - ms1.pool_create("ms1", "aio:///tmp/ms1.img") - replicas = ms1.replica_list().replicas - - assert 8 == len(replicas) - - -async def kill_after(container, sec): - """Kill the given container after sec seconds.""" - await asyncio.sleep(sec) - container.kill() - - -@pytest.mark.asyncio -async def test_multiple(create_pool_on_all_nodes, - containers, - mayastors, - target_vm): - - ms1 = mayastors.get('ms1') - rlist_m2 = mayastors.get('ms2').replica_list().replicas - rlist_m3 = mayastors.get('ms3').replica_list().replicas - nexus_list = [] - to_kill = containers.get("ms3") - - devs = [] - - for i in range(30): - uuid = guid.uuid4() - ms1.nexus_create(uuid, 60 * 1024 * 1024, - [rlist_m2.pop().uri, rlist_m3.pop().uri]) - nexus_list.append(ms1.nexus_publish(uuid)) - - for nexus in nexus_list: - dev = await nvme_remote_connect(target_vm, nexus) - devs.append(dev) - - fio_cmd = Fio(f"job-{dev}", "randwrite", devs).build() - - await asyncio.gather(run_cmd_async_at(target_vm, fio_cmd), - kill_after(to_kill, 3), - ) - - for nexus in nexus_list: - dev = await nvme_remote_disconnect(target_vm, nexus) - - -@pytest.mark.skip -@pytest.mark.asyncio -async def test_multiple_spdk(create_pool_on_all_nodes, - containers, - mayastors): - - ms1 = mayastors.get('ms1') - rlist_m2 = mayastors.get('ms2').replica_list().replicas - rlist_m3 = mayastors.get('ms3').replica_list().replicas - nexus_list = [] - to_kill = containers.get("ms3") - - devs = [] - - for i in range(30): - uuid = guid.uuid4() - ms1.nexus_create(uuid, 60 * 1024 * 1024, - [rlist_m2.pop().uri, rlist_m3.pop().uri]) - nexus_list.append(ms1.nexus_publish(uuid)) - - fio_cmd = FioSpdk(f"job-1", "randwrite", nexus_list).build() - - await asyncio.gather(run_cmd_async(fio_cmd), - kill_after(to_kill, 3)) diff --git a/test/python/test_nexus.py b/test/python/test_nexus.py deleted file mode 100644 index 968ebdc22..000000000 --- a/test/python/test_nexus.py +++ /dev/null @@ -1,306 +0,0 @@ -from common.command import run_cmd_async_at, run_cmd_async -from common.fio import Fio -from common.fio_spdk import FioSpdk -from common.volume import Volume -from common.hdl import MayastorHandle -import logging -import pytest -import uuid as guid -import grpc -import asyncio -import mayastor_pb2 as pb -from common.nvme import ( - nvme_discover, - nvme_connect, - nvme_disconnect, - nvme_remote_connect, - nvme_remote_disconnect) - -@pytest.fixture -def create_nexus(wait_for_mayastor, containers, nexus_uuid, create_replica): - hdls = wait_for_mayastor - replicas = create_replica - replicas = [k.uri for k in replicas] - - NEXUS_UUID, size_mb = nexus_uuid - - hdls['ms3'].nexus_create(NEXUS_UUID, 64 * 1024 * 1024, replicas) - uri = hdls['ms3'].nexus_publish(NEXUS_UUID) - assert len(hdls['ms1'].bdev_list()) == 2 - assert len(hdls['ms2'].bdev_list()) == 2 - assert len(hdls['ms3'].bdev_list()) == 1 - - assert len(hdls['ms1'].pool_list().pools) == 1 - assert len(hdls['ms2'].pool_list().pools) == 1 
- - yield uri - hdls['ms3'].nexus_destroy(NEXUS_UUID) - -@pytest.fixture -def pool_config(): - """ - The idea is this used to obtain the pool types and names that should be - created. - """ - pool = {} - pool['name'] = "tpool" - pool['uri'] = "malloc:///disk0?size_mb=100" - return pool - - -@pytest.fixture(scope="module") -def containers(docker_project, module_scoped_container_getter): - """Fixture to get handles to mayastor as well as the containers.""" - project = docker_project - containers = {} - for name in project.service_names: - containers[name] = module_scoped_container_getter.get(name) - yield containers - - -@pytest.fixture(scope="module") -def wait_for_mayastor(docker_project, module_scoped_container_getter): - """Fixture to get a reference to mayastor gRPC handles""" - project = docker_project - handles = {} - for name in project.service_names: - # because we use static networks .get_service() does not work - services = module_scoped_container_getter.get(name) - ip_v4 = services.get( - "NetworkSettings.Networks.python_mayastor_net.IPAddress") - handles[name] = MayastorHandle(ip_v4) - yield handles - - -@pytest.fixture -def replica_uuid(): - """Replica UUID's to be used.""" - UUID = "0000000-0000-0000-0000-000000000001" - size_mb = 64 * 1024 * 1024 - return (UUID, size_mb) - - -@pytest.fixture -def nexus_uuid(): - """Nexus UUID's to be used.""" - NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" - size_mb = 64 * 1024 * 1024 - return (NEXUS_UUID, size_mb) - - -@pytest.fixture -def create_pools( - wait_for_mayastor, - containers, - pool_config): - hdls = wait_for_mayastor - - cfg = pool_config - pools = [] - - pools.append(hdls['ms1'].pool_create(cfg.get('name'), - cfg.get('uri'))) - - pools.append(hdls['ms2'].pool_create(cfg.get('name'), - cfg.get('uri'))) - - for p in pools: - assert p.state == pb.POOL_ONLINE - yield pools - try: - hdls['ms1'].pool_destroy(cfg.get('name')) - hdls['ms2'].pool_destroy(cfg.get('name')) - except Exception: - pass - - -@pytest.fixture -def create_replica( - wait_for_mayastor, - pool_config, - replica_uuid, - create_pools): - hdls = wait_for_mayastor - pools = create_pools - replicas = [] - - UUID, size_mb = replica_uuid - - replicas.append(hdls['ms1'].replica_create(pools[0].name, - UUID, size_mb)) - replicas.append(hdls['ms2'].replica_create(pools[0].name, - UUID, size_mb)) - - yield replicas - try: - hdls['ms1'].replica_destroy(UUID) - hdls['ms2'].replica_destroy(UUID) - except Exception as e: - logging.debug(e) - - -@pytest.mark.skip -@pytest.fixture -def destroy_all(wait_for_mayastor): - hdls = wait_for_mayastor - - hdls["ms3"].nexus_destroy(NEXUS_UUID) - - hdls["ms1"].replica_destroy(UUID) - hdls["ms2"].replica_destroy(UUID) - - hdls["ms1"].pool_destroy("tpool") - hdls["ms2"].pool_destroy("tpool") - - hdls["ms1"].replica_destroy(UUID) - hdls["ms2"].replica_destroy(UUID) - hdls["ms3"].nexus_destroy(NEXUS_UUID) - - hdls["ms1"].pool_destroy("tpool") - hdls["ms2"].pool_destroy("tpool") - - assert len(hdls["ms1"].pool_list().pools) == 0 - assert len(hdls["ms2"].pool_list().pools) == 0 - - assert len(hdls["ms1"].bdev_list().bdevs) == 0 - assert len(hdls["ms2"].bdev_list().bdevs) == 0 - assert len(hdls["ms3"].bdev_list().bdevs) == 0 - - -@pytest.mark.skip -def test_multi_volume_local(wait_for_mayastor, create_pools): - hdls = wait_for_mayastor - # contains the replicas - - ms = hdls.get('ms1') - - for i in range(6): - uuid = guid.uuid4() - replicas = [] - - ms.replica_create("tpool", uuid, 8 * 1024 * 1024) - - 
replicas.append("bdev:///{}".format(uuid)) - print(ms.nexus_create(uuid, 4 * 1024 * 1024, replicas)) - - -@pytest.mark.parametrize("times", range(50)) -@pytest.mark.skip -def test_create_nexus_with_two_replica(times, create_nexus): - nexus, uri, hdls = create_nexus - nvme_discover(uri.device_uri) - nvme_connect(uri.device_uri) - nvme_disconnect(uri.device_uri) - destroy_all - - -@pytest.mark.skip -def test_enospace_on_volume(wait_for_mayastor, create_pools): - nodes = wait_for_mayastor - pools = [] - uuid = guid.uuid4() - - pools.append(nodes["ms2"].pools_as_uris()[0]) - pools.append(nodes["ms1"].pools_as_uris()[0]) - nexus_node = nodes["ms3"].as_target() - - v = Volume(uuid, nexus_node, pools, 100 * 1024 * 1024) - - with pytest.raises(grpc.RpcError, match='RESOURCE_EXHAUSTED'): - _ = v.create() - print("expected failed") - - -async def kill_after(container, sec): - """Kill the given container after sec seconds.""" - await asyncio.sleep(sec) - logging.info(f"killing container {container}") - container.kill() - - -@pytest.mark.skip -@pytest.mark.asyncio -@pytest.mark.timeout(60) -async def test_nexus_2_mirror_kill_one(containers, create_nexus): - - to_kill = containers.get("ms2") - uri = create_nexus - - nvme_discover(uri) - dev = nvme_connect(uri) - job = Fio("job1", "rw", dev).build() - - await asyncio.gather(run_cmd_async(job), kill_after(to_kill, 5)) - - nvme_disconnect(uri) - - -@pytest.mark.asyncio -@pytest.mark.timeout(60) -async def test_nexus_2_remote_mirror_kill_one(target_vm, - containers, nexus_uuid, wait_for_mayastor, create_nexus): - - """ - This test does the following steps: - - - creates mayastor instances - - creates pools on mayastor 1 and 2 - - creates replicas on those pools - - creates a nexus on mayastor 3 - - starts fio on a remote VM (vixos1) for 15 secondsj - - kills mayastor 2 after 4 seconds - - assume the test to succeed - - disconnect the VM from mayastor 3 when FIO completes - - removes the nexus from mayastor 3 - - removes the replicas but as mayastor 2 is down, will swallow errors - - removes the pool - - The bulk of this is done by reusing fixtures those fitures are not as - generic as one might like at this point so look/determine if you need them - to begin with. - - By yielding from fixtures, after the tests the function is resumed where - yield is called. 
- """ - - uri = create_nexus - NEXUS_UUID, size_mb = nexus_uuid - dev = await nvme_remote_connect(target_vm, uri) - job = Fio("job1", "randwrite", dev).build() - - # create an event loop polling the async processes for completion - await asyncio.gather( - run_cmd_async_at(target_vm, job), - kill_after(containers.get("ms2"), 4)) - - list = wait_for_mayastor.get("ms3").nexus_list() - nexus = next(n for n in list if n.uuid == NEXUS_UUID) - assert nexus.state == pb.NEXUS_DEGRADED - nexus.children[1].state == pb.CHILD_FAULTED - - # disconnect the VM from our target before we shutdown - await nvme_remote_disconnect(target_vm, uri) - - -@pytest.mark.skip -@pytest.mark.asyncio -@pytest.mark.timeout(60) -async def test_nexus_2_remote_mirror_kill_one_spdk( - containers, nexus_uuid, wait_for_mayastor, create_nexus): - """ - Identical to the previous test except fio uses the SPDK ioengine - """ - - uri = create_nexus - NEXUS_UUID, size_mb = nexus_uuid - job = FioSpdk("job1", "randwrite", uri).build() - - await asyncio.gather( - run_cmd_async(job), - kill_after(containers.get("ms2"), 4) - ) - - list = wait_for_mayastor.get("ms3").nexus_list() - nexus = next(n for n in list if n.uuid == NEXUS_UUID) - assert nexus.state == pb.NEXUS_DEGRADED - nexus.children[1].state == pb.CHILD_FAULTED diff --git a/test/python/test_nexus_create_destroy.py b/test/python/test_nexus_create_destroy.py deleted file mode 100644 index 5c0c1069d..000000000 --- a/test/python/test_nexus_create_destroy.py +++ /dev/null @@ -1,121 +0,0 @@ -from common.hdl import MayastorHandle -import logging -import pytest -import uuid as guid -import grpc -import mayastor_pb2 as pb - - -@pytest.fixture -def pool_config(): - pool = {} - pool['name'] = "tpool" - pool['uri'] = "malloc:///disk0?size_mb=100" - return pool - - -@pytest.fixture(scope="module") -def containers(docker_project, module_scoped_container_getter): - project = docker_project - containers = {} - for name in project.service_names: - containers[name] = module_scoped_container_getter.get(name) - yield containers - - -@pytest.fixture(scope="module") -def wait_for_mayastor(docker_project, module_scoped_container_getter): - project = docker_project - handles = {} - for name in project.service_names: - # because we use static networks .get_service() does not work - services = module_scoped_container_getter.get(name) - ip_v4 = services.get( - "NetworkSettings.Networks.python_mayastor_net.IPAddress") - handles[name] = MayastorHandle(ip_v4) - yield handles - - -@pytest.fixture -def replica_uuid(): - UUID = "0000000-0000-0000-0000-000000000001" - size_mb = 64 * 1024 * 1024 - return (UUID, size_mb) - - -@pytest.fixture -def nexus_uuid(): - NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" - size_mb = 64 * 1024 * 1024 - return (NEXUS_UUID, size_mb) - - -@pytest.fixture -def create_pools( - wait_for_mayastor, - containers, - pool_config): - hdls = wait_for_mayastor - - cfg = pool_config - pools = [] - - pools.append(hdls['ms1'].pool_create(cfg.get('name'), - cfg.get('uri'))) - - pools.append(hdls['ms2'].pool_create(cfg.get('name'), - cfg.get('uri'))) - - pools.append(hdls['ms3'].pool_create(cfg.get('name'), - cfg.get('uri'))) - - for p in pools: - assert p.state == pb.POOL_ONLINE - yield pools - - -@pytest.fixture -def create_replica( - wait_for_mayastor, - pool_config, - replica_uuid, - create_pools): - hdls = wait_for_mayastor - pools = create_pools - replicas = [] - - UUID, size_mb = replica_uuid - - replicas.append(hdls['ms1'].replica_create(pools[0].name, - UUID, size_mb)) - 
replicas.append(hdls['ms2'].replica_create(pools[0].name, - UUID, size_mb)) - replicas.append(hdls['ms3'].replica_create(pools[0].name, - UUID, size_mb, 0)) - - yield replicas - - -@pytest.mark.timeout(60) -def test_nexus_create_destroy(wait_for_mayastor, nexus_uuid, create_replica): - replicas = create_replica - replicas = [k.uri for k in replicas] - - hdls = wait_for_mayastor - - NEXUS_UUID, size_mb = nexus_uuid - - assert len(hdls['ms1'].bdev_list()) == 2 - assert len(hdls['ms2'].bdev_list()) == 2 - assert len(hdls['ms3'].bdev_list()) == 2 - assert len(hdls['ms1'].pool_list().pools) == 1 - assert len(hdls['ms2'].pool_list().pools) == 1 - assert len(hdls['ms3'].pool_list().pools) == 1 - - for i in range(10): - hdls['ms3'].nexus_create(NEXUS_UUID, 64 * 1024 * 1024, replicas) - assert len(hdls['ms3'].nexus_list()) == 1 - assert len(hdls['ms3'].bdev_list()) == 3 - hdls['ms3'].nexus_destroy(NEXUS_UUID) - assert len(hdls['ms3'].nexus_list()) == 0 - assert len(hdls['ms3'].bdev_list()) == 2 diff --git a/test/python/test_nexus_kill_all.py b/test/python/test_nexus_kill_all.py deleted file mode 100644 index c5768cd8b..000000000 --- a/test/python/test_nexus_kill_all.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Test that will delete all replica's while under load from the nexus.""" - -import logging -import pytest -import asyncio -from common.nvme import nvme_remote_disconnect, nvme_remote_connect -from common.fio import Fio -from common.command import run_cmd_async_at -import mayastor_pb2 as pb - -NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" - - -async def kill_after(container, sec): - """Kill the given container after sec seconds.""" - await asyncio.sleep(sec) - logging.info(f"killing container {container}") - container.kill() - - -@pytest.mark.asyncio -@pytest.mark.timeout(60) -async def test_nexus_2_remote_mirror_kill_all( - container_ref, wait_for_mayastor, create_nexus): - - """ - - This test does the following steps: - - - creates mayastor instances - - creates pools on mayastor 1 and 2 - - creates replicas on those pools - - creates a nexus on mayastor 3 - - starts fio on a remote VM (vixos1) for 15 secondsj - - kills mayastor 2 after 4 seconds - - kills mayastor 1 after 5 seconds - - assume the fail with a ChildProcessError due to Fio bailing out - - disconnect the VM from mayastor 3 when has failed - - removes the nexus from mayastor 3 - """ - - containers = container_ref - uri = create_nexus - - dev = await nvme_remote_connect("vixos1", uri) - - job = Fio("job1", "randwrite", dev).build() - - try: - # create an event loop polling the async processes for completion - await asyncio.gather( - run_cmd_async_at("vixos1", job), - kill_after(containers.get("ms2"), 4), - kill_after(containers.get("ms1"), 5)) - except ChildProcessError: - pass - except Exception as e: - # if its not a child processe error fail the test - raise(e) - finally: - list = wait_for_mayastor.get("ms3").nexus_list() - nexus = next(n for n in list if n.uuid == NEXUS_UUID) - - assert nexus.state == pb.NEXUS_FAULTED - - nexus.children[0].state == pb.CHILD_FAULTED - nexus.children[1].state == pb.CHILD_FAULTED - - # disconnect the VM from our target before we shutdown - await nvme_remote_disconnect("vixos1", uri) diff --git a/test/python/test_null_nexus.py b/test/python/test_null_nexus.py deleted file mode 100644 index e36046ba1..000000000 --- a/test/python/test_null_nexus.py +++ /dev/null @@ -1,213 +0,0 @@ -from common.hdl import MayastorHandle -from common.command import run_cmd, run_cmd_async_at -from common.nvme import 
nvme_remote_connect, nvme_remote_disconnect -from common.fio import Fio -import pytest -import asyncio -import uuid as guid -import time -import mayastor_pb2 as pb -import random -# Reusing nexus UUIDs to avoid the need to disconnect between tests -nexus_uuids = [ - "78c0e836-ef26-47c2-a136-2a99b538a9a8", - "fc2bd1bf-301c-46e7-92e7-71e7a062e2dd", - "1edc6a04-74b0-450e-b953-14237a6795de", - "70bb42e6-4924-4079-a755-3798015e1319", - "9aae12d7-48dd-4fa6-a554-c4d278a9386a", - "af8107b6-2a9b-4097-9676-7a0941ba9bf9", - "7fde42a4-758b-466e-9755-825328131f67", - "6956b466-7491-4b8f-9a97-731bf7c9fd9c", - "c5fa226d-b1c7-4102-83b3-7f69ff1b311b", - "0cc74b98-fcd9-4a95-93ea-7c921d56c8cc", - "460758a4-d87f-4738-8666-cd07c23077db", - "63c567c9-75a6-466f-940e-a4ec5163794d", - "e425b05c-0730-4c10-8d01-77a2a65b4016", - "38d73fea-6f69-4a80-8959-2e2705be0f52", - "7eae6f7c-a52a-4689-b591-9642a094b4cc", - "52978839-8813-4c38-8665-42a78eeab499", - "8dccd362-b0fa-473d-abd3-b9c4d2a95f48", - "41dd24c4-8d20-4ee7-b52d-45dfcd580c52", - "9d879d46-8f71-4520-8eac-2b749c76adb8", - "d53cf04b-032d-412d-822a-e6b8b308bc52", - "da6247ab-6c28-429b-8848-c290ee474a81", - "71e9aab8-a350-4768-ab56-a2c66fda4e80", - "2241726a-487f-4852-8735-ddf849c92145", - "dbbbd8d4-96a4-45ae-9403-9dd43e34db6d", - "51ceb351-f864-43fc-bf31-3e36f75d8e86", - "7f90415a-29b3-41cd-9918-2d35b3b66056", - "f594f29c-b227-46a7-b4a6-486243c9f500", - "563b91ab-7ffd-44fc-aead-c551e280587a", - "8616f575-bcc9-490e-9524-9e6e7d9c76cc", - "817d4ca0-1f52-40de-b3d2-e59ce28b05ee", - "0a1103de-c466-4f77-88ca-521044b18483", - "ef6ff58b-0307-43df-bb87-9db0d06c1818", - "615c6fbb-90c1-46d6-b47c-6436a023953d", - "201e80b9-9389-4013-ab3c-b85b40d0cb56", - "e392187b-657f-4b4b-a249-cae27b7a5ba5", - "19e34f44-ff93-437d-9c11-31adfc974d64", - "b9542cb0-12e9-4b32-9cab-3b476161e1c6", - "3db3bfb9-0453-48bf-bb57-bd51b35e7f77", - "10e1f9e8-4cb2-4a79-a3d4-2b6314b58ba7", - "5ab8188a-e622-4965-b558-3355a5ceb285", - "063e2338-c42a-4aee-b4ed-b97a6c3bdc2b", - "94b03db5-14d7-4668-a952-e71160a658fc", - "4d03a0bc-645c-45ce-8d5e-a9ca584dfcb0", - "1a038ddb-fb0d-45b3-a46c-bdd57030af9e", - "89eecdef-4dc7-4228-8794-7139f15bf966", - "67369dd2-9c6a-49f8-b7bb-32ecba8698f2", - "f57cc434-d00c-4fee-b85f-56126403bf31", - "f9458cf7-8a12-487c-88f2-19c89e1d60c5", - "a33aca3e-fa5f-4477-b945-78616316ffb0", - "965329ba-24c1-4de7-b988-5a0baa376e66", - "453adc9f-501e-4d03-8810-990b747296e3", - "3a95e49d-afaa-4f3f-871a-4ec96ab86380", - "710450f3-266a-462a-abc0-bd3cdf235568", - "619b8ec8-2098-47fc-a55c-0a4048687163", - "9e3ae3ee-ddfe-4d81-93c0-9c62737d96fb", - "bc320f97-3a1f-4c6f-a2ee-936bfb5f293c", - "e5e271a8-d099-4cf4-8035-1f1672b6b16e", - "0fae6293-57b6-4135-b7dc-317b210d89b6", - "b8debec5-ea8e-4063-bba9-630bd108752f", - "cab0e91e-4e27-4820-a734-06c0bcd3f6ae", - "4986c804-64e9-4fb9-93ce-8ad6ca0cd3b2", - "5604d2cd-8ba6-4322-900a-31396168b72c", - "1affafb6-2089-45b5-8938-e40de8f89381", - "1fc64e79-9875-4136-b312-9f5df02d7c93", - "7fe16343-40dd-4bb5-bc63-9021be0cafb7", - "d24ad88e-b4ed-4ca5-91a0-b7afbc99744a", - "65889c75-7b2b-40a6-bfec-7bd9db64d20a", - "f60c9d96-360c-4b50-a1a8-f3fce87e24d7", - "4b6dc95f-1fb2-47f5-9746-e0e3970cbbb3", - "b37eb168-6430-44f8-8242-d1e0110bc71e", - "e34264f2-c999-4a3e-b2af-53b0c4f45095", - "157e6489-a96c-4e8c-8843-928c89529fff", - "efcbca04-8b0b-4a48-b3f2-e644a732d16d", - "238e35f2-9baa-4540-8fbd-ee46d2eca2cc", - "2f7e6ffb-47d5-485e-9166-1d455f2ec824", - "f75099a7-8600-4e4e-8332-1d045b6b85e1", - "2323b974-420c-40f7-8296-a28c4bc6b64e", - "31e7dab5-dbcb-4c33-999f-0e6779dad480", - 
"5221023d-6a15-4eeb-bf82-b770fcf8576d", - "51eee369-d85f-4097-ab93-419d31c2205f", - "3beaf7e5-a70e-4687-a28f-f9c8ff9364d8", - "c80d88bb-b1ca-454f-b793-19b00b141479", - "fda3e343-1f29-4e4d-8e56-6dd77fe28838", - "298a065a-1571-434e-a8cd-7464890595be", - "64540a85-4260-4469-81be-fac0e831a0ad", - "1fc17318-cec1-40cd-86d4-f498ce3342a4", - "30096c80-6e35-4c3f-910b-99b4190b79e1", - "4451d707-39d9-4174-b7ea-8dcfc4c408d4", - "dbc05fa6-bd30-4e0d-997b-6f8cb8854141", - "4ce06ba7-9074-445d-b97b-d665f065d60e", - "80115d98-b7df-4ed2-8fd8-7c7464143ce4", - "aa7142fc-b6c3-4499-98a2-5f424912d107", - "8adf8819-c3eb-43ce-ad11-04e0d22dfb52", - "a21b7d6e-354e-4d2d-b75f-b5782dcef385", - "b7ac8c80-8dfa-4314-8d76-6a57f87ad32f", - "2b15ccf1-6ee2-4c7d-9286-5133b0b57844", - "f443ce61-8ba8-4490-8bf9-8c1c443a0aa2", - "73025002-8582-48f4-8e32-d9866e8d97d2", - "6eb21022-4a99-4dd8-b76f-f2715378253b", - "f0e074af-2f97-4c67-bac8-f29f409b9db2", -] - - -def check_nexus_state(ms, state=pb.NEXUS_ONLINE): - nl = ms.nexus_list() - for nexus in nl: - assert nexus.state == state - for child in nexus.children: - assert child.state == pb.CHILD_ONLINE - - -def destroy_nexus(ms, list): - for uuid in list: - ms.nexus_destroy(uuid) - -@pytest.fixture -def create_nexus_devices(mayastors, share_null_devs): - - rlist_m0 = mayastors.get('ms0').bdev_list() - rlist_m1 = mayastors.get('ms1').bdev_list() - rlist_m2 = mayastors.get('ms2').bdev_list() - - assert len(rlist_m0) == len(rlist_m1) == len(rlist_m2) - - ms = mayastors.get('ms3') - - for uuid in nexus_uuids: - ms.nexus_create(uuid, - 94 * 1024 * 1024, - [rlist_m0.pop().share_uri, - rlist_m1.pop().share_uri, - rlist_m2.pop().share_uri]) - - for uuid in nexus_uuids: - ms.nexus_publish(uuid) - - assert len(ms.nexus_list()) == len(nexus_uuids) - - -@pytest.fixture -def create_null_devs(mayastors): - for node in ['ms0', 'ms1', 'ms2']: - ms = mayastors.get(node) - - for i in range(len(nexus_uuids)): - ms.bdev_create(f"null:///null{i}?blk_size=512&size_mb=100") - yield -# for node in ['ms0', 'ms1', 'ms2']: -# ms = mayastors.get(node) -# names = ms.bdev_list() -# for n in names: -# ms.bdev_destroy((n.uri)) -# - -@pytest.fixture -def share_null_devs(mayastors, create_null_devs): - - for node in ['ms0', 'ms1', 'ms2']: - ms = mayastors.get(node) - names = ms.bdev_list() - for n in names: - ms.bdev_share((n.name)) - - yield - -# for node in ['ms0', 'ms1', 'ms2']: -# ms = mayastors.get(node) -# names = ms.bdev_list() -# for n in names: -# ms.bdev_unshare((n.name)) -# - -async def kill_after(container, sec): - """Kill the given container after sec seconds.""" - await asyncio.sleep(sec) - container.kill() - - -def test_null_nexus(create_nexus_devices, - containers, - mayastors, - target_vm): - - ms = mayastors.get('ms3') - check_nexus_state(ms) -# destroy_nexus(ms, nexus_uuids) - -@pytest.mark.skip -def test_kill_one_by_one(create_nexus_devices, containers, mayastors): - - ms = mayastors.get('ms3') - - check_nexus_state(ms) - nodes = ['ms0', 'ms1', 'ms2']; - random.shuffle(nodes) - - - for ms in nodes: - ms.kill() - - check_nexus_state(ms, state=pb.NEXUS_FAULTED) diff --git a/test/python/docker-compose.yml b/test/python/tests/ana_client/docker-compose.yml similarity index 75% rename from test/python/docker-compose.yml rename to test/python/tests/ana_client/docker-compose.yml index cadd49bb0..e7e216ba6 100644 --- a/test/python/docker-compose.yml +++ b/test/python/tests/ana_client/docker-compose.yml @@ -9,6 +9,8 @@ services: image: rust:latest environment: - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + 
- NEXUS_NVMF_RESV_ENABLE=1 command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock networks: mayastor_net: @@ -21,17 +23,20 @@ services: - IPC_LOCK security_opt: # we can set this to a JSON file to allow per syscall access - - seccomp:unconfined + - seccomp=unconfined volumes: - ${SRCDIR}:${SRCDIR} - /nix:/nix - /dev/hugepages:/dev/hugepages - /tmp:/tmp + - /var/tmp:/var/tmp ms1: container_name: "ms1" image: rust:latest environment: - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock networks: mayastor_net: @@ -44,58 +49,69 @@ services: - IPC_LOCK security_opt: # we can set this to a JSON file to allow per syscall access - - seccomp:unconfined + - seccomp=unconfined volumes: - ${SRCDIR}:${SRCDIR} - /nix:/nix - /dev/hugepages:/dev/hugepages - /tmp:/tmp + - /var/tmp:/var/tmp ms2: container_name: "ms2" image: rust:latest environment: - MY_POD_IP=10.0.0.4 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 5,6 -r /tmp/ms2.sock networks: mayastor_net: ipv4_address: 10.0.0.4 cap_add: + # NUMA related - SYS_ADMIN - SYS_NICE + # uring needs mmap - IPC_LOCK security_opt: - - seccomp:unconfined + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined volumes: - ${SRCDIR}:${SRCDIR} - /nix:/nix - /dev/hugepages:/dev/hugepages - /tmp:/tmp + - /var/tmp:/var/tmp ms3: container_name: "ms3" image: rust:latest environment: - MY_POD_IP=10.0.0.5 - - RUST_BACKTRACE=full - NVME_KATO_MS=1000 - - RUST_LOG=mayastor=trace - - NEXUS_DONT_READ_LABELS=true + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 0,7 -r /tmp/ms3.sock networks: mayastor_net: ipv4_address: 10.0.0.5 cap_add: + # NUMA related - SYS_ADMIN - SYS_NICE + # uring needs mmap - IPC_LOCK security_opt: - - seccomp:unconfined + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined volumes: - ${SRCDIR}:${SRCDIR} - /nix:/nix - /dev/hugepages:/dev/hugepages - /tmp:/tmp + - /var/tmp:/var/tmp networks: mayastor_net: + name: mayastor_net ipam: driver: default config: diff --git a/test/python/tests/ana_client/test_ana_client.py b/test/python/tests/ana_client/test_ana_client.py new file mode 100644 index 000000000..30360d8de --- /dev/null +++ b/test/python/tests/ana_client/test_ana_client.py @@ -0,0 +1,140 @@ +import pytest +from common.mayastor import container_mod, mayastor_mod +from common.nvme import ( + nvme_connect, + nvme_disconnect, + nvme_disconnect_all, + nvme_list_subsystems, + identify_namespace, +) +import uuid +import mayastor_pb2 as pb +import os + + +POOL_NAME = "pool1" +NEXUS_GUID = "afebdeb9-ff44-1111-2222-254f810ba34a" + + +@pytest.fixture +def create_replicas(mayastor_mod): + ms0 = mayastor_mod.get("ms0") + ms1 = mayastor_mod.get("ms1") + + replicas = [] + + for m in (ms0, ms1): + p = m.pool_create(POOL_NAME, "malloc:///disk0?size_mb=100") + assert p.state == pb.POOL_ONLINE + r = m.replica_create(POOL_NAME, str(uuid.uuid4()), 32 * 1024 * 1024) + replicas.append(r.uri) + + yield replicas + + for m in (ms0, ms1): + try: + m.pool_destroy(POOL_NAME) + except Exception: + pass + + +@pytest.fixture +def create_nexuses(mayastor_mod, create_replicas): + uris = [] + + nvme_disconnect_all() + + for n in ["ms2", "ms3"]: + ms = mayastor_mod.get(n) + ms.nexus_create(NEXUS_GUID, 32 * 1024 * 1024, create_replicas) + uri = ms.nexus_publish(NEXUS_GUID) + 
uris.append(uri) + + yield uris + + nvme_disconnect_all() + + for n in ["ms2", "ms3"]: + ms = mayastor_mod.get(n) + ms.nexus_destroy(NEXUS_GUID) + + +def connect_multipath_nexuses(uris): + dev1 = nvme_connect(uris[0]) + dev2 = None + + try: + dev2 = nvme_connect(uris[1]) + except Exception: + # The first connect is allowed to fail due to controller ID collision. + pass + + if dev2 is None: + dev2 = nvme_connect(uris[1]) + + return (dev1, dev2) + + +@pytest.mark.asyncio +async def test_io_policy(create_replicas, create_nexuses, mayastor_mod): + devs = connect_multipath_nexuses(create_nexuses) + assert devs[0] == devs[1], "Paths are different for multipath nexus" + + # Make sure all we see exactly 2 paths and all paths are 'live optimized' + device = devs[0] + descr = nvme_list_subsystems(device) + paths = descr["Subsystems"][0]["Paths"] + assert len(paths) == 2, "Number of paths to Nexus mismatches" + + for p in paths: + assert p["State"] == "live" + assert p["ANAState"] == "optimized" + + # Make sure there are 2 virtual NVMe controllers for the namespace. + ns = os.path.basename(device) + for i in range(2): + cname = ns.replace("n1", "c%dn1" % i) + cpath = "/sys/block/%s" % cname + l = os.readlink(cpath) + assert l.startswith( + "../devices/virtual/nvme-fabrics/ctl/" + ), "Path device is not a virtual controller" + + # Make sure virtual NVMe namespace exists for multipath nexus. + l = os.readlink("/sys/block/%s" % ns) + assert l.startswith( + "../devices/virtual/nvme-subsystem/nvme-subsys" + ), "No virtual NVMe subsystem exists for multipath Nexus" + + # Make sure I/O policy is NUMA. + subsys = descr["Subsystems"][0]["Name"] + pfile = "/sys/class/nvme-subsystem/%s/iopolicy" % subsys + assert os.path.isfile(pfile), "No iopolicy file exists" + with open(pfile) as f: + iopolicy = f.read().strip() + assert iopolicy == "numa", "I/O policy is not NUMA" + + # Make sure ANA state is reported properly for both nexuses. + for n in ["ms2", "ms3"]: + ms = mayastor_mod.get(n) + nexuses = ms.nexus_list_v2() + assert len(nexuses) == 1, "Number of nexuses mismatches" + assert ( + nexuses[0].ana_state == pb.NVME_ANA_OPTIMIZED_STATE + ), "ANA state of nexus mismatches" + + +@pytest.mark.asyncio +async def test_namespace_guid(create_replicas, create_nexuses, mayastor_mod): + uri = create_nexuses[0] + device = nvme_connect(uri) + ns = identify_namespace(device) + nvme_disconnect(uri) + + # Namespace's GUID must match Nexus GUID. + assert uuid.UUID(ns["nguid"]) == uuid.UUID( + NEXUS_GUID + ), "Namespace NGID doesn't match Nexus GUID" + + # Extended Unique Identifier must be zero. 
+ assert ns["eui64"] == "0000000000000000", "Namespace EUI64 is not zero" diff --git a/test/python/tests/cli_controller/docker-compose.yml b/test/python/tests/cli_controller/docker-compose.yml new file mode 100644 index 000000000..c74d822b0 --- /dev/null +++ b/test/python/tests/cli_controller/docker-compose.yml @@ -0,0 +1,92 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms1: + container_name: "ms1" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.3 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms2: + container_name: "ms2" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.4 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 5,6 -r /tmp/ms2.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.4 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms3: + container_name: "ms3" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.5 + - NVME_KATO_MS=1000 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 0,7 -r /tmp/ms3.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.5 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/cli_controller/test_cli_controller.py b/test/python/tests/cli_controller/test_cli_controller.py new file mode 100644 index 000000000..041628e2f --- /dev/null +++ b/test/python/tests/cli_controller/test_cli_controller.py @@ -0,0 +1,146 @@ +import pytest +from common.mayastor import container_mod, mayastor_mod +from common.msclient import get_msclient +import mayastor_pb2 as pb +import uuid +from urllib.parse import urlparse +from common.command import run_cmd_async +from common.fio_spdk import FioSpdk + + +POOL_NAME = "pool1" +NEXUS_GUID = "9febdeb9-cb33-4166-a89d-254b810ba34a" + + +@pytest.fixture +def create_replicas(mayastor_mod): + ms1 = mayastor_mod.get("ms1") + ms2 = mayastor_mod.get("ms2") + + replicas = [] + + for m in (ms1, ms2): + p = m.pool_create(POOL_NAME, "malloc:///disk0?size_mb=100") + assert p.state == pb.POOL_ONLINE + r = m.replica_create(POOL_NAME, str(uuid.uuid4()), 32 * 1024 * 1024) + replicas.append(r.uri) + yield replicas + try: + for m in (ms1, ms2): + m.pool_destroy(POOL_NAME) + except Exception: + pass + + +@pytest.fixture +def create_nexus(mayastor_mod, create_replicas): + ms3 = mayastor_mod.get("ms3") + ms3.nexus_create(NEXUS_GUID, 32 * 
1024 * 1024, create_replicas) + uri = ms3.nexus_publish(NEXUS_GUID) + yield uri + ms3.nexus_destroy(NEXUS_GUID) + + +def assure_controllers(mscli, replicas): + """Check that target mayastor contains all the given controllers.""" + output = mscli("controller", "list") + assert len(output["controllers"]) == len(replicas) + names = [c["name"] for c in output["controllers"]] + + for r in replicas: + c = ctrl_name_from_uri(r) + assert c in names, "Controller for replica %s not found" % c + + +def ctrl_name_from_uri(uri): + """Form controller name from the full replica URL.""" + u = urlparse(uri) + return "%s%sn1" % (u.netloc, u.path) + + +@pytest.mark.asyncio +async def test_controller_list(mayastor_mod, create_replicas, create_nexus): + replica1 = mayastor_mod.get("ms1") + replica2 = mayastor_mod.get("ms2") + replicas = [replica1, replica2] + nexus = mayastor_mod.get("ms3") + mscli = get_msclient().with_json_output() + + # Should not see any controllers on replica instances. + for r in replicas: + output = mscli.with_url(r.ip_address())("controller", "list") + assert len(output["controllers"]) == 0 + + # Should see exactly 2 controllers on the nexus instance. + mscli.with_url(nexus.ip_address()) + assure_controllers(mscli, create_replicas) + + # Should not see a controller for the removed replica. + nexus.nexus_remove_replica(NEXUS_GUID, create_replicas[0]) + assure_controllers(mscli, create_replicas[1:]) + + # Should see controller for the newly added replica. + nexus.nexus_add_replica(NEXUS_GUID, create_replicas[0], True) + assure_controllers(mscli, create_replicas) + + +@pytest.mark.asyncio +async def test_controller_stats(mayastor_mod, create_replicas, create_nexus): + nexus = mayastor_mod.get("ms3") + mscli = get_msclient().with_json_output().with_url(nexus.ip_address()) + + # Should see exactly 2 controllers on the nexus instance. + assure_controllers(mscli, create_replicas) + + # Check that stats exist for all controllers and are initially empty. + output = mscli("controller", "stats") + assert len(output["controllers"]) == len(create_replicas) + names = [c["name"] for c in output["controllers"]] + + for r in create_replicas: + c = ctrl_name_from_uri(r) + assert c in names, "Controller for replica %s not found" % c + + for s in output["controllers"]: + stats = s["stats"] + for n in stats.keys(): + assert stats[n] == 0, "Stat %s is not zero for a new controller" % n + + # Issue I/O to replicas and make sure stats reflect that. + job = FioSpdk("job1", "readwrite", create_nexus, runtime=5).build() + await run_cmd_async(job) + + target_stats = ["num_read_ops", "num_write_ops", "bytes_read", "bytes_written"] + cached_stats = {"num_write_ops": 0, "bytes_written": 0} + + output = mscli("controller", "stats") + assert len(output["controllers"]) == 2 + for c in output["controllers"]: + stats = c["stats"] + + # Make sure all related I/O stats are counted. + for s in target_stats: + assert stats[s] > 0, "I/O stat %s is zero after active I/O operations" % s + + # Make sure IOPS and number of bytes are sane (fio uses 4k block). + assert ( + stats["bytes_read"] == stats["num_read_ops"] * 4096 + ), "Read IOPs don't match number of bytes" + assert ( + stats["bytes_written"] == stats["num_write_ops"] * 4096 + ), "Write IOPs don't match number of bytes" + + # Check that write-related stats are equal for all controllers in the same nexus + # and make sure all unrelated stats remained untouched. 
+ for s in stats.keys(): + if s in cached_stats: + # Cache I/O stat to check across all controllers against the same value. + if cached_stats[s] == 0: + cached_stats[s] = stats[s] + else: + # I/O stats for all controllers in a nexus must be equal. + assert cached_stats[s] == stats[s], ( + "I/O statistics %s for replicas mismatch" % s + ) + elif s not in target_stats: + assert stats[s] == 0, "Unrelated I/O stat %s got impacted by I/O" % s diff --git a/test/python/tests/csi/features/csi.feature b/test/python/tests/csi/features/csi.feature new file mode 100644 index 000000000..c24248b12 --- /dev/null +++ b/test/python/tests/csi/features/csi.feature @@ -0,0 +1,108 @@ +Feature: Mayastor CSI plugin pool management + + Background: + Given a mayastor instance + And a mayastor-csi instance + And a nexus published via "nvmf" + + Scenario: stage volume request without specified volume_id + When staging a volume with a missing volume_id + Then the request should fail + + Scenario: stage volume request without specified staging_target_path + When staging a volume with a missing staging_target_path + Then the request should fail + + Scenario: stage volume request without specified volume_capability + When staging a volume with a missing volume_capability + Then the request should fail + + Scenario: stage volume request without specified access_mode + When staging a volume with a volume_capability with a missing access_mode + Then the request should fail + + Scenario: stage volume request without specified mount + When staging a volume with a volume_capability with a missing mount + Then the request should fail + + Scenario: stage volume request with unsupported fs_type + When staging a volume with a volume_capability with a mount with an unsupported fs_type + Then the request should fail + + Scenario: staging a single writer volume + When staging an "ext4" volume as "MULTI_NODE_SINGLE_WRITER" + Then the request should succeed + + Scenario: restaging a volume + Given an "ext4" volume staged as "MULTI_NODE_SINGLE_WRITER" + When staging the same volume + Then the request should succeed + + Scenario: staging different volumes with the same staging_target_path + Given an "ext4" volume staged as "MULTI_NODE_SINGLE_WRITER" + When attempting to stage a different volume with the same staging_target_path + Then the request should fail + + Scenario: staging the same volumes with a different staging_target_path + Given an "ext4" volume staged as "MULTI_NODE_SINGLE_WRITER" + When staging the same volume but with a different staging_target_path + Then the request should fail + + Scenario: unstaging a single writer volume + Given an "ext4" volume staged as "MULTI_NODE_SINGLE_WRITER" + When unstaging the volume + Then the request should succeed + + Scenario: publish volume request without specified target_path + Given a staged volume + When publishing a volume with a missing target_path + Then the request should fail + + Scenario: publish volume request + Given a staged volume + When publishing a volume + Then the request should succeed + + Scenario: republishing a volume + Given a staged volume + And a published volume + When publishing the same volume + Then the request should succeed + + Scenario: publishing the same volumes with a different target_path + Given a staged volume + And a published volume + When publishing the same volume with a different target_path + Then the request should fail + + Scenario: publishing a single writer mount volume as readonly + Given an "ext4" volume staged as 
"MULTI_NODE_SINGLE_WRITER" + When publishing the volume as "ro" should succeed + + Scenario: publishing a single writer mount volume as rw + Given an "ext4" volume staged as "MULTI_NODE_SINGLE_WRITER" + When publishing the volume as "rw" should succeed + + Scenario: publishing a reader only mount volume as readonly + Given an "ext4" volume staged as "MULTI_NODE_READER_ONLY" + When publishing the volume as "ro" should succeed + + Scenario: publishing a reader only mount volume as rw + Given an "ext4" volume staged as "MULTI_NODE_READER_ONLY" + When publishing the volume as "rw" should fail + + Scenario: publishing a single writer block volume as readonly + Given a block volume staged as "MULTI_NODE_SINGLE_WRITER" + When publishing the block volume as "ro" should succeed + + Scenario: publishing a single writer block volume as rw + Given a block volume staged as "MULTI_NODE_SINGLE_WRITER" + When publishing the block volume as "rw" should succeed + + Scenario: publishing a reader only block volume as readonly + Given a block volume staged as "MULTI_NODE_READER_ONLY" + When publishing the block volume as "ro" should succeed + + Scenario: publishing a reader only block volume as rw + Given a block volume staged as "MULTI_NODE_READER_ONLY" + When publishing the block volume as "rw" should fail diff --git a/test/python/tests/csi/test_bdd_csi.py b/test/python/tests/csi/test_bdd_csi.py new file mode 100644 index 000000000..ce67de65f --- /dev/null +++ b/test/python/tests/csi/test_bdd_csi.py @@ -0,0 +1,779 @@ +import pytest +from pytest_bdd import given, scenario, then, when, parsers + +import logging +import os +import signal +import subprocess +import threading +import time + +from common.hdl import MayastorHandle +from common.csi_hdl import CsiHandle + +import grpc +import csi_pb2 as pb +import csi_pb2_grpc as rpc + +pytest_plugins = ["docker_compose"] + + +class Nexus: + def __init__(self, uuid, protocol, uri): + self.uuid = uuid + self.protocol = protocol + self.uri = uri + + +class Volume: + def __init__(self, uuid, protocol, uri, mode, staging_target_path, fs_type): + self.uuid = uuid + self.protocol = protocol + self.uri = uri + self.mode = mode + self.staging_target_path = staging_target_path + self.fs_type = fs_type + + +class PublishedVolume: + def __init__(self, volume, read_only, target_path): + self.volume = volume + self.read_only = read_only + self.target_path = target_path + + +def get_uuid(n): + return "11111111-0000-0000-0000-%.12d" % (n) + + +def share_type(protocol): + import mayastor_pb2 + + TYPES = { + "nbd": mayastor_pb2.ShareProtocolNexus.NEXUS_NBD, + "nvmf": mayastor_pb2.ShareProtocolNexus.NEXUS_NVMF, + "iscsi": mayastor_pb2.ShareProtocolNexus.NEXUS_ISCSI, + } + return TYPES[protocol] + + +def access_mode(name): + MODES = { + "SINGLE_NODE_WRITER": pb.VolumeCapability.AccessMode.Mode.SINGLE_NODE_WRITER, + "SINGLE_NODE_READER_ONLY": pb.VolumeCapability.AccessMode.Mode.SINGLE_NODE_READER_ONLY, + "MULTI_NODE_READER_ONLY": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_READER_ONLY, + "MULTI_NODE_SINGLE_WRITER": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER, + "MULTI_NODE_MULTI_WRITER": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_MULTI_WRITER, + } + return MODES[name] + + +def get_volume_capability(volume, read_only): + if volume.fs_type == "raw": + return pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode(mode=access_mode(volume.mode)), + block=pb.VolumeCapability.BlockVolume(), + ) + + mount_flags = ["ro"] if read_only else [] + + return 
pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode(mode=access_mode(volume.mode)), + mount=pb.VolumeCapability.MountVolume( + fs_type=volume.fs_type, mount_flags=mount_flags + ), + ) + + +@pytest.fixture(scope="module") +def start_csi_plugin(): + def monitor(proc, result): + stdout, stderr = proc.communicate() + result["stdout"] = stdout.decode() + result["stderr"] = stderr.decode() + result["status"] = proc.returncode + + proc = subprocess.Popen( + args=[ + "sudo", + os.environ["SRCDIR"] + "/target/debug/mayastor-csi", + "--csi-socket=/tmp/csi.sock", + "--grpc-endpoint=0.0.0.0", + "--node-name=msn-test", + "-v", + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + result = {} + handler = threading.Thread(target=monitor, args=[proc, result]) + handler.start() + time.sleep(1) + yield + subprocess.run(["sudo", "pkill", "mayastor-csi"], check=True) + handler.join() + print("[CSI] exit status: %d" % (result["status"])) + print(result["stdout"]) + print(result["stderr"]) + + +@pytest.fixture(scope="module") +def handles_mod(docker_project, module_scoped_container_getter): + assert "ms0" in docker_project.service_names + handles = {} + handles["ms0"] = MayastorHandle( + module_scoped_container_getter.get("ms0").get( + "NetworkSettings.Networks.mayastor_net.IPAddress" + ) + ) + yield handles + + +@pytest.fixture(scope="module") +def mayastor_instance(handles_mod): + yield handles_mod["ms0"] + + +@pytest.fixture(scope="module") +def fix_socket_permissions(start_csi_plugin): + subprocess.run(["sudo", "chmod", "go+rw", "/tmp/csi.sock"], check=True) + yield + + +@pytest.fixture(scope="module") +def csi_instance(start_csi_plugin, fix_socket_permissions): + yield CsiHandle("unix:///tmp/csi.sock") + + +@pytest.fixture +def staging_target_path(): + yield "/tmp/staging/mount" + + +@pytest.fixture +def target_path(): + try: + os.mkdir("/tmp/publish") + except FileExistsError: + pass + yield "/tmp/publish/mount" + + +@pytest.fixture(scope="module") +def io_timeout(): + yield "30" + + +@pytest.fixture(scope="module") +def mayastor_base_bdevs(mayastor_instance): + devices = {} + for n in range(5): + uuid = get_uuid(n) + uri = f"malloc:///malloc{n}?size_mb=64&uuid={uuid}&blk_size=4096" + bdev = mayastor_instance.bdev_create(uri) + devices[bdev.name] = uri + mayastor_instance.bdev_share(bdev.name) + yield devices + for name, uri in devices.items(): + mayastor_instance.bdev_unshare(name) + mayastor_instance.bdev_destroy(uri) + + +@pytest.fixture(scope="module") +def mayastor_nexuses(mayastor_instance, mayastor_base_bdevs): + nexuses = [] + for n in range(5): + uuid = get_uuid(n) + nexus = mayastor_instance.nexus_create( + uuid, 64 * 1024 * 1024, children=[f"bdev:///malloc{n}"] + ) + nexuses.append(nexus.uuid) + yield nexuses + for uuid in nexuses: + mayastor_instance.nexus_destroy(uuid) + + +@scenario("features/csi.feature", "publish volume request") +def test_publish_volume_request(): + "Publish volume request." + + +@scenario( + "features/csi.feature", "publish volume request without specified target_path" +) +def test_publish_volume_request_without_specified_target_path(): + "Publish volume request without specified target_path." + + +@scenario("features/csi.feature", "publishing a reader only block volume as readonly") +def test_publishing_a_reader_only_block_volume_as_readonly(): + "Publishing a reader only block volume as readonly." 
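+
+
+# Note: each @scenario decorator in this module binds a test function to the
+# Gherkin scenario of the same name in features/csi.feature; pytest-bdd then
+# drives it through the @given/@when/@then step definitions further below.
+# As a rough, hypothetical sketch (the scenario name below is not part of the
+# feature file), a new scenario would be wired up in the same way:
+#
+#   @scenario("features/csi.feature", "some new scenario")
+#   def test_some_new_scenario():
+#       "Some new scenario."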
+ + +@scenario("features/csi.feature", "publishing a reader only block volume as rw") +def test_publishing_a_reader_only_block_volume_as_rw(): + "Publishing a reader only block volume as rw." + + +@scenario("features/csi.feature", "publishing a reader only mount volume as readonly") +def test_publishing_a_reader_only_mount_volume_as_readonly(): + "Publishing a reader only mount volume as readonly." + + +@scenario("features/csi.feature", "publishing a reader only mount volume as rw") +def test_publishing_a_reader_only_mount_volume_as_rw(): + "Publishing a reader only mount volume as rw." + + +@scenario("features/csi.feature", "publishing a single writer block volume as readonly") +def test_publishing_a_single_writer_block_volume_as_readonly(): + "Publishing a single writer block volume as readonly." + + +@scenario("features/csi.feature", "publishing a single writer block volume as rw") +def test_publishing_a_single_writer_block_volume_as_rw(): + "Publishing a single writer block volume as rw." + + +@scenario("features/csi.feature", "publishing a single writer mount volume as readonly") +def test_publishing_a_single_writer_mount_volume_as_readonly(): + "Publishing a single writer mount volume as readonly." + + +@scenario("features/csi.feature", "publishing a single writer mount volume as rw") +def test_publishing_a_single_writer_mount_volume_as_rw(): + "Publishing a single writer mount volume as rw." + + +@scenario( + "features/csi.feature", "publishing the same volumes with a different target_path" +) +def test_publishing_the_same_volumes_with_a_different_target_path(): + "Publishing the same volumes with a different target_path." + + +@scenario("features/csi.feature", "republishing a volume") +def test_republishing_a_volume(): + "Republishing a volume." + + +@scenario("features/csi.feature", "restaging a volume") +def test_restaging_a_volume(): + "Restaging a volume." + + +@scenario("features/csi.feature", "stage volume request with unsupported fs_type") +def test_stage_volume_request_with_unsupported_fs_type(): + "Stage volume request with unsupported fs_type." + + +@scenario("features/csi.feature", "stage volume request without specified access_mode") +def test_stage_volume_request_without_specified_access_mode(): + "Stage volume request without specified access_mode." + + +@scenario("features/csi.feature", "stage volume request without specified mount") +def test_stage_volume_request_without_specified_mount(): + "Stage volume request without specified mount." + + +@scenario( + "features/csi.feature", "stage volume request without specified staging_target_path" +) +def test_stage_volume_request_without_specified_staging_target_path(): + "Stage volume request without specified staging_target_path." + + +@scenario( + "features/csi.feature", "stage volume request without specified volume_capability" +) +def test_stage_volume_request_without_specified_volume_capability(): + "Stage volume request without specified volume_capability." + + +@scenario("features/csi.feature", "stage volume request without specified volume_id") +def test_stage_volume_request_without_specified_volume_id(): + "Stage volume request without specified volume_id." + + +@scenario("features/csi.feature", "staging a single writer volume") +def test_staging_a_single_writer_volume(): + "Staging a single writer volume." 
+ + +@scenario( + "features/csi.feature", + "staging different volumes with the same staging_target_path", +) +def test_staging_different_volumes_with_the_same_staging_target_path(): + "Staging different volumes with the same staging_target_path." + + +@scenario( + "features/csi.feature", + "staging the same volumes with a different staging_target_path", +) +def test_staging_the_same_volumes_with_a_different_staging_target_path(): + "Staging the same volumes with a different staging_target_path." + + +@scenario("features/csi.feature", "unstaging a single writer volume") +def test_unstaging_a_single_writer_volume(): + "Unstaging a single writer volume." + + +@pytest.fixture +def published_nexuses(mayastor_instance, mayastor_nexuses): + published = {} + yield published + for uuid in published.keys(): + mayastor_instance.nexus_unpublish(uuid) + + +@pytest.fixture +def publish_nexus(mayastor_instance, published_nexuses): + def publish(uuid, protocol): + uri = mayastor_instance.nexus_publish(uuid, share_type(protocol)) + nexus = Nexus(uuid, protocol, uri) + published_nexuses[uuid] = nexus + return nexus + + yield publish + + +@pytest.fixture +def staged_volumes(csi_instance): + staged = {} + yield staged + for volume in staged.values(): + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume.uuid, staging_target_path=volume.staging_target_path + ) + ) + + +@pytest.fixture +def stage_volume(csi_instance, publish_nexus, staged_volumes, io_timeout): + def stage(volume): + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=volume.uuid, + publish_context={"uri": volume.uri, "ioTimeout": io_timeout}, + staging_target_path=volume.staging_target_path, + volume_capability=get_volume_capability(volume, False), + secrets={}, + volume_context={}, + ) + ) + staged_volumes[volume.uuid] = volume + + yield stage + + +@pytest.fixture +def published_volumes(csi_instance): + published = {} + yield published + for volume in published.values(): + csi_instance.node.NodeUnpublishVolume( + pb.NodeUnpublishVolumeRequest( + volume_id=volume.volume.uuid, target_path=volume.target_path + ) + ) + + +@pytest.fixture +def publish_volume(csi_instance, publish_nexus, published_volumes): + def publish(volume, read_only, target_path): + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume.uuid, + publish_context={"uri": volume.uri}, + staging_target_path=volume.staging_target_path, + target_path=target_path, + volume_capability=get_volume_capability(volume, read_only), + readonly=read_only, + secrets={}, + volume_context={}, + ) + ) + published_volumes[volume.uuid] = PublishedVolume(volume, read_only, target_path) + + yield publish + + +@given("a mayastor instance") +def get_mayastor_instance(mayastor_instance): + pass + + +@given("a mayastor-csi instance") +def get_mayastor_csi_instance(csi_instance): + pass + + +@given( + parsers.parse('a nexus published via "{protocol}"'), + target_fixture="get_published_nexus", +) +def get_published_nexus(publish_nexus, protocol): + uuid = get_uuid(0) + return publish_nexus(uuid, protocol) + + +@given( + parsers.parse('an "{fs_type}" volume staged as "{mode}"'), + target_fixture="get_staged_volume", +) +def get_staged_volume( + get_published_nexus, stage_volume, staging_target_path, fs_type, mode +): + nexus = get_published_nexus + volume = Volume( + nexus.uuid, nexus.protocol, nexus.uri, mode, staging_target_path, fs_type + ) + stage_volume(volume) + return volume + + +@given( + parsers.parse('a 
block volume staged as "{mode}"'), + target_fixture="get_staged_block_volume", +) +def get_staged_block_volume( + get_published_nexus, stage_volume, staging_target_path, mode +): + nexus = get_published_nexus + volume = Volume( + nexus.uuid, nexus.protocol, nexus.uri, mode, staging_target_path, "raw" + ) + stage_volume(volume) + return volume + + +@given("a published volume", target_fixture="generic_published_volume") +def generic_published_volume( + generic_staged_volume, publish_volume, published_volumes, target_path +): + volume = generic_staged_volume + publish_volume(volume, False, target_path) + return published_volumes[volume.uuid] + + +@given("a staged volume", target_fixture="generic_staged_volume") +def generic_staged_volume(get_published_nexus, stage_volume, staging_target_path): + nexus = get_published_nexus + volume = Volume( + nexus.uuid, + nexus.protocol, + nexus.uri, + "MULTI_NODE_SINGLE_WRITER", + staging_target_path, + "ext4", + ) + stage_volume(volume) + return volume + + +@when("attempting to stage a different volume with the same staging_target_path") +def attempt_to_stage_different_volume_with_same_staging_target_path( + publish_nexus, get_staged_volume, stage_volume +): + volume = get_staged_volume + uuid = get_uuid(1) + nexus = publish_nexus(uuid, volume.protocol) + volume = Volume( + nexus.uuid, + nexus.protocol, + nexus.uri, + volume.mode, + volume.staging_target_path, + volume.fs_type, + ) + with pytest.raises(grpc.RpcError) as error: + stage_volume(volume) + + +@when("staging a volume with a missing staging_target_path") +def attempt_to_stage_volume_with_missing_staging_target_path( + get_published_nexus, csi_instance, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=nexus.uuid, + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + volume_capability=pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode( + mode=pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER + ), + mount=pb.VolumeCapability.MountVolume( + fs_type="ext4", mount_flags=[] + ), + ), + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("staging a volume with a missing volume_capability") +def attempt_to_stage_volume_with_missing_volume_capability( + get_published_nexus, csi_instance, staging_target_path, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=nexus.uuid, + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + staging_target_path=staging_target_path, + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("staging a volume with a missing volume_id") +def attempt_to_stage_volume_with_missing_volume_id( + get_published_nexus, csi_instance, staging_target_path, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + staging_target_path=staging_target_path, + volume_capability=pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode( + mode=pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER + ), + mount=pb.VolumeCapability.MountVolume( + fs_type="ext4", mount_flags=[] + ), + ), + secrets={}, + 
volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("staging a volume with a volume_capability with a missing access_mode") +def attempt_to_stage_volume_with_missing_access_mode( + get_published_nexus, csi_instance, staging_target_path, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=nexus.uuid, + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + staging_target_path=staging_target_path, + volume_capability=pb.VolumeCapability( + mount=pb.VolumeCapability.MountVolume( + fs_type="ext4", mount_flags=[] + ) + ), + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("staging a volume with a volume_capability with a missing mount") +def attempt_to_stage_volume_with_missing_mount( + get_published_nexus, csi_instance, staging_target_path, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=nexus.uuid, + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + staging_target_path=staging_target_path, + volume_capability=pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode( + mode=pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER + ), + ), + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when( + "staging a volume with a volume_capability with a mount with an unsupported fs_type" +) +def attempt_to_stage_volume_with_unsupported_fs_type( + get_published_nexus, csi_instance, staging_target_path, io_timeout +): + nexus = get_published_nexus + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=nexus.uuid, + publish_context={"uri": nexus.uri, "ioTimeout": io_timeout}, + staging_target_path=staging_target_path, + volume_capability=pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode( + mode=pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER + ), + mount=pb.VolumeCapability.MountVolume( + fs_type="ext3", mount_flags=[] + ), + ), + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when(parsers.parse('staging an "{fs_type}" volume as "{mode}"')) +def stage_new_volume( + get_published_nexus, stage_volume, staging_target_path, fs_type, mode +): + nexus = get_published_nexus + volume = Volume( + nexus.uuid, nexus.protocol, nexus.uri, mode, staging_target_path, fs_type + ) + stage_volume(volume) + + +@when("staging the same volume") +def stage_same_volume(get_staged_volume, stage_volume): + volume = get_staged_volume + stage_volume(volume) + + +@when("attempting to stage a different volume with the same staging_target_path") +def attempt_to_stage_different_volume_with_same_staging_target_path( + get_staged_volume, publish_nexus, stage_volume +): + volume = get_staged_volume + uuid = get_uuid(1) + nexus = publish_nexus(uuid, volume.protocol) + with pytest.raises(grpc.RpcError) as error: + stage_volume( + Volume( + nexus.uuid, + nexus.protocol, + nexus.uri, + volume.mode, + volume.staging_target_path, + "ext4", + ) + ) + assert error.value.code() == grpc.StatusCode.ALREADY_EXISTS + + +@when("staging the same volume but with a different staging_target_path") +def 
attempt_to_stage_same_volume_with_different_staging_target_path( + get_staged_volume, stage_volume +): + volume = get_staged_volume + with pytest.raises(grpc.RpcError) as error: + stage_volume( + Volume( + volume.uuid, + volume.protocol, + volume.uri, + volume.mode, + "/tmp/different/staging/mount", + volume.fs_type, + ) + ) + assert error.value.code() == grpc.StatusCode.ALREADY_EXISTS + + +@when("unstaging the volume") +def unstaging_the_volume(csi_instance, get_staged_volume, staged_volumes): + volume = get_staged_volume + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume.uuid, staging_target_path=volume.staging_target_path + ) + ) + del staged_volumes[volume.uuid] + + +@when("publishing a volume") +def generic_published_volume(generic_staged_volume, publish_volume, target_path): + volume = generic_staged_volume + publish_volume(volume, False, target_path) + + +@when("publishing a volume with a missing target_path") +def attempt_to_publish_volume_with_missing_target_path( + csi_instance, generic_staged_volume +): + volume = generic_staged_volume + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume.uuid, + publish_context={"uri": volume.uri}, + staging_target_path=volume.staging_target_path, + volume_capability=get_volume_capability(volume, False), + readonly=False, + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("publishing the same volume") +def publish_same_volume(generic_published_volume, publish_volume): + volume = generic_published_volume + publish_volume(volume.volume, volume.read_only, volume.target_path) + + +@when("publishing the same volume with a different target_path") +def attempt_to_publish_same_volume_with_different_target_path( + generic_published_volume, publish_volume +): + with pytest.raises(grpc.RpcError) as error: + volume = generic_published_volume + publish_volume(volume.volume, volume.read_only, "/tmp/different/publish/mount") + assert error.value.code() == grpc.StatusCode.INTERNAL + + +@when(parsers.parse('publishing the volume as "{flags}" should {disposition}')) +def publish_volume_as_read_or_write( + get_staged_volume, publish_volume, target_path, flags, disposition +): + volume = get_staged_volume + if disposition == "succeed": + publish_volume(volume, flags == "ro", target_path) + else: + with pytest.raises(grpc.RpcError) as error: + publish_volume(volume, flags == "ro", target_path) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when(parsers.parse('publishing the block volume as "{flags}" should {disposition}')) +def publish_block_volume_as_read_or_write( + get_staged_block_volume, publish_volume, target_path, flags, disposition +): + volume = get_staged_block_volume + if disposition == "succeed": + publish_volume(volume, flags == "ro", target_path) + else: + with pytest.raises(grpc.RpcError) as error: + publish_volume(volume, flags == "ro", target_path) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@then(parsers.parse("the request should {disposition}")) +def request_success_expected(disposition): + return disposition == "succeed" diff --git a/test/python/tests/csi/test_csi.py b/test/python/tests/csi/test_csi.py new file mode 100644 index 000000000..931033243 --- /dev/null +++ b/test/python/tests/csi/test_csi.py @@ -0,0 +1,454 @@ +import logging +import os +import pytest +import signal +import subprocess +import threading 
+import time + +from common.hdl import MayastorHandle +from common.csi_hdl import CsiHandle + +import grpc +import csi_pb2 as pb +import csi_pb2_grpc as rpc + +pytest_plugins = ["docker_compose"] + + +def get_uuid(n): + return "11111111-0000-0000-0000-%.12d" % (n) + + +@pytest.fixture(scope="module") +def start_csi_plugin(): + def monitor(proc, result): + stdout, stderr = proc.communicate() + result["stdout"] = stdout.decode() + result["stderr"] = stderr.decode() + result["status"] = proc.returncode + + proc = subprocess.Popen( + args=[ + "sudo", + os.environ["SRCDIR"] + "/target/debug/mayastor-csi", + "--csi-socket=/tmp/csi.sock", + "--grpc-endpoint=0.0.0.0", + "--node-name=msn-test", + "-v", + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + result = {} + handler = threading.Thread(target=monitor, args=[proc, result]) + handler.start() + time.sleep(1) + yield + subprocess.run(["sudo", "pkill", "mayastor-csi"], check=True) + handler.join() + print("[CSI] exit status: %d" % (result["status"])) + print(result["stdout"]) + print(result["stderr"]) + + +@pytest.fixture(scope="module") +def handles_mod(docker_project, module_scoped_container_getter): + assert "ms0" in docker_project.service_names + handles = {} + handles["ms0"] = MayastorHandle( + module_scoped_container_getter.get("ms0").get( + "NetworkSettings.Networks.mayastor_net.IPAddress" + ) + ) + yield handles + + +@pytest.fixture(scope="module") +def mayastor_instance(handles_mod): + yield handles_mod["ms0"] + + +@pytest.fixture(scope="module") +def fix_socket_permissions(start_csi_plugin): + subprocess.run(["sudo", "chmod", "go+rw", "/tmp/csi.sock"], check=True) + yield + + +@pytest.fixture(scope="module") +def csi_instance(start_csi_plugin, fix_socket_permissions): + yield CsiHandle("unix:///tmp/csi.sock") + + +def test_plugin_info(csi_instance): + info = csi_instance.identity.GetPluginInfo(pb.GetPluginInfoRequest()) + assert info.name == "io.openebs.csi-mayastor" + assert info.vendor_version == "0.2" + + +def test_plugin_capabilities(csi_instance): + response = csi_instance.identity.GetPluginCapabilities( + pb.GetPluginCapabilitiesRequest() + ) + services = [cap.service.type for cap in response.capabilities] + assert pb.PluginCapability.Service.Type.CONTROLLER_SERVICE in services + assert pb.PluginCapability.Service.Type.VOLUME_ACCESSIBILITY_CONSTRAINTS in services + + +def test_probe(csi_instance): + response = csi_instance.identity.Probe(pb.ProbeRequest()) + assert response.ready + + +def test_node_info(csi_instance): + info = csi_instance.node.NodeGetInfo(pb.NodeGetInfoRequest()) + assert info.node_id == "mayastor://msn-test" + assert info.max_volumes_per_node == 0 + + +def test_node_capabilities(csi_instance): + response = csi_instance.node.NodeGetCapabilities(pb.NodeGetCapabilitiesRequest()) + assert pb.NodeServiceCapability.RPC.Type.STAGE_UNSTAGE_VOLUME in [ + cap.rpc.type for cap in response.capabilities + ] + + +@pytest.fixture(scope="module") +def mayastor_base_bdevs(mayastor_instance): + devices = {} + for n in range(5): + uuid = get_uuid(n) + uri = f"malloc:///malloc{n}?size_mb=64&uuid={uuid}&blk_size=4096" + bdev = mayastor_instance.bdev_create(uri) + devices[bdev.name] = uri + mayastor_instance.bdev_share(bdev.name) + yield devices + for name, uri in devices.items(): + mayastor_instance.bdev_unshare(name) + mayastor_instance.bdev_destroy(uri) + + +@pytest.fixture(scope="module") +def mayastor_nexuses(mayastor_instance, mayastor_base_bdevs): + nexuses = [] + for n in range(5): + uuid = get_uuid(n) + nexus = 
mayastor_instance.nexus_create( + uuid, 64 * 1024 * 1024, children=[f"bdev:///malloc{n}"] + ) + nexuses.append(nexus.uuid) + yield nexuses + for uuid in nexuses: + mayastor_instance.nexus_destroy(uuid) + + +@pytest.fixture(scope="module") +def io_timeout(): + yield "33" + + +@pytest.fixture(params=["nvmf", "iscsi"]) +def share_type(request): + import mayastor_pb2 + + TYPES = { + "nbd": mayastor_pb2.ShareProtocolNexus.NEXUS_NBD, + "nvmf": mayastor_pb2.ShareProtocolNexus.NEXUS_NVMF, + "iscsi": mayastor_pb2.ShareProtocolNexus.NEXUS_ISCSI, + } + yield TYPES[request.param] + + +@pytest.fixture +def staging_target_path(): + yield "/tmp/staging/mount" + + +@pytest.fixture +def target_path(): + try: + os.mkdir("/tmp/publish") + except FileExistsError: + pass + yield "/tmp/publish/mount" + + +@pytest.fixture(params=["ext4", "xfs"]) +def fs_type(request): + yield request.param + + +@pytest.fixture +def volume_id(fs_type): + # use a different (volume) uuid for each filesystem type + yield get_uuid(["ext3", "ext4", "xfs"].index(fs_type)) + + +@pytest.fixture +def mayastor_published_nexus( + mayastor_instance, mayastor_nexuses, share_type, volume_id +): + uuid = volume_id + yield mayastor_instance.nexus_publish(uuid, share_type) + mayastor_instance.nexus_unpublish(uuid) + + +def test_get_volume_stats( + csi_instance, mayastor_published_nexus, volume_id, target_path +): + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodeGetVolumeStats( + pb.NodeGetVolumeStatsRequest(volume_id=volume_id, volume_path=target_path) + ) + assert error.value.code() == grpc.StatusCode.UNIMPLEMENTED + + +@pytest.fixture(params=["multi-node-reader-only", "multi-node-single-writer"]) +def access_mode(request): + MODES = { + "single-node-writer": pb.VolumeCapability.AccessMode.Mode.SINGLE_NODE_WRITER, + "single-node-reader-only": pb.VolumeCapability.AccessMode.Mode.SINGLE_NODE_READER_ONLY, + "multi-node-reader-only": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_READER_ONLY, + "multi-node-single-writer": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_SINGLE_WRITER, + "multi-node-multi-writer": pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_MULTI_WRITER, + } + yield MODES[request.param] + + +@pytest.fixture(params=["rw", "ro"]) +def read_only(request): + yield request.param == "ro" + + +@pytest.fixture +def compatible(access_mode, read_only): + yield read_only or access_mode not in [ + pb.VolumeCapability.AccessMode.Mode.SINGLE_NODE_READER_ONLY, + pb.VolumeCapability.AccessMode.Mode.MULTI_NODE_READER_ONLY, + ] + + +@pytest.fixture +def publish_mount_flags(read_only): + yield ["ro"] if read_only else [] + + +@pytest.fixture +def stage_context(mayastor_published_nexus, io_timeout): + yield {"uri": mayastor_published_nexus, "ioTimeout": io_timeout} + + +@pytest.fixture +def publish_context(mayastor_published_nexus, volume_id): + yield {"uri": mayastor_published_nexus} + + +@pytest.fixture +def block_volume_capability(access_mode): + yield pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode(mode=access_mode), + block=pb.VolumeCapability.BlockVolume(), + ) + + +@pytest.fixture +def stage_mount_volume_capability(access_mode, fs_type): + yield pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode(mode=access_mode), + mount=pb.VolumeCapability.MountVolume(fs_type=fs_type, mount_flags=[]), + ) + + +@pytest.fixture +def publish_mount_volume_capability(access_mode, fs_type, publish_mount_flags): + yield pb.VolumeCapability( + access_mode=pb.VolumeCapability.AccessMode(mode=access_mode), + 
mount=pb.VolumeCapability.MountVolume( + fs_type=fs_type, mount_flags=publish_mount_flags + ), + ) + + +@pytest.fixture +def staged_block_volume( + csi_instance, volume_id, stage_context, staging_target_path, block_volume_capability +): + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=volume_id, + publish_context=stage_context, + staging_target_path=staging_target_path, + volume_capability=block_volume_capability, + secrets={}, + volume_context={}, + ) + ) + yield + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume_id, staging_target_path=staging_target_path + ) + ) + + +def test_stage_block_volume( + csi_instance, volume_id, stage_context, staging_target_path, block_volume_capability +): + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=volume_id, + publish_context=stage_context, + staging_target_path=staging_target_path, + volume_capability=block_volume_capability, + secrets={}, + volume_context={}, + ) + ) + time.sleep(0.5) + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume_id, staging_target_path=staging_target_path + ) + ) + + +def test_publish_block_volume( + csi_instance, + volume_id, + publish_context, + staging_target_path, + target_path, + block_volume_capability, + read_only, + staged_block_volume, + compatible, +): + if compatible: + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume_id, + publish_context=publish_context, + staging_target_path=staging_target_path, + target_path=target_path, + volume_capability=block_volume_capability, + readonly=read_only, + secrets={}, + volume_context={}, + ) + ) + time.sleep(0.5) + csi_instance.node.NodeUnpublishVolume( + pb.NodeUnpublishVolumeRequest(volume_id=volume_id, target_path=target_path) + ) + else: + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume_id, + publish_context=publish_context, + staging_target_path=staging_target_path, + target_path=target_path, + volume_capability=block_volume_capability, + readonly=read_only, + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@pytest.fixture +def staged_mount_volume( + csi_instance, + volume_id, + stage_context, + staging_target_path, + stage_mount_volume_capability, +): + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=volume_id, + publish_context=stage_context, + staging_target_path=staging_target_path, + volume_capability=stage_mount_volume_capability, + secrets={}, + volume_context={}, + ) + ) + yield + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume_id, staging_target_path=staging_target_path + ) + ) + + +def test_stage_mount_volume( + csi_instance, + volume_id, + stage_context, + staging_target_path, + stage_mount_volume_capability, +): + csi_instance.node.NodeStageVolume( + pb.NodeStageVolumeRequest( + volume_id=volume_id, + publish_context=stage_context, + staging_target_path=staging_target_path, + volume_capability=stage_mount_volume_capability, + secrets={}, + volume_context={}, + ) + ) + time.sleep(0.5) + csi_instance.node.NodeUnstageVolume( + pb.NodeUnstageVolumeRequest( + volume_id=volume_id, staging_target_path=staging_target_path + ) + ) + + +def test_publish_mount_volume( + csi_instance, + volume_id, + publish_context, + staging_target_path, + target_path, + publish_mount_volume_capability, + 
read_only, + staged_mount_volume, + compatible, +): + if compatible: + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume_id, + publish_context=publish_context, + staging_target_path=staging_target_path, + target_path=target_path, + volume_capability=publish_mount_volume_capability, + readonly=read_only, + secrets={}, + volume_context={}, + ) + ) + time.sleep(0.5) + csi_instance.node.NodeUnpublishVolume( + pb.NodeUnpublishVolumeRequest(volume_id=volume_id, target_path=target_path) + ) + else: + with pytest.raises(grpc.RpcError) as error: + csi_instance.node.NodePublishVolume( + pb.NodePublishVolumeRequest( + volume_id=volume_id, + publish_context=publish_context, + staging_target_path=staging_target_path, + target_path=target_path, + volume_capability=publish_mount_volume_capability, + readonly=read_only, + secrets={}, + volume_context={}, + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT diff --git a/test/python/tests/nexus/docker-compose.yml b/test/python/tests/nexus/docker-compose.yml new file mode 100644 index 000000000..4c4ffe904 --- /dev/null +++ b/test/python/tests/nexus/docker-compose.yml @@ -0,0 +1,120 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms1: + container_name: "ms1" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 2 -r /tmp/ms1.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.3 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms2: + container_name: "ms2" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.4 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3 -r /tmp/ms2.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.4 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms3: + container_name: "ms3" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.5 + - NVME_KATO_MS=1000 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + # required when using "null" devices (which can be written to but cannot be read from) + - NEXUS_DONT_READ_LABELS=true + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 4 -r /tmp/ms3.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.5 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap 
+ - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/nexus/test_multi_nexus.py b/test/python/tests/nexus/test_multi_nexus.py new file mode 100644 index 000000000..d82e0656e --- /dev/null +++ b/test/python/tests/nexus/test_multi_nexus.py @@ -0,0 +1,161 @@ +from common.hdl import MayastorHandle +from common.command import run_cmd, run_cmd_async +from common.nvme import nvme_connect, nvme_disconnect +from common.fio import Fio +from common.fio_spdk import FioSpdk +from common.mayastor import containers, mayastors, create_temp_files, check_size +import pytest +import asyncio +import uuid as guid + +NEXUS_COUNT = 15 +DESTROY_COUNT = 7 + + +@pytest.fixture +def create_replicas_on_all_nodes(mayastors, create_temp_files): + "Create a pool on each node." + uuids = [] + + for name, ms in mayastors.items(): + ms.pool_create(name, f"aio:///tmp/{name}.img") + # verify we have zero replicas + assert len(ms.replica_list().replicas) == 0 + + for i in range(NEXUS_COUNT): + uuid = guid.uuid4() + for name, ms in mayastors.items(): + before = ms.pool_list() + ms.replica_create(name, uuid, 64 * 1024 * 1024) + after = ms.pool_list() + check_size(before, after, -64) + # ensure our replica count goes up as expected + assert len(ms.replica_list().replicas) == i + 1 + uuids.append(uuid) + + yield uuids + + +@pytest.mark.parametrize("times", range(3)) +def test_restart(containers, mayastors, create_replicas_on_all_nodes, times): + """ + Test that when we create replicas and destroy them the count is as expected + At this point we have 3 nodes each with NEXUS_COUNT replicas. + """ + + node = containers.get("ms1") + ms1 = mayastors.get("ms1") + + # kill one of the nodes, restart it, and verify we still have NEXUS_COUNT replicas + node.kill() + node.start() + + # must reconnect grpc + ms1.reconnect() + + # create does import here if found + ms1.pool_create("ms1", "aio:///tmp/ms1.img") + + # check the list has the required number of replicas + replicas = ms1.replica_list().replicas + assert len(replicas) == NEXUS_COUNT + + # destroy a few + for i in range(DESTROY_COUNT): + ms1.replica_destroy(replicas[i].uuid) + + # kill (again) and reconnect + node.kill() + node.start() + ms1.reconnect() + + # verify we have correct number of replicas remaining + ms1.pool_create("ms1", "aio:///tmp/ms1.img") + replicas = ms1.replica_list().replicas + + assert len(replicas) + DESTROY_COUNT == NEXUS_COUNT + + +async def kill_after(container, sec): + "Kill the given container after sec seconds." + await asyncio.sleep(sec) + container.kill() + + +@pytest.fixture +def create_nexuses(mayastors, create_replicas_on_all_nodes): + "Create a nexus for each replica on each child node." + nexuses = [] + ms1 = mayastors.get("ms1") + uris = [ + [replica.uri for replica in mayastors.get(node).replica_list().replicas] + for node in ["ms2", "ms3"] + ] + + for children in zip(*uris): + uuid = guid.uuid4() + ms1.nexus_create(uuid, 60 * 1024 * 1024, list(children)) + nexuses.append(ms1.nexus_publish(uuid)) + + yield nexuses + + for nexus in ms1.nexus_list(): + uuid = nexus.uuid + ms1.nexus_unpublish(uuid) + ms1.nexus_destroy(uuid) + + +@pytest.fixture +def connect_devices(create_nexuses): + "Connect an nvmf device to each nexus." 
+ yield [nvme_connect(nexus) for nexus in create_nexuses] + + for nexus in create_nexuses: + nvme_disconnect(nexus) + + +@pytest.fixture +async def mount_devices(connect_devices): + "Create and mount a filesystem on each nvmf connected device." + for dev in connect_devices: + await run_cmd_async(f"sudo mkfs.xfs {dev}") + await run_cmd_async(f"sudo mkdir -p /mnt{dev}") + await run_cmd_async(f"sudo mount {dev} /mnt{dev}") + + yield + + for dev in connect_devices: + await run_cmd_async(f"sudo umount /mnt{dev}") + + +@pytest.mark.asyncio +async def test_multiple_raw(containers, connect_devices): + fio_cmd = Fio(f"job-raw", "randwrite", connect_devices).build() + print(fio_cmd) + + to_kill = containers.get("ms3") + await asyncio.gather(run_cmd_async(fio_cmd), kill_after(to_kill, 3)) + + +@pytest.mark.asyncio +async def test_multiple_fs(containers, connect_devices, mount_devices): + # we're now writing to files not raw devices + files = [f"/mnt{dev}/file.dat" for dev in connect_devices] + fio_cmd = Fio( + f"job-fs", + "randwrite", + files, + optstr="--verify=crc32 --verify_fatal=1 --verify_async=2 --size=50mb", + ).build() + print(fio_cmd) + + to_kill = containers.get("ms3") + await asyncio.gather(run_cmd_async(fio_cmd), kill_after(to_kill, 3)) + + +@pytest.mark.asyncio +async def test_multiple_spdk(containers, create_nexuses): + fio_cmd = FioSpdk(f"job-spdk", "randwrite", create_nexuses).build() + + to_kill = containers.get("ms3") + await asyncio.gather(run_cmd_async(fio_cmd), kill_after(to_kill, 3)) diff --git a/test/python/tests/nexus/test_nexus.py b/test/python/tests/nexus/test_nexus.py new file mode 100644 index 000000000..5f1af0ccb --- /dev/null +++ b/test/python/tests/nexus/test_nexus.py @@ -0,0 +1,530 @@ +from common.hdl import MayastorHandle +from common.command import run_cmd, run_cmd_async +from common.fio import Fio +from common.fio_spdk import FioSpdk +from common.mayastor import containers, mayastors +from common.volume import Volume +import logging +import pytest +import uuid as guid +import grpc +import asyncio +import time +import mayastor_pb2 as pb +from common.nvme import ( + nvme_discover, + nvme_connect, + nvme_disconnect, + nvme_id_ctrl, + nvme_resv_report, +) + + +@pytest.fixture +def create_nexus(mayastors, nexus_uuid, create_replica): + hdls = mayastors + replicas = [k.uri for k in create_replica] + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms3"].nexus_create(NEXUS_UUID, 64 * 1024 * 1024, replicas) + uri = hdls["ms3"].nexus_publish(NEXUS_UUID) + + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + assert len(hdls["ms1"].pool_list().pools) == 1 + assert len(hdls["ms2"].pool_list().pools) == 1 + + yield uri + hdls["ms3"].nexus_destroy(NEXUS_UUID) + + +@pytest.fixture +def create_nexus_v2( + mayastors, nexus_name, nexus_uuid, create_replica, min_cntlid, resv_key +): + hdls = mayastors + replicas = [k.uri for k in create_replica] + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms3"].nexus_create_v2( + nexus_name, + NEXUS_UUID, + size_mb, + min_cntlid, + min_cntlid + 9, + resv_key, + 0, + replicas, + ) + + uri = hdls["ms3"].nexus_publish(nexus_name) + + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + assert len(hdls["ms1"].pool_list().pools) == 1 + assert len(hdls["ms2"].pool_list().pools) == 1 + + yield uri + hdls["ms3"].nexus_destroy(nexus_name) + + +@pytest.fixture +def create_nexus_2_v2( + mayastors, 
nexus_name, nexus_uuid, min_cntlid_2, resv_key, resv_key_2 +): + """Create a 2nd nexus on ms0 with the same 2 replicas but with resv_key_2 + and preempt resv_key""" + hdls = mayastors + NEXUS_NAME = nexus_name + + replicas = [] + list = mayastors.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == NEXUS_NAME) + replicas.append(nexus.children[0].uri) + replicas.append(nexus.children[1].uri) + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms0"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid_2, + min_cntlid_2 + 9, + resv_key_2, + resv_key, + replicas, + ) + uri = hdls["ms0"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms0"].bdev_list()) == 1 + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + yield uri + hdls["ms0"].nexus_destroy(nexus_name) + + +@pytest.fixture +def pool_config(): + """ + The idea is this used to obtain the pool types and names that should be + created. + """ + pool = {} + pool["name"] = "tpool" + pool["uri"] = "malloc:///disk0?size_mb=100" + return pool + + +@pytest.fixture +def replica_uuid(): + """Replica UUID to be used.""" + UUID = "0000000-0000-0000-0000-000000000001" + size_mb = 64 * 1024 * 1024 + return (UUID, size_mb) + + +@pytest.fixture +def nexus_name(): + """Nexus name to be used.""" + NEXUS_NAME = "nexus0" + return NEXUS_NAME + + +@pytest.fixture +def nexus_uuid(): + """Nexus UUID's to be used.""" + NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" + size_mb = 64 * 1024 * 1024 + return (NEXUS_UUID, size_mb) + + +@pytest.fixture +def min_cntlid(): + """NVMe minimum controller ID to be used.""" + min_cntlid = 50 + return min_cntlid + + +@pytest.fixture +def min_cntlid_2(): + """NVMe minimum controller ID to be used for 2nd nexus.""" + min_cntlid = 60 + return min_cntlid + + +@pytest.fixture +def resv_key(): + """NVMe reservation key to be used.""" + resv_key = 0xABCDEF0012345678 + return resv_key + + +@pytest.fixture +def resv_key_2(): + """NVMe reservation key to be used for 2nd nexus.""" + resv_key = 0x1234567890ABCDEF + return resv_key + + +@pytest.fixture +def create_pools(containers, mayastors, pool_config): + hdls = mayastors + + cfg = pool_config + pools = [] + + pools.append(hdls["ms1"].pool_create(cfg.get("name"), cfg.get("uri"))) + pools.append(hdls["ms2"].pool_create(cfg.get("name"), cfg.get("uri"))) + + for p in pools: + assert p.state == pb.POOL_ONLINE + + yield pools + try: + hdls["ms1"].pool_destroy(cfg.get("name")) + hdls["ms2"].pool_destroy(cfg.get("name")) + except Exception: + pass + + +@pytest.fixture +def create_replica(mayastors, replica_uuid, create_pools): + hdls = mayastors + pools = create_pools + replicas = [] + + UUID, size_mb = replica_uuid + + replicas.append(hdls["ms1"].replica_create(pools[0].name, UUID, size_mb)) + replicas.append(hdls["ms2"].replica_create(pools[0].name, UUID, size_mb)) + + yield replicas + try: + hdls["ms1"].replica_destroy(UUID) + hdls["ms2"].replica_destroy(UUID) + except Exception as e: + logging.debug(e) + + +def test_enospace_on_volume(mayastors, create_replica): + nodes = mayastors + pools = [] + uuid = guid.uuid4() + + pools.append(nodes["ms2"].pools_as_uris()[0]) + pools.append(nodes["ms1"].pools_as_uris()[0]) + nexus_node = nodes["ms3"].as_target() + + v = Volume(uuid, nexus_node, pools, 100 * 1024 * 1024) + + with pytest.raises(grpc.RpcError) as error: + v.create() + assert error.value.code() == grpc.StatusCode.RESOURCE_EXHAUSTED + + +async def kill_after(container, sec): + 
"""Kill the given container after sec seconds.""" + await asyncio.sleep(sec) + logging.info(f"killing container {container}") + container.kill() + + +@pytest.mark.asyncio +async def test_nexus_2_mirror_kill_one(containers, mayastors, create_nexus): + + uri = create_nexus + nvme_discover(uri) + dev = nvme_connect(uri) + try: + job = Fio("job1", "rw", dev).build() + print(job) + + to_kill = containers.get("ms2") + await asyncio.gather(run_cmd_async(job), kill_after(to_kill, 5)) + + finally: + # disconnect target before we shutdown + nvme_disconnect(uri) + + +@pytest.mark.asyncio +async def test_nexus_2_remote_mirror_kill_one( + containers, mayastors, nexus_uuid, create_nexus +): + """ + This test does the following steps: + + - creates mayastor instances + - creates pools on mayastor 1 and 2 + - creates replicas on those pools + - creates a nexus on mayastor 3 + - starts fio on a remote VM (vixos1) for 15 secondsj + - kills mayastor 2 after 4 seconds + - assume the test to succeed + - disconnect the VM from mayastor 3 when FIO completes + - removes the nexus from mayastor 3 + - removes the replicas but as mayastor 2 is down, will swallow errors + - removes the pool + + The bulk of this is done by reusing fixtures those fitures are not as + generic as one might like at this point so look/determine if you need them + to begin with. + + By yielding from fixtures, after the tests the function is resumed where + yield is called. + """ + + uri = create_nexus + dev = nvme_connect(uri) + try: + job = Fio("job1", "randwrite", dev).build() + print(job) + + to_kill = containers.get("ms2") + + # create an event loop polling the async processes for completion + await asyncio.gather(run_cmd_async(job), kill_after(to_kill, 4)) + + list = mayastors.get("ms3").nexus_list() + + NEXUS_UUID, size_mb = nexus_uuid + nexus = next(n for n in list if n.uuid == NEXUS_UUID) + + assert nexus.state == pb.NEXUS_DEGRADED + assert nexus.children[1].state == pb.CHILD_FAULTED + + finally: + # disconnect target before we shutdown + nvme_disconnect(uri) + + +@pytest.mark.asyncio +async def test_nexus_2_remote_mirror_kill_one_spdk( + containers, mayastors, nexus_uuid, create_nexus +): + """ + Identical to the previous test except fio uses the SPDK ioengine + """ + + uri = create_nexus + + job = FioSpdk("job1", "randwrite", uri).build() + print(job) + + to_kill = containers.get("ms2") + await asyncio.gather(run_cmd_async(job), kill_after(to_kill, 4)) + + list = mayastors.get("ms3").nexus_list() + + NEXUS_UUID, _ = nexus_uuid + nexus = next(n for n in list if n.uuid == NEXUS_UUID) + + assert nexus.state == pb.NEXUS_DEGRADED + assert nexus.children[1].state == pb.CHILD_FAULTED + + +@pytest.mark.asyncio +async def test_nexus_cntlid(create_nexus_v2, min_cntlid): + """Test create_nexus_v2 NVMe controller ID""" + + uri = create_nexus_v2 + + dev = nvme_connect(uri) + try: + id_ctrl = nvme_id_ctrl(dev) + assert id_ctrl["cntlid"] == min_cntlid + + # Test optional command support + oncs = id_ctrl["oncs"] + assert oncs & 0x04, "should support Dataset Management" + assert oncs & 0x08, "should support Write Zeroes" + + finally: + # disconnect target before we shut down + nvme_disconnect(uri) + + +def test_nexus_resv_key(create_nexus_v2, nexus_name, nexus_uuid, mayastors, resv_key): + """Test create_nexus_v2 replica NVMe reservation key""" + + uri = create_nexus_v2 + NEXUS_UUID, _ = nexus_uuid + + list = mayastors.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == nexus_name) + assert nexus.uuid == NEXUS_UUID + child_uri = 
nexus.children[0].uri + + dev = nvme_connect(child_uri) + try: + report = nvme_resv_report(dev) + print(report) + + assert ( + report["rtype"] == 5 + ), "should have write exclusive, all registrants reservation" + assert report["regctl"] == 1, "should have 1 registered controller" + assert report["ptpls"] == 0, "should have Persist Through Power Loss State of 0" + assert ( + report["regctlext"][0]["cntlid"] == 0xFFFF + ), "should have dynamic controller ID" + + # reservation status reserved + assert (report["regctlext"][0]["rcsts"] & 0x1) == 1 + assert report["regctlext"][0]["rkey"] == resv_key + + finally: + nvme_disconnect(child_uri) + + +def test_nexus_preempt_key( + create_nexus_v2, + create_nexus_2_v2, + nexus_name, + nexus_uuid, + mayastors, + resv_key_2, +): + """Create a nexus on ms3 and ms0, with the latter preempting the NVMe + reservation key registered by ms3, verify that ms3 is no longer registered. + Verify that writes succeed via the nexus on ms0 but not ms3.""" + + NEXUS_UUID, _ = nexus_uuid + + list = mayastors.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == nexus_name) + assert nexus.uuid == NEXUS_UUID + child_uri = nexus.children[0].uri + assert nexus.state == pb.NEXUS_ONLINE + assert nexus.children[0].state == pb.CHILD_ONLINE + assert nexus.children[1].state == pb.CHILD_ONLINE + + dev = nvme_connect(child_uri) + try: + report = nvme_resv_report(dev) + print(report) + + assert ( + report["rtype"] == 5 + ), "should have write exclusive, all registrants reservation" + assert report["regctl"] == 1, "should have 1 registered controller" + assert report["ptpls"] == 0, "should have Persist Through Power Loss State of 0" + assert ( + report["regctlext"][0]["cntlid"] == 0xFFFF + ), "should have dynamic controller ID" + + # reservation status reserved + assert (report["regctlext"][0]["rcsts"] & 0x1) == 1 + assert report["regctlext"][0]["rkey"] == resv_key_2 + + finally: + nvme_disconnect(child_uri) + + # verify write with nexus on ms0 + uri = create_nexus_2_v2 + dev = nvme_connect(uri) + job = "sudo dd if=/dev/urandom of={0} bs=512 count=1".format(dev) + + try: + run_cmd(job) + + finally: + nvme_disconnect(uri) + + list = mayastors.get("ms0").nexus_list_v2() + nexus = next(n for n in list if n.name == nexus_name) + assert nexus.state == pb.NEXUS_ONLINE + assert nexus.children[0].state == pb.CHILD_ONLINE + assert nexus.children[1].state == pb.CHILD_ONLINE + + # verify write error with nexus on ms3 + uri = create_nexus_v2 + dev = nvme_connect(uri) + job = "sudo dd if=/dev/urandom of={0} bs=512 count=1".format(dev) + + try: + run_cmd(job) + + finally: + nvme_disconnect(uri) + + list = mayastors.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == nexus_name) + assert nexus.state == pb.NEXUS_FAULTED + assert nexus.children[0].state == pb.CHILD_FAULTED + assert nexus.children[1].state == pb.CHILD_FAULTED + + +@pytest.mark.asyncio +async def test_nexus_2_remote_mirror_kill_1( + containers, mayastors, create_nexus, nexus_uuid +): + """Create a nexus on ms3 with replicas on ms1 and ms2. Sleep for 10s. Kill + ms2 after 4s, verify that the second child is degraded. 
+ """ + + uri = create_nexus + NEXUS_UUID, _ = nexus_uuid + + job = "sleep 10" + + try: + # create an event loop polling the async processes for completion + await asyncio.gather( + run_cmd_async(job), + kill_after(containers.get("ms2"), 4), + ) + except Exception as e: + raise (e) + finally: + list = mayastors.get("ms3").nexus_list() + nexus = next(n for n in list if n.uuid == NEXUS_UUID) + + assert nexus.state == pb.NEXUS_DEGRADED + + assert nexus.children[0].state == pb.CHILD_ONLINE + assert nexus.children[1].state == pb.CHILD_FAULTED + + +@pytest.mark.asyncio +async def test_nexus_2_remote_mirror_kill_all_fio( + containers, mayastors, create_nexus, nexus_uuid +): + """Create a nexus on ms3 with replicas on ms1 and ms2. Start fio_spdk for + 15s. Kill ms2 after 4s, ms1 after 4s. Assume the fail with a + ChildProcessError is due to fio bailing out. Remove the nexus from ms3. + """ + + uri = create_nexus + NEXUS_UUID, _ = nexus_uuid + + job = FioSpdk("job1", "randwrite", uri).build() + + try: + # create an event loop polling the async processes for completion + await asyncio.gather( + run_cmd_async(job), + kill_after(containers.get("ms2"), 4), + kill_after(containers.get("ms1"), 4), + ) + except ChildProcessError: + pass + except Exception as e: + # if it's not a child process error fail the test + raise (e) + finally: + list = mayastors.get("ms3").nexus_list() + nexus = next(n for n in list if n.uuid == NEXUS_UUID) + + assert nexus.state == pb.NEXUS_FAULTED + + assert nexus.children[0].state == pb.CHILD_FAULTED + assert nexus.children[1].state == pb.CHILD_FAULTED diff --git a/test/python/tests/nexus/test_null_nexus.py b/test/python/tests/nexus/test_null_nexus.py new file mode 100644 index 000000000..9fadf7503 --- /dev/null +++ b/test/python/tests/nexus/test_null_nexus.py @@ -0,0 +1,94 @@ +from common.command import run_cmd_async +from common.nvme import nvme_connect, nvme_disconnect +from common.fio import Fio +from common.mayastor import containers, mayastors +import pytest +import asyncio +import uuid as guid +import mayastor_pb2 as pb + +NEXUS_COUNT = 15 + + +def check_nexus_state(ms, state=pb.NEXUS_ONLINE): + for nexus in ms.nexus_list(): + assert nexus.state == state + for child in nexus.children: + assert child.state == pb.CHILD_ONLINE + + +@pytest.fixture +def device_nodes(): + yield ["ms0", "ms1", "ms2"] + + +@pytest.fixture +def nexus_node(): + yield "ms3" + + +@pytest.fixture +def create_null_devices(mayastors, device_nodes): + for node in device_nodes: + ms = mayastors.get(node) + for i in range(NEXUS_COUNT): + ms.bdev_create("null:///null{:02d}?blk_size=512&size_mb=100".format(i)) + yield + for node in device_nodes: + ms = mayastors.get(node) + for dev in ms.bdev_list(): + ms.bdev_destroy(dev.uri) + + +@pytest.fixture +def share_null_devices(mayastors, device_nodes, create_null_devices): + for node in device_nodes: + ms = mayastors.get(node) + for dev in ms.bdev_list(): + ms.bdev_share(dev.name) + yield + for node in device_nodes: + ms = mayastors.get(node) + for dev in ms.bdev_list(): + ms.bdev_unshare(dev.name) + + +@pytest.fixture +def create_nexuses(mayastors, device_nodes, nexus_node, share_null_devices): + ms = mayastors.get(nexus_node) + uris = [ + [dev.share_uri for dev in mayastors.get(node).bdev_list()] + for node in device_nodes + ] + for children in zip(*uris): + ms.nexus_create(guid.uuid4(), 60 * 1024 * 1024, list(children)) + yield + for nexus in ms.nexus_list(): + ms.nexus_destroy(nexus.uuid) + + +@pytest.fixture +def publish_nexuses(mayastors, nexus_node, 
create_nexuses): + nexuses = [] + ms = mayastors.get(nexus_node) + for nexus in ms.nexus_list(): + nexuses.append(ms.nexus_publish(nexus.uuid)) + yield nexuses + for nexus in ms.nexus_list(): + ms.nexus_unpublish(nexus.uuid) + + +@pytest.fixture +def connect_devices(publish_nexuses): + yield [nvme_connect(nexus) for nexus in publish_nexuses] + for nexus in publish_nexuses: + nvme_disconnect(nexus) + + +@pytest.mark.asyncio +async def test_null_nexus(mayastors, nexus_node, connect_devices): + ms = mayastors.get(nexus_node) + check_nexus_state(ms) + + job = Fio("job1", "randwrite", connect_devices).build() + await run_cmd_async(job) diff --git a/test/python/tests/nexus/test_remote_only.py b/test/python/tests/nexus/test_remote_only.py new file mode 100644 index 000000000..010f1ae94 --- /dev/null +++ b/test/python/tests/nexus/test_remote_only.py @@ -0,0 +1,77 @@ +import time + +from common.mayastor import container_mod, mayastor_mod as mayastors +import uuid as guid +import pytest + +VOLUME_COUNT = 1 + + +def ensure_zero_devices(mayastors): + """ + Assert all nodes have no bdevs left. + """ + nodes = ["ms0", "ms1"] + + for node in nodes: + bdevs = mayastors[node].bdev_list() + assert len(bdevs) == 0 + + +def create_publish(node, children): + """ + Create a nexus with the given children. The nexus is created, + published, left alone for a short while, and finally destroyed. + """ + nexus_uuids = list() + for i in range(VOLUME_COUNT): + # Create the nexus + new_uuid = guid.uuid4() + nexus_uuids.append(new_uuid) + nexus = node.nexus_create(new_uuid, 20 * 1024 * 1024, list(children[i])) + + # Publish the nexus + node.nexus_publish(nexus.uuid) + + time.sleep(2) + + for i in range(VOLUME_COUNT): + uuid = nexus_uuids[i] + node.nexus_destroy(str(uuid)) + + +def delete_all_bdevs(node): + for dev in node.bdev_list(): + node.bdev_unshare(dev.name) + node.bdev_destroy(f"malloc:///{dev.name}?size_mb=50") + + +@pytest.mark.parametrize("times", range(10)) +def test_remote_only(mayastors, times): + """ + Test nexus with a remote bdev + """ + print("Run ", times) + remotes = ["ms1"] + local = "ms0" + + children = list(list()) + for i in range(VOLUME_COUNT): + children.append(list()) + + ensure_zero_devices(mayastors) + + device = "malloc:///malloc{index}?size_mb=50" + device_name = "malloc{index}" + for remote in remotes: + for i in range(VOLUME_COUNT): + mayastors[remote].bdev_create(device.format(index=i)) + uri = mayastors[remote].bdev_share(device_name.format(index=i)) + children[i].append(uri) + + create_publish(mayastors[local], children) + + for remote in remotes: + delete_all_bdevs(mayastors[remote]) + + ensure_zero_devices(mayastors) diff --git a/test/python/tests/nexus_multipath/docker-compose.yml b/test/python/tests/nexus_multipath/docker-compose.yml new file mode 100644 index 000000000..e7e216ba6 --- /dev/null +++ b/test/python/tests/nexus_multipath/docker-compose.yml @@ -0,0 +1,118 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix
+ - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms1: + container_name: "ms1" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.3 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms2: + container_name: "ms2" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.4 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 5,6 -r /tmp/ms2.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.4 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms3: + container_name: "ms3" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.5 + - NVME_KATO_MS=1000 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 0,7 -r /tmp/ms3.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.5 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/nexus_multipath/features/nexus-multipath.feature b/test/python/tests/nexus_multipath/features/nexus-multipath.feature new file mode 100644 index 000000000..6b2c9594e --- /dev/null +++ b/test/python/tests/nexus_multipath/features/nexus-multipath.feature @@ -0,0 +1,11 @@ +Feature: Mayastor nexus multipath management + + Background: + Given a local mayastor instance + And a remote mayastor instance + + Scenario: running IO against an ANA NVMe controller + Given a client connected to two controllers to the same namespace + And both controllers are online + When I start Fio + Then I should be able to see IO flowing to one path only diff --git a/test/python/tests/nexus_multipath/test_bdd_nexus_multipath.py b/test/python/tests/nexus_multipath/test_bdd_nexus_multipath.py new file mode 100644 index 000000000..012071f30 --- /dev/null +++ b/test/python/tests/nexus_multipath/test_bdd_nexus_multipath.py @@ -0,0 +1,255 @@ +import pytest +import logging +from pytest_bdd import given, scenario, then, when, parsers + +from common.command import run_cmd +from common.fio import Fio +from common.mayastor import container_mod, mayastor_mod + +import grpc +import mayastor_pb2 as pb +from common.nvme import ( + nvme_connect, + nvme_disconnect, + nvme_list_subsystems, +) + + +@scenario( + "features/nexus-multipath.feature", "running IO against an ANA NVMe controller" +) +def test_running_io_against_ana_nvme_ctrlr(): + "Running IO against an ANA NVMe controller." 
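For readers new to pytest-bdd: the @scenario decorator above binds the test function to the "running IO against an ANA NVMe controller" scenario in features/nexus-multipath.feature, and every Given/When/Then line in that scenario must be matched by a step definition further down in this module. The minimal sketch below shows that mechanism in isolation; the feature path, step wording and fixture names in the sketch are illustrative only and are not part of this change (it also assumes a matching feature file exists next to the test).

    # sketch_bdd.py, an illustrative example and not part of this patch
    from pytest_bdd import given, parsers, scenario, then, when

    @scenario("features/example.feature", "running IO against a controller")
    def test_example():
        """Body is empty; pytest-bdd runs the bound Given/When/Then steps."""

    @given("a connected device", target_fixture="device")
    def connected_device():
        # the return value becomes available to later steps as the "device" fixture
        return "/dev/nvme0n1"

    @when(parsers.parse("I start {tool}"))
    def start_tool(device, tool):
        # "tool" is parsed out of the feature text at runtime
        print(f"would start {tool} on {device}")

    @then("I should see IO flowing")
    def io_flowing(device):
        assert device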
+ + +@pytest.fixture(scope="module") +def create_nexus( + mayastor_mod, nexus_name, nexus_uuid, create_replica, min_cntlid, resv_key +): + """ Create a nexus on ms3 with 2 replicas """ + hdls = mayastor_mod + replicas = create_replica + replicas = [k.uri for k in replicas] + + NEXUS_UUID, size_mb = nexus_uuid + NEXUS_NAME = nexus_name + + hdls["ms3"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid, + min_cntlid + 9, + resv_key, + 0, + replicas, + ) + uri = hdls["ms3"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + assert len(hdls["ms1"].pool_list().pools) == 1 + assert len(hdls["ms2"].pool_list().pools) == 1 + + dev = nvme_connect(uri) + + yield dev + nvme_disconnect(uri) + hdls["ms3"].nexus_destroy(NEXUS_NAME) + + +@pytest.fixture(scope="module") +def create_nexus_2(mayastor_mod, nexus_name, nexus_uuid, min_cntlid_2, resv_key_2): + """ Create a 2nd nexus on ms0 with the same 2 replicas but with resv_key_2 """ + hdls = mayastor_mod + NEXUS_NAME = nexus_name + + replicas = [] + list = mayastor_mod.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == NEXUS_NAME) + replicas.append(nexus.children[0].uri) + replicas.append(nexus.children[1].uri) + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms0"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid_2, + min_cntlid_2 + 9, + resv_key_2, + 0, + replicas, + ) + uri = hdls["ms0"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms0"].bdev_list()) == 1 + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + dev = nvme_connect(uri) + + yield dev + nvme_disconnect(uri) + hdls["ms0"].nexus_destroy(NEXUS_NAME) + + +@pytest.fixture(scope="module") +def pool_config(): + pool = {} + pool["name"] = "tpool" + pool["uri"] = "malloc:///disk0?size_mb=100" + return pool + + +@pytest.fixture(scope="module") +def replica_uuid(): + UUID = "0000000-0000-0000-0000-000000000001" + size_mb = 64 * 1024 * 1024 + return (UUID, size_mb) + + +@pytest.fixture(scope="module") +def nexus_name(): + NEXUS_NAME = "nexus0" + return NEXUS_NAME + + +@pytest.fixture(scope="module") +def nexus_uuid(): + NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" + size_mb = 64 * 1024 * 1024 + return (NEXUS_UUID, size_mb) + + +@pytest.fixture(scope="module") +def min_cntlid(): + """NVMe minimum controller ID.""" + min_cntlid = 50 + return min_cntlid + + +@pytest.fixture(scope="module") +def min_cntlid_2(): + """NVMe minimum controller ID for 2nd nexus.""" + min_cntlid = 60 + return min_cntlid + + +@pytest.fixture(scope="module") +def resv_key(): + """NVMe reservation key.""" + resv_key = 0xABCDEF0012345678 + return resv_key + + +@pytest.fixture(scope="module") +def resv_key_2(): + """NVMe reservation key for 2nd nexus.""" + resv_key = 0x1234567890ABCDEF + return resv_key + + +@pytest.fixture(scope="module") +def create_pools(mayastor_mod, pool_config): + hdls = mayastor_mod + + cfg = pool_config + pools = [] + + pools.append(hdls["ms1"].pool_create(cfg.get("name"), cfg.get("uri"))) + + pools.append(hdls["ms2"].pool_create(cfg.get("name"), cfg.get("uri"))) + + for p in pools: + assert p.state == pb.POOL_ONLINE + yield pools + try: + hdls["ms1"].pool_destroy(cfg.get("name")) + hdls["ms2"].pool_destroy(cfg.get("name")) + except Exception: + pass + + +@pytest.fixture(scope="module") +def create_replica(mayastor_mod, replica_uuid, create_pools): + hdls = 
mayastor_mod + pools = create_pools + replicas = [] + + UUID, size_mb = replica_uuid + + replicas.append(hdls["ms1"].replica_create(pools[0].name, UUID, size_mb)) + replicas.append(hdls["ms2"].replica_create(pools[0].name, UUID, size_mb)) + + yield replicas + try: + hdls["ms1"].replica_destroy(UUID) + hdls["ms2"].replica_destroy(UUID) + except Exception as e: + logging.debug(e) + + +@given( + "a client connected to two controllers to the same namespace", + target_fixture="get_nvme_client", +) +def get_nvme_client(create_nexus, create_nexus_2): + dev = create_nexus + dev2 = create_nexus_2 + return dev, dev2 + + +@given("both controllers are online") +def check_controllers_online(get_nvme_client): + devs = get_nvme_client + assert devs[0] == devs[1], "should have one namespace" + desc = nvme_list_subsystems(devs[0]) + paths = desc["Subsystems"][0]["Paths"] + assert len(paths) == 2, "should have 2 paths" + + for p in paths: + assert p["State"] == "live" + + +@when("I start Fio") +def start_io(get_nvme_client): + devs = get_nvme_client + job = Fio("job1", "randwrite", devs[0]).build() + run_cmd(job) + + +@then("I should be able to see IO flowing to one path only") +def check_io_one_path(mayastor_mod): + hdls = mayastor_mod + + # default NUMA io_policy has all IO going to the first controller + stat = hdls["ms3"].stat_nvme_controllers() + assert stat[0].stats.num_write_ops > 1000 + assert stat[1].stats.num_write_ops > 1000 + + stat = hdls["ms0"].stat_nvme_controllers() + assert stat[0].stats.num_write_ops == 0 + assert stat[1].stats.num_write_ops == 0 + + +@given("a local mayastor instance") +def local_mayastor_instance(nexus_instance): + pass + + +@given("a remote mayastor instance") +def remote_mayastor_instance(remote_instance): + pass + + +@pytest.fixture(scope="module") +def remote_instance(): + yield "ms0" + + +@pytest.fixture(scope="module") +def nexus_instance(): + yield "ms1" diff --git a/test/python/tests/nexus_multipath/test_nexus_multipath.py b/test/python/tests/nexus_multipath/test_nexus_multipath.py new file mode 100644 index 000000000..927429f17 --- /dev/null +++ b/test/python/tests/nexus_multipath/test_nexus_multipath.py @@ -0,0 +1,481 @@ +from common.volume import Volume +from common.mayastor import container_mod, mayastor_mod +from common.hdl import MayastorHandle +import logging +import pytest +import subprocess +import time +import uuid as guid +import mayastor_pb2 as pb +from common.fio import Fio +from common.nvme import ( + nvme_connect, + nvme_disconnect, + nvme_list_subsystems, + nvme_resv_report, +) + + +@pytest.fixture +def create_nexus_no_destroy( + mayastor_mod, nexus_name, nexus_uuid, create_replica, min_cntlid, resv_key +): + """ Create a nexus on ms3 with 2 replicas """ + hdls = mayastor_mod + replicas = create_replica + replicas = [k.uri for k in replicas] + + NEXUS_UUID, size_mb = nexus_uuid + NEXUS_NAME = nexus_name + + hdls["ms3"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid, + min_cntlid + 9, + resv_key, + 0, + replicas, + ) + uri = hdls["ms3"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + assert len(hdls["ms1"].pool_list().pools) == 1 + assert len(hdls["ms2"].pool_list().pools) == 1 + + return uri + + +@pytest.fixture +def create_nexus(create_nexus_no_destroy, mayastor_mod, nexus_name): + hdls = mayastor_mod + NEXUS_NAME = nexus_name + uri = create_nexus_no_destroy + yield uri + hdls["ms3"].nexus_destroy(NEXUS_NAME) + + 
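The nexus_create_v2 calls in the fixtures above take a long positional argument list. Read against the values passed in this module, the order appears to be: nexus name, nexus UUID, size, the lowest and highest NVMe controller IDs the target may hand out, the replica reservation key, a trailing numeric argument that is always 0 here, and the list of child URIs. That reading is inferred from usage, not stated by this change. A small keyword-style wrapper, sketched below with an invented name, spells the ordering out once:

    # Illustrative helper, not part of this patch. "hdl" is assumed to be one
    # of the per-instance handles returned by the mayastor_mod fixture.
    def create_nexus_v2_kw(hdl, *, name, uuid, size, min_cntlid, resv_key,
                           children, cntlid_span=10, last_arg=0):
        return hdl.nexus_create_v2(
            name,                           # e.g. "nexus0"
            uuid,                           # nexus UUID
            size,                           # nexus size (the fixtures pass size_mb)
            min_cntlid,                     # lowest controller ID, e.g. 50
            min_cntlid + cntlid_span - 1,   # highest controller ID (min_cntlid + 9 above)
            resv_key,                       # NVMe reservation key for the replicas
            last_arg,                       # passed as 0 throughout this file
            children,                       # replica URIs
        )

    # Hypothetical usage mirroring the create_nexus fixture above:
    # create_nexus_v2_kw(hdls["ms3"], name=NEXUS_NAME, uuid=NEXUS_UUID,
    #                    size=size_mb, min_cntlid=50, resv_key=resv_key,
    #                    children=replicas)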
+@pytest.fixture +def create_nexus_2_no_destroy( + mayastor_mod, nexus_name, nexus_uuid, min_cntlid_2, resv_key_2 +): + """ Create a 2nd nexus on ms0 with the same 2 replicas but with resv_key_2 """ + hdls = mayastor_mod + NEXUS_NAME = nexus_name + + replicas = [] + list = mayastor_mod.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == NEXUS_NAME) + replicas.append(nexus.children[0].uri) + replicas.append(nexus.children[1].uri) + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms0"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid_2, + min_cntlid_2 + 9, + resv_key_2, + 0, + replicas, + ) + uri = hdls["ms0"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms0"].bdev_list()) == 1 + assert len(hdls["ms1"].bdev_list()) == 2 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + return uri + + +@pytest.fixture +def create_nexus_2(create_nexus_2_no_destroy, mayastor_mod, nexus_name): + hdls = mayastor_mod + NEXUS_NAME = nexus_name + uri = create_nexus_2_no_destroy + yield uri + hdls["ms0"].nexus_destroy(NEXUS_NAME) + + +@pytest.fixture +def create_nexus_dev(create_nexus, connect_nexus): + uri = create_nexus + dev = connect_nexus + yield dev + nvme_disconnect(uri) + + +@pytest.fixture +def create_nexus_2_dev(create_nexus_2, connect_nexus_2): + uri = create_nexus_2 + dev = connect_nexus_2 + yield dev + nvme_disconnect(uri) + + +@pytest.fixture +def connect_nexus(create_nexus_no_destroy): + uri = create_nexus_no_destroy + dev = nvme_connect(uri) + return dev + + +@pytest.fixture +def connect_nexus_2(create_nexus_2_no_destroy): + uri = create_nexus_2_no_destroy + dev = nvme_connect(uri) + return dev + + +@pytest.fixture +def unpublish_nexus(mayastor_mod, nexus_name): + hdls = mayastor_mod + NEXUS_NAME = nexus_name + hdls["ms3"].nexus_unpublish(NEXUS_NAME) + + +@pytest.fixture +def destroy_nexus_2(mayastor_mod, nexus_name): + hdls = mayastor_mod + NEXUS_NAME = nexus_name + hdls["ms0"].nexus_destroy(NEXUS_NAME) + + +@pytest.fixture +def publish_nexus(mayastor_mod, nexus_name): + hdls = mayastor_mod + NEXUS_NAME = nexus_name + hdls["ms3"].nexus_publish(NEXUS_NAME) + + +@pytest.fixture +def create_nexus_3_dev( + mayastor_mod, nexus_name, nexus_uuid, replica_uuid, min_cntlid_3, resv_key_3 +): + """ Create a 3rd nexus on ms1 with the same 2 replicas but with resv_key_3 """ + hdls = mayastor_mod + NEXUS_NAME = nexus_name + + replicas = [] + list = mayastor_mod.get("ms3").nexus_list_v2() + nexus = next(n for n in list if n.name == NEXUS_NAME) + # use loopback until nvme initiator can connect to target in same instance + REP_UUID, rep_size_mb = replica_uuid + replicas.append("loopback:///" + REP_UUID) + replicas.append(nexus.children[1].uri) + + NEXUS_UUID, size_mb = nexus_uuid + + hdls["ms1"].nexus_create_v2( + NEXUS_NAME, + NEXUS_UUID, + size_mb, + min_cntlid_3, + min_cntlid_3 + 9, + resv_key_3, + 0, + replicas, + ) + uri = hdls["ms1"].nexus_publish(NEXUS_NAME) + assert len(hdls["ms0"].bdev_list()) == 1 + assert len(hdls["ms1"].bdev_list()) == 3 + assert len(hdls["ms2"].bdev_list()) == 2 + assert len(hdls["ms3"].bdev_list()) == 1 + + dev = nvme_connect(uri) + + yield dev + nvme_disconnect(uri) + hdls["ms1"].nexus_destroy(NEXUS_NAME) + + +@pytest.fixture +def pool_config(): + """ + The idea is this used to obtain the pool types and names that should be + created. 
+ """ + pool = {} + pool["name"] = "tpool" + pool["uri"] = "malloc:///disk0?size_mb=100" + return pool + + +@pytest.fixture +def replica_uuid(): + """Replica UUID to be used.""" + UUID = "0000000-0000-0000-0000-000000000001" + size_mb = 64 * 1024 * 1024 + return (UUID, size_mb) + + +@pytest.fixture +def nexus_name(): + """Nexus name to be used.""" + NEXUS_NAME = "nexus0" + return NEXUS_NAME + + +@pytest.fixture +def nexus_uuid(): + """Nexus UUID to be used.""" + NEXUS_UUID = "3ae73410-6136-4430-a7b5-cbec9fe2d273" + size_mb = 64 * 1024 * 1024 + return (NEXUS_UUID, size_mb) + + +@pytest.fixture +def min_cntlid(): + """NVMe minimum controller ID to be used.""" + min_cntlid = 50 + return min_cntlid + + +@pytest.fixture +def min_cntlid_2(): + """NVMe minimum controller ID to be used for 2nd nexus.""" + min_cntlid = 60 + return min_cntlid + + +@pytest.fixture +def min_cntlid_3(): + """NVMe minimum controller ID for 3rd nexus.""" + min_cntlid = 70 + return min_cntlid + + +@pytest.fixture +def resv_key(): + """NVMe reservation key to be used.""" + resv_key = 0xABCDEF0012345678 + return resv_key + + +@pytest.fixture +def resv_key_2(): + """NVMe reservation key to be used for 2nd nexus.""" + resv_key = 0x1234567890ABCDEF + return resv_key + + +@pytest.fixture +def resv_key_3(): + """NVMe reservation key for 3rd nexus.""" + resv_key = 0x567890ABCDEF1234 + return resv_key + + +@pytest.fixture +def create_pools(mayastor_mod, pool_config): + hdls = mayastor_mod + + cfg = pool_config + pools = [] + + pools.append(hdls["ms1"].pool_create(cfg.get("name"), cfg.get("uri"))) + + pools.append(hdls["ms2"].pool_create(cfg.get("name"), cfg.get("uri"))) + + for p in pools: + assert p.state == pb.POOL_ONLINE + yield pools + try: + hdls["ms1"].pool_destroy(cfg.get("name")) + hdls["ms2"].pool_destroy(cfg.get("name")) + except Exception: + pass + + +@pytest.fixture +def create_replica(mayastor_mod, replica_uuid, create_pools): + hdls = mayastor_mod + pools = create_pools + replicas = [] + + UUID, size_mb = replica_uuid + + replicas.append(hdls["ms1"].replica_create(pools[0].name, UUID, size_mb)) + replicas.append(hdls["ms2"].replica_create(pools[0].name, UUID, size_mb)) + + yield replicas + try: + hdls["ms1"].replica_destroy(UUID) + hdls["ms2"].replica_destroy(UUID) + except Exception as e: + logging.debug(e) + + +@pytest.fixture +def start_fio(create_nexus_dev): + dev = create_nexus_dev + cmd = Fio("job1", "randwrite", dev).build().split() + output = subprocess.Popen(cmd) + # wait for fio to start + time.sleep(1) + yield + output.communicate() + assert output.returncode == 0 + + +@pytest.fixture +def delay(): + """Wait for kernel to notice change to path state""" + time.sleep(2) + + +@pytest.fixture +def delay2(): + """Wait for kernel to notice change to path state""" + time.sleep(2) + + +@pytest.fixture +def verify_paths(connect_nexus): + dev = connect_nexus + desc = nvme_list_subsystems(dev) + paths = desc["Subsystems"][0]["Paths"] + assert len(paths) == 2, "should have 2 paths" + assert paths[0]["State"] == "connecting" + assert paths[1]["State"] == "live" + + +@pytest.mark.timeout(60) +def test_nexus_multipath( + create_nexus, + create_nexus_2, + nexus_name, + nexus_uuid, + mayastor_mod, + resv_key, + resv_key_2, +): + """Create 2 nexuses, each with 2 replicas, with different NVMe reservation keys""" + + uri = create_nexus + uri2 = create_nexus_2 + NEXUS_UUID, _ = nexus_uuid + NEXUS_NAME = nexus_name + resv_key = resv_key + resv_key_2 = resv_key_2 + + list = mayastor_mod.get("ms3").nexus_list_v2() + nexus = 
next(n for n in list if n.name == NEXUS_NAME) + assert nexus.uuid == NEXUS_UUID + + for c in range(2): + child_uri = nexus.children[c].uri + + dev = nvme_connect(child_uri) + report = nvme_resv_report(dev) + + assert ( + report["rtype"] == 5 + ), "should have write exclusive, all registrants reservation" + assert report["regctl"] == 2, "should have 2 registered controllers" + assert report["ptpls"] == 0, "should have Persist Through Power Loss State of 0" + for i in range(2): + assert ( + report["regctlext"][i]["cntlid"] == 0xFFFF + ), "should have dynamic controller ID" + assert report["regctlext"][0]["rkey"] == resv_key + assert report["regctlext"][1]["rkey"] == resv_key_2 + assert (report["regctlext"][0]["rcsts"] & 0x1) == 1 + assert (report["regctlext"][1]["rcsts"] & 0x1) == 0 + + nvme_disconnect(child_uri) + + +@pytest.mark.timeout(60) +def test_nexus_multipath_add_3rd_path( + create_nexus_dev, + create_nexus_2_dev, + start_fio, + create_nexus_3_dev, +): + """Create 2 nexuses, connect over NVMe, start fio, create and connect a 3rd nexus.""" + + dev = create_nexus_dev + dev2 = create_nexus_2_dev + start_fio + dev3 = create_nexus_3_dev + assert dev == dev2, "should have one namespace" + assert dev == dev3, "should have one namespace" + + desc = nvme_list_subsystems(dev) + paths = desc["Subsystems"][0]["Paths"] + assert len(paths) == 3, "should have 3 paths" + + # wait for fio to complete + time.sleep(15) + + +@pytest.mark.timeout(60) +def test_nexus_multipath_remove_3rd_path( + create_nexus_dev, + create_nexus_2_no_destroy, + connect_nexus_2, + create_nexus_3_dev, + start_fio, + destroy_nexus_2, +): + """Create 3 nexuses, connect over NVMe, start fio, destroy 2nd nexus.""" + + dev = create_nexus_dev + dev2 = connect_nexus_2 + dev3 = create_nexus_3_dev + assert dev == dev2, "should have one namespace" + assert dev == dev3, "should have one namespace" + + desc = nvme_list_subsystems(dev) + paths = desc["Subsystems"][0]["Paths"] + assert len(paths) == 3, "should have 3 paths" + + assert paths[0]["State"] == "live" + # kernel 5.4 reports resetting, 5.10 reports connecting + assert paths[1]["State"] == "resetting" or paths[1]["State"] == "connecting" + assert paths[2]["State"] == "live" + + # wait for fio to complete + time.sleep(15) + + +@pytest.mark.timeout(60) +def test_nexus_multipath_remove_all_paths( + create_nexus_no_destroy, + create_nexus_2_no_destroy, + connect_nexus, + connect_nexus_2, + start_fio, + unpublish_nexus, + delay, + verify_paths, + destroy_nexus_2, + delay2, + publish_nexus, +): + """Create 2 nexuses, connect over NVMe, start fio, unpublish one nexus, + verify failover, destroy the other nexus, re-publish the first nexus, + verify IO restarts.""" + + dev = connect_nexus + dev2 = connect_nexus_2 + assert dev == dev2, "should have one namespace" + + # wait for reconnection + time.sleep(10) + + desc = nvme_list_subsystems(dev) + paths = desc["Subsystems"][0]["Paths"] + assert len(paths) == 2, "should have 2 paths" + + assert paths[0]["State"] == "live" + assert paths[1]["State"] == "connecting" + + # wait for fio to complete + time.sleep(5) diff --git a/test/python/tests/publish/docker-compose.yml b/test/python/tests/publish/docker-compose.yml new file mode 100644 index 000000000..f4b2a7ccf --- /dev/null +++ b/test/python/tests/publish/docker-compose.yml @@ -0,0 +1,65 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + 
- NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp + ms1: + container_name: "ms1" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.3 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/publish/features/nexus.feature b/test/python/tests/publish/features/nexus.feature new file mode 100644 index 000000000..0cda4a253 --- /dev/null +++ b/test/python/tests/publish/features/nexus.feature @@ -0,0 +1,97 @@ +Feature: Mayastor nexus management + + Background: + Given a local mayastor instance + And a remote mayastor instance + + Scenario: creating a nexus + Given a list of child devices + When creating a nexus + Then the nexus should be created + + Scenario: creating the same nexus + Given a nexus + When creating an identical nexus + Then the operation should succeed + + Scenario: creating a different nexus with the same children + Given a nexus + When attempting to create a new nexus + Then the operation should fail with existing URIs in use + + Scenario: creating a nexus without children + When attempting to create a nexus with no children + Then the operation should fail + + Scenario: creating a nexus with missing children + When attempting to create a nexus with a child URI that does not exist + Then the operation should fail + + Scenario: creating a nexus from children with mixed block sizes + When attempting to create a nexus from child devices with mixed block sizes + Then the nexus should not be created + + Scenario: creating a nexus that is larger than its children + Given a list of child devices + When attempting to create a nexus with a size larger than that of any of its children + Then the nexus should not be created + + Scenario: destroying a nexus + Given a nexus + When destroying the nexus + Then the nexus should be destroyed + + Scenario: destroying a nexus without first unpublishing + Given a nexus published via "nvmf" + When destroying the nexus without unpublishing it + Then the nexus should be destroyed + + Scenario: destroying a nexus that does not exist + When destroying a nexus that does not exist + Then the operation should succeed + + Scenario: listing nexuses + Given a nexus + When listing all nexuses + Then the nexus should appear in the output list + + Scenario: removing a child from a nexus + Given a nexus + When removing a child + Then the child should be successfully removed + + Scenario: adding a child to a nexus + Given a nexus + When removing a child + And adding the removed child + Then the child should be successfully added + + Scenario: 
publishing a nexus + Given a nexus + When publishing a nexus via "nvmf" + Then the nexus should be successfully published + + Scenario: unpublishing a nexus + Given a nexus published via "nvmf" + When unpublishing the nexus + Then the nexus should be successfully unpublished + + Scenario: unpublishing a nexus that is not published + Given an unpublished nexus + When unpublishing the nexus + Then the request should succeed + + Scenario: republishing a nexus with the same protocol + Given a nexus published via "nvmf" + When publishing the nexus with the same protocol + Then the nexus should be successfully published + + Scenario: republishing a nexus with a different protocol + Given a nexus published via "nvmf" + When attempting to publish the nexus with a different protocol + Then the request should fail + + Scenario: publishing a nexus with a crypto-key + Given an unpublished nexus + When publishing the nexus using a crypto-key + Then the request should succeed diff --git a/test/python/tests/publish/test_bdd_nexus.py b/test/python/tests/publish/test_bdd_nexus.py new file mode 100644 index 000000000..3384ccb35 --- /dev/null +++ b/test/python/tests/publish/test_bdd_nexus.py @@ -0,0 +1,493 @@ +import pytest +from pytest_bdd import given, scenario, then, when, parsers + +from collections import namedtuple +import subprocess + +from common.mayastor import container_mod, mayastor_mod +from common.volume import Volume + +import grpc +import mayastor_pb2 as pb + +BaseBdev = namedtuple("BaseBdev", "name uri") + +LocalFile = namedtuple("LocalFile", "path uri") + + +def megabytes(n): + return n * 1024 * 1024 + + +def get_child_uris(nexus): + return [child.uri for child in nexus.children] + + +def share_type(protocol): + TYPES = { + "nbd": pb.ShareProtocolNexus.NEXUS_NBD, + "nvmf": pb.ShareProtocolNexus.NEXUS_NVMF, + "iscsi": pb.ShareProtocolNexus.NEXUS_ISCSI, + } + return TYPES[protocol] + + +@scenario("features/nexus.feature", "creating a nexus") +def test_creating_a_nexus(): + "Creating a nexus." + + +@scenario("features/nexus.feature", "creating the same nexus") +def test_creating_the_same_nexus(): + "Creating the same nexus." + + +@scenario("features/nexus.feature", "creating a different nexus with the same children") +def test_fail_creating_a_different_nexus_with_the_same_children(): + "Creating a different nexus with the same children." + + +@scenario("features/nexus.feature", "creating a nexus without children") +def test_fail_creating_a_nexus_without_children(): + "Creating a nexus without children." + + +@scenario("features/nexus.feature", "creating a nexus with missing children") +def test_fail_creating_a_nexus_with_missing_children(): + "Creating a nexus with missing children." + + +@scenario( + "features/nexus.feature", "creating a nexus from children with mixed block sizes" +) +def test_fail_creating_a_nexus_from_children_with_mixed_block_sizes(): + "Creating a nexus from children with mixed block sizes." + + +@scenario("features/nexus.feature", "creating a nexus that is larger than its children") +def test_fail_creating_a_nexus_larger_than_its_children(): + "Creating a nexus that is larger than its children." + + +@scenario("features/nexus.feature", "destroying a nexus") +def test_destroying_a_nexus(): + "Destroying a nexus." + + +@scenario("features/nexus.feature", "destroying a nexus without first unpublishing") +def test_destroying_a_nexus_without_first_unpublishing(): + "Destroying a nexus without first unpublishing." 
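One convention in this module is worth calling out: for the scenarios that end in "the operation should fail" or "the request should fail", the failure is asserted inside the When step by wrapping the gRPC call in pytest.raises, and the corresponding Then step is an empty placeholder (the real step definitions appear further down). A minimal sketch of that shape, with invented step wording and an assumed grpc_stub fixture:

    # Illustrative only; the real steps are defined later in this file.
    import grpc
    import pytest
    from pytest_bdd import then, when

    @when("attempting an operation that must be rejected")
    def attempt_rejected_operation(grpc_stub):
        # the expected error is checked at the point of the call ...
        with pytest.raises(grpc.RpcError) as error:
            grpc_stub.DoSomethingInvalid()
        assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT

    @then("the operation should fail")
    def operation_should_fail():
        # ... so there is nothing left to verify here
        pass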
+ + +@scenario("features/nexus.feature", "destroying a nexus that does not exist") +def test_destroying_a_nexus_that_does_not_exist(): + "Destroying a nexus that does not exist." + + +@scenario("features/nexus.feature", "listing nexuses") +def test_listing_nexuses(): + "Listing nexuses." + + +@scenario("features/nexus.feature", "removing a child from a nexus") +def test_removing_a_child_from_a_nexus(): + "Removing a child from a nexus." + + +@scenario("features/nexus.feature", "adding a child to a nexus") +def test_adding_a_child_to_a_nexus(): + "Adding a child to a nexus." + + +@scenario("features/nexus.feature", "publishing a nexus") +def test_publishing_a_nexus(): + "Publishing a nexus." + + +@scenario("features/nexus.feature", "unpublishing a nexus") +def test_unpublishing_a_nexus(): + "Unpublishing a nexus." + + +@scenario("features/nexus.feature", "unpublishing a nexus that is not published") +def test_unpublishing_a_nexus_that_is_not_published(): + "Unpublishing a nexus that is not published." + + +@scenario("features/nexus.feature", "republishing a nexus with a different protocol") +def test_fail_republishing_a_nexus_with_a_different_protocol(): + "Republishing a nexus with a different protocol." + + +@scenario("features/nexus.feature", "republishing a nexus with the same protocol") +def test_republishing_a_nexus_with_the_same_protocol(): + "Republishing a nexus with the same protocol." + + +@scenario("features/nexus.feature", "publishing a nexus with a crypto-key") +def test_publishing_a_nexus_with_a_cryptokey(): + "Publishing a nexus with a crypto-key." + + +@pytest.fixture(scope="module") +def remote_instance(): + yield "ms0" + + +@pytest.fixture(scope="module") +def nexus_instance(): + yield "ms1" + + +@pytest.fixture(scope="module") +def base_instances(remote_instance, nexus_instance): + yield [remote_instance, nexus_instance] + + +@pytest.fixture(scope="module") +def nexus_uuid(): + yield "86050f0e-6914-4e9c-92b9-1237fd6d17a6" + + +@pytest.fixture(scope="module") +def local_bdev_uri(): + yield "bdev:///malloc0" + + +@pytest.fixture(scope="module") +def base_bdevs(mayastor_mod, base_instances): + devices = {} + for instance in base_instances: + uri = "malloc:///malloc0?size_mb=64&blk_size=4096" + name = mayastor_mod[instance].bdev.Create(pb.BdevUri(uri=uri)).name + devices[instance] = BaseBdev(name, uri) + yield devices + for instance, bdev in devices.items(): + mayastor_mod[instance].bdev.Destroy(pb.BdevUri(uri=bdev.uri)) + + +@pytest.fixture(scope="module") +def shared_remote_bdev_uri(mayastor_mod, base_bdevs, remote_instance): + name = base_bdevs[remote_instance].name + uri = ( + mayastor_mod[remote_instance] + .bdev.Share(pb.BdevShareRequest(name=name, proto="nvmf")) + .uri + ) + yield uri + mayastor_mod[remote_instance].bdev.Unshare(pb.BdevShareRequest(name=name)) + + +@pytest.fixture(scope="module") +def file_types(): + yield ["aio", "uring"] + + +@pytest.fixture(scope="module") +def local_files(file_types): + files = {} + for type in file_types: + path = f"/tmp/{type}-file.img" + uri = f"{type}://{path}?blk_size=4096" + subprocess.run( + ["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"], + check=True, + ) + files[type] = LocalFile(path, uri) + yield files + for path in [file.path for file in files.values()]: + subprocess.run(["sudo", "sh", "-c", f"rm -f '{path}'"], check=True) + + +@pytest.fixture(scope="module") +def find_nexus(mayastor_mod, nexus_instance): + def find(uuid): + for nexus in 
mayastor_mod[nexus_instance].ms.ListNexus(pb.Null()).nexus_list: + if nexus.uuid == uuid: + return nexus + return None + + yield find + + +@pytest.fixture +def local_bdev_with_512_blocksize(mayastor_mod, nexus_instance): + uri = "malloc:///malloc1?size_mb=64&blk_size=512" + mayastor_mod[nexus_instance].bdev.Create(pb.BdevUri(uri=uri)) + yield uri + mayastor_mod[nexus_instance].bdev.Destroy(pb.BdevUri(uri=uri)) + + +@pytest.fixture +def nexus_children(local_bdev_uri, shared_remote_bdev_uri, local_files): + return [local_bdev_uri, shared_remote_bdev_uri] + [ + file.uri for file in local_files.values() + ] + + +@pytest.fixture +def created_nexuses(mayastor_mod, nexus_instance): + nexuses = {} + yield nexuses + for uuid in nexuses.keys(): + mayastor_mod[nexus_instance].ms.DestroyNexus(pb.DestroyNexusRequest(uuid=uuid)) + + +@pytest.fixture +def create_nexus(mayastor_mod, nexus_instance, created_nexuses): + def create(uuid, size, children): + nexus = mayastor_mod[nexus_instance].ms.CreateNexus( + pb.CreateNexusRequest(uuid=uuid, size=size, children=children) + ) + created_nexuses[uuid] = nexus + return nexus + + yield create + + +@given("a list of child devices") +def get_child_devices(nexus_children): + pass + + +@given("a local mayastor instance") +def local_mayastor_instance(nexus_instance): + pass + + +@given("a nexus") +@given("an unpublished nexus") +def get_nexus(create_nexus, nexus_uuid, nexus_children): + create_nexus(nexus_uuid, megabytes(64), nexus_children) + + +@given( + parsers.parse('a nexus published via "{protocol}"'), + target_fixture="get_published_nexus", +) +def get_published_nexus( + create_nexus, + mayastor_mod, + nexus_instance, + nexus_uuid, + nexus_children, + find_nexus, + protocol, +): + create_nexus(nexus_uuid, megabytes(64), nexus_children) + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest(uuid=nexus_uuid, key="", share=share_type(protocol)) + ) + nexus = find_nexus(nexus_uuid) + assert nexus.device_uri + + +@given("a remote mayastor instance") +def remote_mayastor_instance(remote_instance): + pass + + +@when("creating a nexus") +@when("creating an identical nexus") +def creating_a_nexus(create_nexus, nexus_uuid, nexus_children): + create_nexus(nexus_uuid, megabytes(64), nexus_children) + + +@when("attempting to create a new nexus") +def attempt_to_create_new_nexus(create_nexus, nexus_children): + with pytest.raises(grpc.RpcError) as error: + create_nexus( + "ed378e05-d704-4e7a-a5bc-7d2344b3fb83", megabytes(64), nexus_children + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("attempting to create a nexus with no children") +def attempt_to_create_nexus_with_no_children(create_nexus, nexus_uuid): + with pytest.raises(grpc.RpcError) as error: + create_nexus(nexus_uuid, megabytes(64), []) + assert error.value.code() == grpc.StatusCode.INTERNAL + + +@when("attempting to create a nexus with a child URI that does not exist") +def attempt_to_create_nexus_with_child_uri_that_does_not_exist( + create_nexus, nexus_uuid, nexus_children +): + with pytest.raises(grpc.RpcError) as error: + create_nexus( + nexus_uuid, + megabytes(64), + nexus_children + ["nvmf://10.0.0.2:8420/nqn.2019-05.io.openebs:missing"], + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("attempting to create a nexus from child devices with mixed block sizes") +def attempt_to_create_nexus_from_child_devices_with_mixed_block_sizes( + create_nexus, nexus_uuid, nexus_children, local_bdev_with_512_blocksize +): + with 
pytest.raises(grpc.RpcError) as error: + create_nexus(nexus_uuid, megabytes(64), ["bdev:///malloc0", "bdev:///malloc1"]) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when( + "attempting to create a nexus with a size larger than that of any of its children" +) +def attempt_to_create_nexus_with_size_larger_than_size_of_children( + create_nexus, nexus_uuid, nexus_children +): + with pytest.raises(grpc.RpcError) as error: + create_nexus(nexus_uuid, megabytes(128), nexus_children) + assert error.value.code() == grpc.StatusCode.INTERNAL + + +@when("destroying the nexus") +@when("destroying the nexus without unpublishing it") +def destroying_the_nexus(mayastor_mod, nexus_instance, nexus_uuid, created_nexuses): + mayastor_mod[nexus_instance].ms.DestroyNexus( + pb.DestroyNexusRequest(uuid=nexus_uuid) + ) + del created_nexuses[nexus_uuid] + + +@when("destroying a nexus that does not exist") +def destroying_a_nexus_that_does_not_exist(mayastor_mod, nexus_instance): + mayastor_mod[nexus_instance].ms.DestroyNexus( + pb.DestroyNexusRequest(uuid="e6629036-1376-494d-bbc2-0b6345ab10df") + ) + + +@when("listing all nexuses", target_fixture="list_nexuses") +def list_nexuses(mayastor_mod, nexus_instance): + return mayastor_mod[nexus_instance].ms.ListNexus(pb.Null()).nexus_list + + +@when( + parsers.parse('publishing a nexus via "{protocol}"'), target_fixture="publish_nexus" +) +def publish_nexus(mayastor_mod, nexus_instance, nexus_uuid, protocol): + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest(uuid=nexus_uuid, key="", share=share_type(protocol)) + ) + + +@when("publishing the nexus with the same protocol") +def publishing_the_nexus_with_the_same_protocol( + find_nexus, mayastor_mod, nexus_instance, nexus_uuid +): + nexus = find_nexus(nexus_uuid) + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest( + uuid=nexus_uuid, key="", share=pb.ShareProtocolNexus.NEXUS_NVMF + ) + ) + + +@when("attempting to publish the nexus with a different protocol") +def attempt_to_publish_nexus_with_different_protocol( + find_nexus, mayastor_mod, nexus_instance, nexus_uuid +): + nexus = find_nexus(nexus_uuid) + with pytest.raises(grpc.RpcError) as error: + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest( + uuid=nexus_uuid, key="", share=pb.ShareProtocolNexus.NEXUS_NBD + ) + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("publishing the nexus using a crypto-key") +def publish_nexus_with_cryptokey(mayastor_mod, nexus_instance, nexus_uuid): + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest( + uuid=nexus_uuid, key="0123456789123456", share=share_type("nvmf") + ) + ) + + +@when("unpublishing the nexus") +def unpublish_nexus(mayastor_mod, nexus_instance, nexus_uuid): + mayastor_mod[nexus_instance].ms.UnpublishNexus( + pb.UnpublishNexusRequest(uuid=nexus_uuid) + ) + + +@when("removing a child") +def remove_child(mayastor_mod, nexus_instance, nexus_uuid, nexus_children): + mayastor_mod[nexus_instance].ms.RemoveChildNexus( + pb.RemoveChildNexusRequest(uuid=nexus_uuid, uri=nexus_children[0]) + ) + + +@when("adding the removed child") +def add_child(mayastor_mod, nexus_instance, nexus_uuid, nexus_children): + child = mayastor_mod[nexus_instance].ms.AddChildNexus( + pb.AddChildNexusRequest(uuid=nexus_uuid, uri=nexus_children[0]) + ) + assert child.state == pb.ChildState.CHILD_DEGRADED + + +@then("the child should be successfully removed") +def the_child_should_be_successfully_removed(find_nexus, 
nexus_uuid, nexus_children): + nexus = find_nexus(nexus_uuid) + assert len(nexus.children) + 1 == len(nexus_children) + assert nexus_children[0] not in get_child_uris(nexus) + + +@then("the child should be successfully added") +def the_child_should_be_successfully_added(find_nexus, nexus_uuid, nexus_children): + nexus = find_nexus(nexus_uuid) + assert sorted(get_child_uris(nexus)) == sorted(nexus_children) + assert nexus.state == pb.NexusState.NEXUS_DEGRADED + + +@then("the nexus should appear in the output list") +def nexus_should_appear_in_output(nexus_uuid, list_nexuses): + assert nexus_uuid in [nexus.uuid for nexus in list_nexuses] + + +@then("the nexus should be created") +def nexus_should_be_created(find_nexus, nexus_uuid, nexus_children): + nexus = find_nexus(nexus_uuid) + assert nexus != None + assert sorted(get_child_uris(nexus)) == sorted(nexus_children) + assert nexus.state == pb.NexusState.NEXUS_ONLINE + for child in nexus.children: + assert child.state == pb.ChildState.CHILD_ONLINE + + +@then("the nexus should not be created") +def nexus_should_not_be_created(find_nexus, nexus_uuid): + assert find_nexus(nexus_uuid) == None + + +@then("the nexus should be destroyed") +def nexus_should_be_destroyed(find_nexus, nexus_uuid): + assert find_nexus(nexus_uuid) == None + + +@then("the nexus should be successfully published") +def nexus_successfully_published(find_nexus, nexus_uuid): + nexus = find_nexus(nexus_uuid) + assert nexus.device_uri + + +@then("the nexus should be successfully unpublished") +def nexus_successfully_unpublished(find_nexus, nexus_uuid): + nexus = find_nexus(nexus_uuid) + assert not nexus.device_uri + + +@then("the operation should fail with existing URIs in use") +@then("the operation should fail") +@then("the request should fail") +def operation_should_fail(): + pass + + +@then("the operation should succeed") +@then("the request should succeed") +def operation_should_succeed(): + pass diff --git a/test/python/tests/publish/test_nexus_publish.py b/test/python/tests/publish/test_nexus_publish.py new file mode 100644 index 000000000..73b1956d7 --- /dev/null +++ b/test/python/tests/publish/test_nexus_publish.py @@ -0,0 +1,212 @@ +import pytest + +from collections import namedtuple +import subprocess + +from common.mayastor import container_mod, mayastor_mod + +import grpc +import mayastor_pb2 as pb + +BaseBdev = namedtuple("BaseBdev", "name uri") + +LocalFile = namedtuple("LocalFile", "path uri") + + +def megabytes(n): + return n * 1024 * 1024 + + +def get_uuid(n): + return "11111111-0000-0000-0000-%.12d" % (n) + + +def get_child_uris(nexus): + return [child.uri for child in nexus.children] + + +def share_type(protocol): + TYPES = { + "nbd": pb.ShareProtocolNexus.NEXUS_NBD, + "nvmf": pb.ShareProtocolNexus.NEXUS_NVMF, + "iscsi": pb.ShareProtocolNexus.NEXUS_ISCSI, + } + return TYPES[protocol] + + +@pytest.fixture(scope="module") +def remote_instance(): + yield "ms0" + + +@pytest.fixture(scope="module") +def nexus_instance(): + yield "ms1" + + +@pytest.fixture(scope="module") +def base_instances(remote_instance, nexus_instance): + yield [remote_instance, nexus_instance] + + +@pytest.fixture(scope="module") +def local_bdev_uri(): + yield "bdev:///malloc0" + + +@pytest.fixture(scope="module") +def base_bdevs(mayastor_mod, base_instances): + devices = {} + for instance in base_instances: + uri = "malloc:///malloc0?size_mb=64&blk_size=4096" + name = mayastor_mod[instance].bdev.Create(pb.BdevUri(uri=uri)).name + devices[instance] = BaseBdev(name, uri) + yield devices + for 
instance, bdev in devices.items(): + mayastor_mod[instance].bdev.Destroy(pb.BdevUri(uri=bdev.uri)) + + +@pytest.fixture(scope="module") +def shared_remote_bdev_uri(mayastor_mod, base_bdevs, remote_instance): + name = base_bdevs[remote_instance].name + uri = ( + mayastor_mod[remote_instance] + .bdev.Share(pb.BdevShareRequest(name=name, proto="nvmf")) + .uri + ) + yield uri + mayastor_mod[remote_instance].bdev.Unshare(pb.BdevShareRequest(name=name)) + + +@pytest.fixture(scope="module") +def file_types(): + yield ["aio", "uring"] + + +@pytest.fixture(scope="module") +def local_files(file_types): + files = {} + for type in file_types: + path = f"/tmp/{type}-file.img" + uri = f"{type}://{path}?blk_size=4096" + subprocess.run( + ["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"], + check=True, + ) + files[type] = LocalFile(path, uri) + yield files + for path in [file.path for file in files.values()]: + subprocess.run(["sudo", "sh", "-c", f"rm -f '{path}'"], check=True) + + +@pytest.fixture(scope="module") +def nexus_count(mayastor_mod, nexus_instance): + def count(): + return len(mayastor_mod[nexus_instance].ms.ListNexus(pb.Null()).nexus_list) + + yield count + + +@pytest.fixture +def nexus_children(local_bdev_uri, shared_remote_bdev_uri, local_files): + return [local_bdev_uri, shared_remote_bdev_uri] + [ + file.uri for file in local_files.values() + ] + + +@pytest.fixture +def created_nexuses(mayastor_mod, nexus_instance): + nexuses = {} + yield nexuses + for uuid in nexuses.keys(): + mayastor_mod[nexus_instance].ms.DestroyNexus(pb.DestroyNexusRequest(uuid=uuid)) + + +@pytest.fixture +def create_nexus(mayastor_mod, nexus_instance, created_nexuses): + def create(uuid, size, children): + nexus = mayastor_mod[nexus_instance].ms.CreateNexus( + pb.CreateNexusRequest(uuid=uuid, size=size, children=children) + ) + created_nexuses[uuid] = nexus + return nexus + + yield create + + +@pytest.fixture() +def publish_nexus(mayastor_mod, nexus_instance): + def publish(uuid, protocol): + mayastor_mod[nexus_instance].ms.PublishNexus( + pb.PublishNexusRequest(uuid=uuid, key="", share=share_type(protocol)) + ) + + yield publish + + +@pytest.fixture() +def unpublish_nexus(mayastor_mod, nexus_instance): + def unpublish(uuid): + mayastor_mod[nexus_instance].ms.UnpublishNexus( + pb.UnpublishNexusRequest(uuid=uuid) + ) + + yield unpublish + + +@pytest.fixture() +def destroy_nexus(mayastor_mod, nexus_instance, created_nexuses): + def destroy(uuid): + mayastor_mod[nexus_instance].ms.DestroyNexus(pb.DestroyNexusRequest(uuid=uuid)) + del created_nexuses[uuid] + + yield destroy + + +@pytest.fixture(params=["iscsi", "nvmf"]) +def share_protocol(request): + yield request.param + + +@pytest.mark.parametrize("n", range(5)) +def test_create_destroy(create_nexus, destroy_nexus, nexus_count, nexus_children, n): + uuid = get_uuid(n) + create_nexus(uuid, megabytes(64), nexus_children) + destroy_nexus(uuid) + assert nexus_count() == 0 + + +@pytest.mark.parametrize("n", range(5)) +def test_create_publish_unpublish_destroy( + create_nexus, + publish_nexus, + unpublish_nexus, + destroy_nexus, + nexus_count, + nexus_children, + share_protocol, + n, +): + uuid = get_uuid(n) + create_nexus(uuid, megabytes(64), nexus_children) + publish_nexus(uuid, share_protocol) + unpublish_nexus(uuid) + destroy_nexus(uuid) + assert nexus_count() == 0 + + +@pytest.mark.parametrize("n", range(5)) +def test_create_publish_destroy( + create_nexus, + publish_nexus, + destroy_nexus, + nexus_count, + nexus_children, + share_protocol, + n, +): 
+ uuid = get_uuid(n) + create_nexus(uuid, megabytes(64), nexus_children) + publish_nexus(uuid, share_protocol) + destroy_nexus(uuid) + assert nexus_count() == 0 diff --git a/test/python/tests/rebuild/docker-compose.yml b/test/python/tests/rebuild/docker-compose.yml new file mode 100644 index 000000000..1dfa5b03f --- /dev/null +++ b/test/python/tests/rebuild/docker-compose.yml @@ -0,0 +1,39 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/rebuild/features/rebuild.feature b/test/python/tests/rebuild/features/rebuild.feature new file mode 100644 index 000000000..8a81a1aef --- /dev/null +++ b/test/python/tests/rebuild/features/rebuild.feature @@ -0,0 +1,70 @@ +Feature: Nexus rebuild functionality + + Background: + Given a mayastor instance + And a nexus with a source child device + + Scenario: running rebuild + When a target child is added to the nexus + And the rebuild operation is started + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the rebuild state is "running" + And the rebuild count is 1 + + Scenario: stopping rebuild + When a target child is added to the nexus + And the rebuild operation is started + And the rebuild operation is then stopped + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the rebuild state is undefined + And the rebuild count is 0 + + Scenario: pausing rebuild + When a target child is added to the nexus + And the rebuild operation is started + And the rebuild operation is then paused + And the rebuild statistics are requested + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the rebuild state is "paused" + And the rebuild statistics counter "blocks_total" is non-zero + And the rebuild statistics counter "blocks_recovered" is non-zero + And the rebuild statistics counter "progress" is non-zero + And the rebuild statistics counter "tasks_total" is non-zero + And the rebuild statistics counter "tasks_active" is zero + + Scenario: resuming rebuild + When a target child is added to the nexus + And the rebuild operation is started + And the rebuild operation is then paused + And the rebuild operation is then resumed + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the rebuild state is "running" + And the rebuild count is 1 + + Scenario: setting a child ONLINE + When a target child is added to the nexus + And the target child is set OFFLINE + And the target child is then set ONLINE + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the 
rebuild state is "running" + And the rebuild count is 1 + + Scenario: setting a child OFFLINE + When a target child is added to the nexus + And the rebuild operation is started + And the target child is set OFFLINE + Then the nexus state is DEGRADED + And the source child state is ONLINE + And the target child state is DEGRADED + And the rebuild state is undefined + And the rebuild count is 0 diff --git a/test/python/tests/rebuild/test_bdd_rebuild.py b/test/python/tests/rebuild/test_bdd_rebuild.py new file mode 100644 index 000000000..e0926c104 --- /dev/null +++ b/test/python/tests/rebuild/test_bdd_rebuild.py @@ -0,0 +1,254 @@ +import pytest +from pytest_bdd import given, scenario, then, when, parsers + +import os +import subprocess +import time + +from common.mayastor import container_mod, mayastor_mod +from common.volume import Volume + +import grpc +import mayastor_pb2 as pb + + +def megabytes(n): + return n * 1024 * 1024 + + +def find_child(nexus, uri): + for child in nexus.children: + if child.uri == uri: + return child + return None + + +def convert_nexus_state(state): + STATES = { + "UNKNOWN": pb.NexusState.NEXUS_UNKNOWN, + "ONLINE": pb.NexusState.NEXUS_ONLINE, + "DEGRADED": pb.NexusState.NEXUS_DEGRADED, + "FAULTED": pb.NexusState.NEXUS_FAULTED, + } + return STATES[state] + + +def convert_child_state(state): + STATES = { + "UNKNOWN": pb.ChildState.CHILD_UNKNOWN, + "ONLINE": pb.ChildState.CHILD_ONLINE, + "DEGRADED": pb.ChildState.CHILD_DEGRADED, + "FAULTED": pb.ChildState.CHILD_FAULTED, + } + return STATES[state] + + +def convert_child_action(state): + ACTIONS = { + "OFFLINE": pb.ChildAction.offline, + "ONLINE": pb.ChildAction.online, + } + return ACTIONS[state] + + +@scenario("features/rebuild.feature", "running rebuild") +def test_running_rebuild(): + "Running rebuild." + + +@scenario("features/rebuild.feature", "stopping rebuild") +def test_stopping_rebuild(): + "Stopping rebuild." + + +@scenario("features/rebuild.feature", "pausing rebuild") +def test_pausing_rebuild(): + "Pausing rebuild." + + +@scenario("features/rebuild.feature", "resuming rebuild") +def test_resuming_rebuild(): + "Resuming rebuild." + + +@scenario("features/rebuild.feature", "setting a child ONLINE") +def test_setting_a_child_online(): + "Setting a child ONLINE." + + +@scenario("features/rebuild.feature", "setting a child OFFLINE") +def test_setting_a_child_offline(): + "Setting a child OFFLINE." 
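+
+
+# Each @scenario binding above runs the correspondingly named scenario from
+# features/rebuild.feature; pytest-bdd resolves its Given/When/Then lines against
+# the step functions defined below, sharing state through the fixtures that follow.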
+ + +@pytest.fixture(scope="module") +def local_files(): + files = [f"/tmp/disk-rebuild-{base}.img" for base in ["source", "target"]] + for path in files: + subprocess.run( + ["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"], + check=True, + ) + yield + for path in files: + subprocess.run(["sudo", "rm", "-f", path], check=True) + + +@pytest.fixture(scope="module") +def source_uri(local_files): + yield "aio:///tmp/disk-rebuild-source.img?blk_size=4096" + + +@pytest.fixture(scope="module") +def target_uri(local_files): + yield "aio:///tmp/disk-rebuild-target.img?blk_size=4096" + + +@pytest.fixture(scope="module") +def nexus_uuid(): + yield "2c58c9f0-da89-4cb9-8097-dc67fa132493" + + +@pytest.fixture(scope="module") +def mayastor_instance(mayastor_mod): + yield mayastor_mod["ms0"] + + +@pytest.fixture(scope="module") +def find_nexus(mayastor_instance): + def find(uuid): + for nexus in mayastor_instance.ms.ListNexus(pb.Null()).nexus_list: + if nexus.uuid == uuid: + return nexus + return None + + yield find + + +@pytest.fixture +def mayastor_nexus(mayastor_instance, nexus_uuid, source_uri): + nexus = mayastor_instance.ms.CreateNexus( + pb.CreateNexusRequest( + uuid=nexus_uuid, size=megabytes(64), children=[source_uri] + ) + ) + yield nexus + mayastor_instance.ms.DestroyNexus(pb.DestroyNexusRequest(uuid=nexus_uuid)) + + +@pytest.fixture +def nexus_state(mayastor_nexus, find_nexus, nexus_uuid): + yield find_nexus(nexus_uuid) + + +@pytest.fixture +def rebuild_state(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + try: + yield mayastor_instance.ms.GetRebuildState( + pb.RebuildStateRequest(uuid=nexus_uuid, uri=target_uri) + ).state + except: + yield None + + +@given("a mayastor instance") +@given(parsers.parse('a mayastor instance "{name}"')) +def get_instance(mayastor_instance): + pass + + +@given("a nexus") +@given("a nexus with a source child device") +def get_nexus(mayastor_nexus): + pass + + +@when("a target child is added to the nexus") +def add_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + mayastor_instance.ms.AddChildNexus( + pb.AddChildNexusRequest(uuid=nexus_uuid, uri=target_uri, norebuild=True) + ) + + +@when("the rebuild operation is started") +def start_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + mayastor_instance.ms.StartRebuild( + pb.StartRebuildRequest(uuid=nexus_uuid, uri=target_uri) + ) + + +@when("the rebuild operation is then paused") +def pause_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + mayastor_instance.ms.PauseRebuild( + pb.PauseRebuildRequest(uuid=nexus_uuid, uri=target_uri) + ) + time.sleep(0.5) + + +@when("the rebuild operation is then resumed") +def resume_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + mayastor_instance.ms.ResumeRebuild( + pb.ResumeRebuildRequest(uuid=nexus_uuid, uri=target_uri) + ) + + +@when("the rebuild operation is then stopped") +def stop_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + mayastor_instance.ms.StopRebuild( + pb.StopRebuildRequest(uuid=nexus_uuid, uri=target_uri) + ) + time.sleep(0.5) + + +@when("the rebuild statistics are requested", target_fixture="rebuild_statistics") +def rebuild_statistics(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri): + return mayastor_instance.ms.GetRebuildStats( + pb.RebuildStatsRequest(uuid=nexus_uuid, uri=target_uri) + ) + + +@when(parsers.parse("the target child is set {state}"), target_fixture="set_child") +@when(parsers.parse("the target child is 
then set {state}"), target_fixture="set_child") +def set_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri, state): + mayastor_instance.ms.ChildOperation( + pb.ChildNexusRequest( + uuid=nexus_uuid, uri=target_uri, action=convert_child_action(state) + ) + ) + + +@then(parsers.parse("the nexus state is {expected}")) +def check_nexus_state(nexus_state, expected): + assert nexus_state.state == convert_nexus_state(expected) + + +@then(parsers.parse("the source child state is {expected}")) +def check_source_child_state(nexus_state, source_uri, expected): + child = find_child(nexus_state, source_uri) + assert child.state == convert_child_state(expected) + + +@then(parsers.parse("the target child state is {expected}")) +def check_target_child_state(nexus_state, target_uri, expected): + child = find_child(nexus_state, target_uri) + assert child.state == convert_child_state(expected) + + +@then(parsers.parse("the rebuild count is {expected:d}")) +def check_rebuild_count(nexus_state, expected): + assert nexus_state.rebuilds == expected + + +@then(parsers.parse('the rebuild state is "{expected}"')) +def check_rebuild_state(rebuild_state, expected): + assert rebuild_state == expected + + +@then("the rebuild state is undefined") +def rebuild_state_is_undefined(rebuild_state): + assert rebuild_state is None + + +@then(parsers.parse('the rebuild statistics counter "{name}" is {expected}')) +def check_rebuild_statistics_counter(rebuild_statistics, name, expected): + assert (getattr(rebuild_statistics, name) == 0) == (expected == "zero") diff --git a/test/python/tests/replica/docker-compose.yml b/test/python/tests/replica/docker-compose.yml new file mode 100644 index 000000000..1dfa5b03f --- /dev/null +++ b/test/python/tests/replica/docker-compose.yml @@ -0,0 +1,39 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/replica/features/pool.feature b/test/python/tests/replica/features/pool.feature new file mode 100644 index 000000000..e560f6805 --- /dev/null +++ b/test/python/tests/replica/features/pool.feature @@ -0,0 +1,35 @@ +Feature: Mayastor pool management + + Background: + Given a mayastor instance "ms0" + + Scenario: creating a pool using disk with invalid block size + When the user attempts to create a pool specifying a disk with an invalid block size + Then the pool creation should fail + + Scenario: creating a pool with multiple disks + When the user attempts to create a pool specifying multiple disks + Then the pool creation should fail + + Scenario: creating a pool with an AIO disk + When the user creates a pool specifying a URI representing an aio disk + Then the pool should be created + + Scenario: creating a pool with a name that already exists + Given a pool "p0" + When the user creates a pool with the 
name of an existing pool + Then the pool creation should succeed + + Scenario: listing pools + Given a pool "p0" + When the user lists the current pools + Then the pool should appear in the output list + + Scenario: destroying a pool + Given a pool "p0" + When the user destroys the pool + Then the pool should be destroyed + + Scenario: destroying a pool that does not exist + When the user destroys a pool that does not exist + Then the pool destroy command should succeed diff --git a/test/python/tests/replica/features/replica.feature b/test/python/tests/replica/features/replica.feature new file mode 100644 index 000000000..4aecba51d --- /dev/null +++ b/test/python/tests/replica/features/replica.feature @@ -0,0 +1,88 @@ +Feature: Mayastor replica management + + Background: + Given a mayastor instance "ms0" + And a pool "p0" + + Scenario: creating a replica + When the user creates an unshared replica + Then the replica is created + And the share state is unshared + + Scenario: creating a replica shared over "iscsi" + When the user attempts to create a replica shared over "iscsi" + Then the create replica command should fail + + Scenario: creating a replica with a name that already exists + Given a replica + When the user creates a replica that already exists + Then the create replica command should succeed + + Scenario: listing replicas + Given a replica + When the user lists the current replicas + Then the replica should appear in the output list + + Scenario: sharing a replica over "nvmf" + Given a replica that is unshared + When the user shares the replica over "nvmf" + Then the share state should change to "nvmf" + + Scenario: sharing a replica over "iscsi" + Given a replica that is unshared + When the user attempts to share the replica over "iscsi" + Then the share replica command should fail + + Scenario: sharing a replica that is already shared with the same protocol + Given a replica shared over "nvmf" + When the user shares the replica with the same protocol + Then the share replica command should succeed + + Scenario: sharing a replica that is already shared with a different protocol + Given a replica shared over "nvmf" + When the user attempts to share the replica with a different protocol + Then the share replica command should fail + + Scenario: unsharing a replica + Given a replica shared over "nvmf" + When the user unshares the replica + Then the share state should change to unshared + + Scenario: destroying a replica + Given a replica + When the user destroys the replica + Then the replica should be destroyed + + Scenario: destroying a replica that does not exist + When the user destroys a replica that does not exist + Then the replica destroy command should succeed + + Scenario: listing replica stats + Given a replica + When the user gets replica stats + Then the stats for the replica should be listed + + Scenario: creating a replica shared over "nvmf" + When the user creates a replica shared over "nvmf" + Then the replica is created + And the share state is "nvmf" + + Scenario: listing replicas + Given a replica + When the user lists the current replicas + Then the replica should appear in the output list + + Scenario: writing to a shared replica + Given a replica shared over "nvmf" + When the user writes to the replica + Then the write operation should succeed + + Scenario: reading from a shared replica + Given a replica shared over "nvmf" + When the user reads from the replica + Then the read operation should succeed + + Scenario: recreating a replica + Given a replica + 
When the user attempts to recreate the existing replica + Then the old data should have been reset diff --git a/test/python/tests/replica/test_bdd_pool.py b/test/python/tests/replica/test_bdd_pool.py new file mode 100644 index 000000000..fb66109a3 --- /dev/null +++ b/test/python/tests/replica/test_bdd_pool.py @@ -0,0 +1,164 @@ +import pytest +from pytest_bdd import given, scenario, then, when, parsers + +from common.command import run_cmd +from common.mayastor import container_mod, mayastor_mod + +import grpc +import mayastor_pb2 as pb + + +@scenario("features/pool.feature", "creating a pool using disk with invalid block size") +def test_fail_creating_a_pool_using_disk_with_invalid_block_size(): + "Creating a pool using disk with invalid block size." + + +@scenario("features/pool.feature", "creating a pool with a name that already exists") +def test_creating_a_pool_with_a_name_that_already_exists(): + "Creating a pool with a name that already exists." + + +@scenario("features/pool.feature", "creating a pool with an AIO disk") +def test_creating_a_pool_with_an_aio_disk(): + "Creating a pool with an AIO disk." + + +@scenario("features/pool.feature", "creating a pool with multiple disks") +def test_fail_creating_a_pool_with_multiple_disks(): + "Creating a pool with multiple disks." + + +@scenario("features/pool.feature", "destroying a pool") +def test_destroying_a_pool(): + "Destroying a pool." + + +@scenario("features/pool.feature", "destroying a pool that does not exist") +def test_destroying_a_pool_that_does_not_exist(): + "Destroying a pool that does not exist." + + +@scenario("features/pool.feature", "listing pools") +def test_listing_pools(): + "Listing pools." + + +@pytest.fixture +def image_file(): + name = "/tmp/ms0-disk0.img" + run_cmd(f"rm -f '{name}'", True) + run_cmd(f"truncate -s 64M '{name}'", True) + yield name + run_cmd(f"rm -f '{name}'", True) + + +@pytest.fixture +def find_pool(get_mayastor_instance): + def find(name): + for pool in get_mayastor_instance.ms.ListPools(pb.Null()).pools: + if pool.name == name: + return pool + return None + + yield find + + +@pytest.fixture +def replica_pools(get_mayastor_instance): + pools = {} + yield pools + for name in pools.keys(): + get_mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name=name)) + + +@pytest.fixture +def create_pool(get_mayastor_instance, replica_pools): + def create(name, disks): + pool = get_mayastor_instance.ms.CreatePool( + pb.CreatePoolRequest(name=name, disks=disks) + ) + replica_pools[name] = pool + + yield create + + +@given( + parsers.parse('a mayastor instance "{name}"'), + target_fixture="get_mayastor_instance", +) +def get_mayastor_instance(mayastor_mod, name): + return mayastor_mod[name] + + +@given(parsers.parse('a pool "{name}"'), target_fixture="get_pool_name") +def get_pool_name(get_mayastor_instance, create_pool, name): + create_pool(name, ["malloc:///disk0?size_mb=100"]) + return name + + +@when("the user creates a pool specifying a URI representing an aio disk") +def create_pool_from_aio_disk(get_mayastor_instance, create_pool, image_file): + create_pool("p0", [f"aio://{image_file}"]) + + +@when("the user attempts to create a pool specifying a disk with an invalid block size") +def attempt_to_create_pool_from_disk_with_invalid_block_size( + get_mayastor_instance, create_pool +): + with pytest.raises(grpc.RpcError) as error: + create_pool("p0", "malloc:///disk0?size_mb=100&blk_size=1024") + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("the user attempts to create a 
pool specifying multiple disks") +def attempt_to_create_pool_from_multiple_disks(get_mayastor_instance, create_pool): + with pytest.raises(grpc.RpcError) as error: + create_pool( + "p0", ["malloc:///disk0?size_mb=100", "malloc:///disk1?size_mb=100"] + ) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when("the user creates a pool with the name of an existing pool") +def create_pool_that_already_exists(get_mayastor_instance, create_pool, get_pool_name): + create_pool(get_pool_name, ["malloc:///disk0?size_mb=100"]) + + +@when("the user destroys a pool that does not exist") +def destroy_pool_that_does_not_exist(get_mayastor_instance, find_pool): + assert find_pool("p0") == None + get_mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name="p0")) + + +@when("the user destroys the pool") +def destroy_pool(get_mayastor_instance, replica_pools, get_pool_name): + pool = replica_pools[get_pool_name] + get_mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name=pool.name)) + del replica_pools[get_pool_name] + + +@when("the user lists the current pools", target_fixture="list_pools") +def list_pools(get_mayastor_instance): + return get_mayastor_instance.ms.ListPools(pb.Null(), wait_for_ready=True).pools + + +@then("the pool creation should fail") +def pool_creation_should_fail(find_pool): + assert find_pool("p0") == None + + +@then("the pool creation should succeed") +@then("the pool should be created") +def pool_creation_should_succeed(find_pool): + assert find_pool("p0") != None + + +@then("the pool destroy command should succeed") +@then("the pool should be destroyed") +def pool_destruction_should_succeed(find_pool): + assert find_pool("p0") == None + + +@then("the pool should appear in the output list") +def pool_should_appear_in_output(get_pool_name, list_pools): + assert get_pool_name in [pool.name for pool in list_pools] diff --git a/test/python/tests/replica/test_bdd_replica.py b/test/python/tests/replica/test_bdd_replica.py new file mode 100644 index 000000000..0229706b3 --- /dev/null +++ b/test/python/tests/replica/test_bdd_replica.py @@ -0,0 +1,355 @@ +import pytest +from pytest_bdd import given, scenario, then, when, parsers + +from common.mayastor import container_mod, mayastor_mod + +import grpc +import mayastor_pb2 as pb + + +@scenario("features/replica.feature", "creating a replica") +def test_creating_a_replica(): + "Creating a replica." + + +@scenario("features/replica.feature", 'creating a replica shared over "iscsi"') +def test_fail_creating_a_replica_shared_over_iscsi(): + 'Creating a replica shared over "iscsi".' + + +@scenario("features/replica.feature", 'creating a replica shared over "nvmf"') +def test_creating_a_replica_shared_over_nvmf(): + 'Creating a replica shared over "nvmf".' + + +@scenario( + "features/replica.feature", "creating a replica with a name that already exists" +) +def test_creating_a_replica_with_a_name_that_already_exists(): + "Creating a replica with a name that already exists." + + +@scenario("features/replica.feature", "destroying a replica") +def test_destroying_a_replica(): + "Destroying a replica." + + +@scenario("features/replica.feature", "destroying a replica that does not exist") +def test_destroying_a_replica_that_does_not_exist(): + "Destroying a replica that does not exist." + + +@scenario("features/replica.feature", "listing replica stats") +def test_listing_replica_stats(): + "Listing replica stats." 
+ + +@scenario("features/replica.feature", "listing replicas") +def test_listing_replicas(): + "Listing replicas." + + +@scenario("features/replica.feature", 'sharing a replica over "iscsi"') +def test_fail_sharing_a_replica_over_iscsi(): + 'Sharing a replica over "iscsi".' + + +@scenario("features/replica.feature", 'sharing a replica over "nvmf"') +def test_sharing_a_replica_over_nvmf(): + 'Sharing a replica over "nvmf".' + + +@scenario( + "features/replica.feature", + "sharing a replica that is already shared with a different protocol", +) +def test_fail_sharing_a_replica_that_is_already_shared_with_a_different_protocol(): + "Sharing a replica that is already shared with a different protocol." + + +@scenario( + "features/replica.feature", + "sharing a replica that is already shared with the same protocol", +) +def test_sharing_a_replica_that_is_already_shared_with_the_same_protocol(): + "Sharing a replica that is already shared with the same protocol." + + +@scenario("features/replica.feature", "unsharing a replica") +def test_unsharing_a_replica(): + "Unsharing a replica." + + +@pytest.mark.skip(reason="todo") +@scenario("features/replica.feature", "reading from a shared replica") +def test_reading_from_a_shared_replica(): + "Reading from a shared replica." + + +@pytest.mark.skip(reason="todo") +@scenario("features/replica.feature", "writing to a shared replica") +def test_writing_to_a_shared_replica(): + "Writing to a shared replica." + + +def share_protocol(name): + PROTOCOLS = { + "none": pb.REPLICA_NONE, + "nvmf": pb.REPLICA_NVMF, + "iscsi": pb.REPLICA_ISCSI, + } + return PROTOCOLS[name] + + +@pytest.fixture(scope="module") +def mayastor_instance(mayastor_mod): + yield mayastor_mod["ms0"] + + +@pytest.fixture(scope="module") +def mayastor_pool(mayastor_instance): + pool = mayastor_instance.ms.CreatePool( + pb.CreatePoolRequest(name="p0", disks=["malloc:///disk0?size_mb=512"]) + ) + yield pool.name + mayastor_instance.ms.DestroyPool(pb.DestroyPoolRequest(name=pool.name)) + + +@pytest.fixture(scope="module") +def replica_uuid(): + yield "22ca10d3-4f2b-4b95-9814-9181c025cc1a" + + +@pytest.fixture(scope="module") +def replica_size(): + yield 32 * 1024 * 1024 + + +@pytest.fixture(scope="module") +def find_replica(mayastor_instance, mayastor_pool): + def find(uuid): + for replica in mayastor_instance.ms.ListReplicas(pb.Null()).replicas: + if replica.uuid == uuid: + return replica + return None + + yield find + + +@pytest.fixture +def current_replicas(mayastor_instance, mayastor_pool): + replicas = {} + yield replicas + for uuid in replicas.keys(): + mayastor_instance.ms.DestroyReplica(pb.DestroyReplicaRequest(uuid=uuid)) + + +@pytest.fixture +def create_replica(mayastor_instance, mayastor_pool, current_replicas): + def create(uuid, size, share): + replica = mayastor_instance.ms.CreateReplica( + pb.CreateReplicaRequest( + pool=mayastor_pool, uuid=uuid, size=size, share=share + ) + ) + current_replicas[uuid] = replica + + yield create + + +@given(parsers.parse('a mayastor instance "{name}"')) +def get_instance(mayastor_instance): + pass + + +@given(parsers.parse('a pool "{name}"')) +def get_pool(mayastor_pool): + pass + + +@given("a replica") +@given("a replica that is unshared") +def get_replica(create_replica, replica_uuid, replica_size): + create_replica(replica_uuid, replica_size, pb.REPLICA_NONE) + + +@given(parsers.parse('a replica shared over "{share}"'), target_fixture="share_replica") +def get_shared_replica(create_replica, replica_uuid, replica_size, share): + 
create_replica(replica_uuid, replica_size, share=share_protocol(share)) + + +@when("the user creates a replica") +@when("the user creates an unshared replica") +def create_unshared_replica(create_replica, replica_uuid, replica_size): + create_replica(replica_uuid, replica_size, pb.REPLICA_NONE) + + +@when('the user attempts to create a replica shared over "iscsi"') +def attempt_to_create_replica_shared_over_iscsi( + create_replica, replica_uuid, replica_size +): + with pytest.raises(grpc.RpcError) as error: + create_replica(replica_uuid, replica_size, pb.REPLICA_ISCSI) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + +@when( + parsers.parse('the user creates a replica shared over "{share}"'), + target_fixture="share_replica", +) +def create_shared_replica(create_replica, replica_uuid, replica_size, share): + create_replica(replica_uuid, replica_size, share=share_protocol(share)) + + +@when("the user creates a replica that already exists") +@when("the user recreates the existing replica") +def create_replica_that_already_exists(create_replica, current_replicas, replica_uuid): + replica = current_replicas[replica_uuid] + create_replica(replica.uuid, replica.size, share=replica.share) + + +@when("the user destroys a replica that does not exist") +def the_user_destroys_a_replica_that_does_not_exist( + mayastor_instance, find_replica, replica_uuid +): + assert find_replica(replica_uuid) == None + mayastor_instance.ms.DestroyReplica(pb.DestroyReplicaRequest(uuid=replica_uuid)) + + +@when("the user destroys the replica") +def the_user_destroys_the_replica(mayastor_instance, current_replicas, replica_uuid): + replica = current_replicas[replica_uuid] + mayastor_instance.ms.DestroyReplica(pb.DestroyReplicaRequest(uuid=replica.uuid)) + del current_replicas[replica_uuid] + + +@when("the user gets replica stats", target_fixture="stat_replicas") +def stat_replicas(mayastor_instance): + return mayastor_instance.ms.StatReplicas(pb.Null()).replicas + + +@when("the user lists the current replicas", target_fixture="list_replicas") +def list_replicas(mayastor_instance): + return mayastor_instance.ms.ListReplicas(pb.Null()).replicas + + +@when("the user attempts to share the replica with a different protocol") +def attempt_to_share_replica_with_different_protocol( + mayastor_instance, current_replicas, replica_uuid +): + replica = current_replicas[replica_uuid] + share = pb.REPLICA_ISCSI if replica.share == pb.REPLICA_NVMF else pb.REPLICA_NVMF + with pytest.raises(grpc.RpcError) as error: + mayastor_instance.ms.ShareReplica( + pb.ShareReplicaRequest(uuid=replica.uuid, share=share) + ) + assert error.value.code() == grpc.StatusCode.INTERNAL + + +@when("the user shares the replica with the same protocol") +def share_replica_with_the_same_protocol( + mayastor_instance, current_replicas, replica_uuid +): + replica = current_replicas[replica_uuid] + mayastor_instance.ms.ShareReplica( + pb.ShareReplicaRequest(uuid=replica.uuid, share=replica.share) + ) + + +@when('the user attempts to share the replica over "iscsi"') +def attempt_to_share_replica_over_iscsi(mayastor_instance, replica_uuid): + with pytest.raises(grpc.RpcError) as error: + mayastor_instance.ms.ShareReplica( + pb.ShareReplicaRequest(uuid=replica_uuid, share=pb.REPLICA_ISCSI) + ) + assert error.value.code() == grpc.StatusCode.INTERNAL + + +@when( + parsers.parse('the user shares the replica over "{share}"'), + target_fixture="share_replica", +) +def share_replica(mayastor_instance, replica_uuid, share): + 
mayastor_instance.ms.ShareReplica( + pb.ShareReplicaRequest(uuid=replica_uuid, share=share_protocol(share)) + ) + + +@when("the user unshares the replica") +def unshare_replica(mayastor_instance, replica_uuid): + mayastor_instance.ms.ShareReplica( + pb.ShareReplicaRequest(uuid=replica_uuid, share=pb.REPLICA_NONE) + ) + + +@when("the user reads from the replica") +def read_from_replica(): + raise NotImplementedError + + +@when("the user writes to the replica") +def write_to_replica(): + raise NotImplementedError + + +@then("the create replica command should fail") +def create_replica_should_fail(find_replica, replica_uuid): + assert find_replica(replica_uuid) == None + + +@then("the create replica command should succeed") +@then("the replica is created") +def create_replica_should_succeed(find_replica, replica_uuid): + assert find_replica(replica_uuid) != None + + +@then("the replica destroy command should succeed") +@then("the replica should be destroyed") +def destroy_replica_should_succeed(find_replica, replica_uuid): + assert find_replica(replica_uuid) == None + + +@then("the replica should appear in the output list") +def replica_should_appear_in_output(replica_uuid, list_replicas): + assert replica_uuid in [replica.uuid for replica in list_replicas] + + +@then("the share replica command should fail") +def share_replica_should_fail(): + pass + + +@then("the share replica command should succeed") +def share_replica_should_succeed(): + pass + + +@then('the share state is "nvmf"') +@then('the share state should change to "nvmf"') +def share_state_is_nvmf(find_replica, replica_uuid): + replica = find_replica(replica_uuid) + assert replica != None + assert replica.share == pb.REPLICA_NVMF + + +@then("the share state is unshared") +@then("the share state should change to unshared") +def share_state_is_unshared(find_replica, replica_uuid): + replica = find_replica(replica_uuid) + assert replica != None + assert replica.share == pb.REPLICA_NONE + + +@then("the stats for the replica should be listed") +def stats_for_replica_should_be_listed(replica_uuid, stat_replicas): + assert replica_uuid in [stats.uuid for stats in stat_replicas] + + +@then("the read operation should succeed") +def read_operation_should_succeed(): + raise NotImplementedError + + +@then("the write operation should succeed") +def write_operation_should_succeed(): + raise NotImplementedError diff --git a/test/python/tests/replica_uuid/docker-compose.yml b/test/python/tests/replica_uuid/docker-compose.yml new file mode 100644 index 000000000..1dfa5b03f --- /dev/null +++ b/test/python/tests/replica_uuid/docker-compose.yml @@ -0,0 +1,39 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms0: + container_name: "ms0" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.2 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 1,2 -r /tmp/ms0.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.2 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/replica_uuid/features/replica-guid.feature 
b/test/python/tests/replica_uuid/features/replica-guid.feature new file mode 100644 index 000000000..166f24d83 --- /dev/null +++ b/test/python/tests/replica_uuid/features/replica-guid.feature @@ -0,0 +1,18 @@ +Feature: Explicit UUID and name for replicas + + Background: + Given a mayastor instance + And a data pool created on mayastor instance + + Scenario: create a replica with explicit UUID and name + When a new replica is successfully created using UUID and name + Then replica block device has the given name and UUID + And replica name is returned in the response object to the caller + + Scenario: list replicas created with explicit UUIDs and names + When a new replica is successfully created using UUID and name + Then both UUID and name should be provided upon replicas enumeration + + Scenario: list replicas created using v1 api + When a new replica is successfully created using v1 api + Then replica should be successfully enumerated via new replica enumeration api diff --git a/test/python/tests/replica_uuid/test_replica_uuid.py b/test/python/tests/replica_uuid/test_replica_uuid.py new file mode 100644 index 000000000..c0b339979 --- /dev/null +++ b/test/python/tests/replica_uuid/test_replica_uuid.py @@ -0,0 +1,128 @@ +import pytest +from pytest_bdd import given, scenario, then, when +from common.mayastor import container_mod, mayastor_mod +import mayastor_pb2 as pb + +POOL_NAME = "pool0" +REPLICA_NAME = "replica-4cb9-8097" +REPLICA_UUID = "45d2fd3e-38f2-42bf-8b5f-acddccf0ff53" +REPLICA_SIZE = 1024 * 1024 * 16 + +REPLICA_NAME_V1 = "replica-56e8-443f" + + +@scenario( + "features/replica-guid.feature", "create a replica with explicit UUID and name" +) +def test_create_replica_with_guid(): + "Create a replica with explicit UUID and name" + + +@scenario( + "features/replica-guid.feature", + "list replicas created with explicit UUIDs and names", +) +def test_list_replica_with_guid(): + "List replicas created with explicit UUIDs and names" + + +@scenario( + "features/replica-guid.feature", + "list replicas created using v1 api", +) +def test_list_v1_replicas(): + "List replicas created using v1 api" + + +@pytest.fixture(scope="module") +def mayastor_instance(mayastor_mod): + yield mayastor_mod["ms0"] + + +@pytest.fixture(scope="module") +def mayastor_pool(mayastor_instance): + pool = mayastor_instance.pool_create(POOL_NAME, "malloc:///disk0?size_mb=64") + assert pool.state == pb.POOL_ONLINE + yield POOL_NAME + try: + mayastor_instance.pool_destroy(POOL_NAME) + except Exception: + pass + + +@given("a mayastor instance") +def given_instance(): + pass + + +@given("a data pool created on mayastor instance") +def given_pool(): + pass + + +@when("a new replica is successfully created using UUID and name") +def create_replica(mayastor_instance, mayastor_pool): + replica = mayastor_instance.replica_create_v2( + mayastor_pool, REPLICA_NAME, REPLICA_UUID, REPLICA_SIZE + ) + assert replica.name == REPLICA_NAME, "Replica name does not match" + assert replica.uuid == REPLICA_UUID, "Replica UUID does not match" + assert replica.size == REPLICA_SIZE, "Replica size does not match" + assert replica.pool == POOL_NAME, "Pool name does not match" + + +@then("replica block device has the given name and UUID") +def check_replica_device(mayastor_instance): + devs = [d for d in mayastor_instance.bdev_list() if d.name == REPLICA_NAME] + assert len(devs) == 1, "Replica device not found among Mayastor devices" + assert devs[0].uuid == REPLICA_UUID, "UUID for replica device does not match" + + size = devs[0].num_blocks 
* devs[0].blk_size + assert size == REPLICA_SIZE, "Replica device size does not match" + + +@then("both UUID and name should be provided upon replicas enumeration") +def check_replica_enumeration(mayastor_instance): + replicas = [ + r + for r in mayastor_instance.replica_list_v2().replicas + if r.name == REPLICA_NAME + ] + assert len(replicas) == 1, "Replica can not be found by its name" + + replica = replicas[0] + assert replica.name == REPLICA_NAME + assert replica.uuid == REPLICA_UUID + assert replica.size == REPLICA_SIZE + assert replica.pool == POOL_NAME + + +@then("replica name is returned in the response object to the caller") +def check_replica_name(): + # Already checked upon replica creation. + pass + + +@when("a new replica is successfully created using v1 api") +def create_v1_replica(mayastor_instance, mayastor_pool): + replica = mayastor_instance.replica_create( + mayastor_pool, REPLICA_NAME_V1, REPLICA_SIZE + ) + assert replica.uuid == REPLICA_NAME_V1, "Replica name does not match" + assert replica.size == REPLICA_SIZE, "Replica size does not match" + + +@then("replica should be successfully enumerated via new replica enumeration api") +def check_v1_replica(mayastor_instance): + replicas = [ + r + for r in mayastor_instance.replica_list_v2().replicas + if r.name == REPLICA_NAME_V1 + ] + assert len(replicas) == 1, "Replica can not be found by its name" + + replica = replicas[0] + assert replica.name == REPLICA_NAME_V1 + assert replica.size == REPLICA_SIZE + assert replica.pool == POOL_NAME, "Pool name does not match" + assert replica.uuid != REPLICA_NAME_V1, "Replica UUID was set to replica name" diff --git a/test/python/tests/rpc/docker-compose.yml b/test/python/tests/rpc/docker-compose.yml new file mode 100644 index 000000000..2343ffd2a --- /dev/null +++ b/test/python/tests/rpc/docker-compose.yml @@ -0,0 +1,39 @@ +# +# {SRCDIR} should point to your working tree which should be your current pwd +# + +version: '3' +services: + ms1: + container_name: "ms1" + image: rust:latest + environment: + - MY_POD_IP=10.0.0.3 + - NEXUS_NVMF_ANA_ENABLE=1 + - NEXUS_NVMF_RESV_ENABLE=1 + command: ${SRCDIR}/target/debug/mayastor -g 0.0.0.0 -l 3,4 -r /tmp/ms1.sock + networks: + mayastor_net: + ipv4_address: 10.0.0.3 + cap_add: + # NUMA related + - SYS_ADMIN + - SYS_NICE + # uring needs mmap + - IPC_LOCK + security_opt: + # we can set this to a JSON file to allow per syscall access + - seccomp=unconfined + volumes: + - ${SRCDIR}:${SRCDIR} + - /nix:/nix + - /dev/hugepages:/dev/hugepages + - /tmp:/tmp + - /var/tmp:/var/tmp +networks: + mayastor_net: + name: mayastor_net + ipam: + driver: default + config: + - subnet: "10.0.0.0/16" diff --git a/test/python/tests/rpc/test_rpc.py b/test/python/tests/rpc/test_rpc.py new file mode 100644 index 000000000..70af6c1c9 --- /dev/null +++ b/test/python/tests/rpc/test_rpc.py @@ -0,0 +1,55 @@ +from common.command import run_cmd +from common.mayastor import container_mod, mayastor_mod +import pytest +import grpc +import uuid as guid +import asyncio + + +@pytest.fixture +def pool_file(): + return "/var/tmp/pool1.img" + + +@pytest.fixture +def create_temp_file(pool_file): + run_cmd("rm -f {}".format(pool_file)) + run_cmd("truncate -s 3G {}".format(pool_file), True) + yield + run_cmd("rm -f {}".format(pool_file)) + + +@pytest.mark.asyncio +async def test_rpc_timeout(container_mod, mayastor_mod, create_temp_file, pool_file): + ms1_c = container_mod.get("ms1") + ms1 = mayastor_mod.get("ms1") + uuid = str(guid.uuid4()) + + timeout_pattern = 'destroy_replica: gRPC 
method timed out, args: DestroyReplicaRequest {{ uuid: "{}" }}'.format( + uuid + ) + + # Create a pool and a big replica (> 1 GB) + ms1.pool_create("pool1", "uring://{}".format(pool_file)) + ms1.replica_create("pool1", uuid, 2 * 1024 * 1024 * 1024) + + # Set timeout to the minimum possible value and reconnect handles. + ms1.set_timeout(1) + ms1.reconnect() + + # Destroy the replica and trigger the timeout. + with pytest.raises(grpc.RpcError) as error: + ms1.replica_destroy(uuid) + assert error.value.code() == grpc.StatusCode.INVALID_ARGUMENT + + # Should not see error message pattern, as we expect the call to be timed out. + assert str(ms1_c.logs()).find(timeout_pattern) == -1 + + # Try to destroy the replica one more time - the call should complete + # without assertions. + # We expect this call to detect the incompleted previous call and emit + # a warning. + ms1.replica_destroy(uuid) + + # Now we should see the evidence that the gRPC call was timed out. + assert str(ms1_c.logs()).find(timeout_pattern) > 0
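The new Python suites above all follow the same pytest-bdd wiring: an @scenario-decorated test is bound to a named scenario in a .feature file, the Given/When/Then lines are matched against step functions (often via parsers.parse), and a step can hand a value to later steps through target_fixture. Below is a minimal sketch of that wiring only, with a hypothetical feature and in-memory steps standing in for the real gRPC calls; none of these names are part of this change set, and it assumes pytest and pytest-bdd are installed.

    # features/example.feature (assumed to sit next to this file):
    #
    #   Feature: example
    #     Scenario: creating a pool
    #       Given a mayastor instance "ms0"
    #       When the user creates a pool "p0"
    #       Then the pool should be created

    from pytest_bdd import given, scenario, then, when, parsers


    @scenario("features/example.feature", "creating a pool")
    def test_creating_a_pool():
        "Creating a pool."


    @given(parsers.parse('a mayastor instance "{name}"'), target_fixture="instance")
    def get_instance(name):
        # Hypothetical stand-in for the mayastor_mod[name] handle used by the real suites.
        return {"name": name, "pools": set()}


    @when(parsers.parse('the user creates a pool "{pool}"'), target_fixture="pool_name")
    def create_pool(instance, pool):
        # The return value is exposed to later steps as the "pool_name" fixture.
        instance["pools"].add(pool)
        return pool


    @then("the pool should be created")
    def pool_should_be_created(instance, pool_name):
        assert pool_name in instance["pools"]

In the real suites, the Given steps mostly reuse module-scoped fixtures (for example mayastor_instance and mayastor_pool), so expensive setup is performed once per test module rather than once per scenario.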