From 8858ed358deb217c36a5b1892c839dd9decea1cf Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Tue, 13 Oct 2020 13:31:37 +0100 Subject: [PATCH 01/92] Remove unnecessary force flag which... ... applies only to NFS during volume unpublish calls fixes an issue where deleting a pod using our pvc was stuck because the umount force call was failing. --- csi/src/mount.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/csi/src/mount.rs b/csi/src/mount.rs index e76e29c0c..40bb824aa 100644 --- a/csi/src/mount.rs +++ b/csi/src/mount.rs @@ -257,9 +257,7 @@ pub fn bind_remount(target: &str, options: &[String]) -> Result { /// Unmounts a path that has previously been bind mounted. /// Should not be used for unmounting devices. pub fn bind_unmount(target: &str) -> Result<(), Error> { - let mut flags = UnmountFlags::empty(); - - flags.insert(UnmountFlags::FORCE); + let flags = UnmountFlags::empty(); unmount(target, flags)?; @@ -303,7 +301,7 @@ pub fn blockdevice_mount( /// Unmount a block device. pub fn blockdevice_unmount(target: &str) -> Result<(), Error> { - let flags = UnmountFlags::FORCE; + let flags = UnmountFlags::empty(); debug!("Unmounting block device {} (flags={:?}) ...", target, flags); From 4781e63503f8617e6da524e5f57976becfe2ed23 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Tue, 22 Sep 2020 21:27:46 +0200 Subject: [PATCH 02/92] test: add framework to start mayastor instances Its been rather complicated to start/stop multiple mayastors within a single test case. This is true for the BDD tests as well as the unit tests. The bookkeeping of the processes itself is a PITA, but things get more complicated when we want to send signals, pause instances and the likes. We need to be able to do this simply and easily for more sophisticated recovery logic. So using containers seems to be a logical, and instead of using YAML files to compose targets, I've opted for using the docker remote API directly. 
Note, this API can be used on anything that runs docker, local or remote. However, we use "FROM SCRATCH" and do not make use of a base image, and start the artefact located in /target directly. This docker API is rather verbose, and so you will see a lot of Some() and Default, but once you get over that it is surprisingly well written. The reason for not simply doing a fork of compose up/down is that we need/want to "do things" with the containers as well. Consider the case where we want to emulate a nexus losing its replica. To do this we would need to call pause -- but pause whom? What if we want to send a grpc command to one of the targets -- send it to whom? We could have gone the easy route, which is to hard-code everything, but that would get us into duplicated constants between config files and exec calls. The test suites are now driven by tokio, and have a fundamental difference compared to the previous tests. Before this commit, when you did something like mayastor_start(|| whatever()), the test thread itself was running mayastor. That is now changed: the test thread itself is not running mayastor, and you keep control over it without the need for polling or blocking calls at all. An example is given in mayastor_compose_basic.rs, and I've adapted an existing test as well. 
fix ::rpc temp --- Cargo.lock | 803 +++++++++++++++-------- Cargo.toml | 3 + mayastor/Cargo.toml | 5 +- mayastor/src/bdev/nexus/nexus_channel.rs | 2 +- mayastor/src/core/reactor.rs | 27 +- mayastor/src/core/thread.rs | 2 +- mayastor/src/grpc/bdev_grpc.rs | 3 +- mayastor/src/grpc/mayastor_grpc.rs | 2 +- mayastor/src/grpc/mod.rs | 4 +- mayastor/src/grpc/nexus_grpc.rs | 2 +- mayastor/src/pool.rs | 2 +- mayastor/src/replica.rs | 2 +- mayastor/tests/common/bdev_io.rs | 4 +- mayastor/tests/common/compose.rs | 658 +++++++++++++++++++ mayastor/tests/common/mod.rs | 7 +- mayastor/tests/mayastor_compose_basic.rs | 96 +++ mayastor/tests/replica_timeout.rs | 221 +++---- mayastor/tests/reset.rs | 143 ++-- nix/pkgs/mayastor/default.nix | 2 +- spdk-sys/build.rs | 1 + 20 files changed, 1464 insertions(+), 525 deletions(-) create mode 100644 mayastor/tests/common/compose.rs create mode 100644 mayastor/tests/mayastor_compose_basic.rs diff --git a/Cargo.lock b/Cargo.lock index a44200530..fe6170c3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -44,9 +44,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b" +checksum = "a1fd36ffbb1fb7c834eac128ea8d0e310c5aeb635548f9d58861e1308d46e71c" [[package]] name = "arc-swap" @@ -55,26 +55,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] -name = "assert_matches" -version = "1.3.0" +name = "arrayref" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] -name = "async-barrier" -version = "1.0.1" +name = "arrayvec" +version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06293698675eb72e1155867e5982f199d6b6c230dca35bc5ffd9852f470c22a" -dependencies = [ - "async-mutex", - "event-listener", -] +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "assert_matches" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" [[package]] name = "async-channel" -version = "1.4.2" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21279cfaa4f47df10b1816007e738ca3747ef2ee53ffc51cdbf57a8bb266fee3" +checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" dependencies = [ "concurrent-queue", "event-listener", @@ -83,9 +85,9 @@ dependencies = [ [[package]] name = "async-dup" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c23bdd6ada8f5f586141f56fd8ea7f60700be462154325cfbbb674126688a51a" +checksum = "7427a12b8dc09291528cfb1da2447059adb4a257388c2acd6497a79d55cf6f7c" dependencies = [ "futures-io", "simple-mutex", @@ -97,7 +99,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" dependencies = [ - "async-task 4.0.2", + "async-task", "concurrent-queue", "fastrand", "futures-lite", @@ -107,23 +109,25 @@ dependencies = [ [[package]] name = "async-fs" -version = "1.3.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3572236ba37147ca2b674a0bd5afd20aec0cd925ab125ab6fad6543960f9002" +checksum = "8b3ca4f8ff117c37c278a2f7415ce9be55560b846b5bc4412aaa5d29c1c3dae2" dependencies = [ + "async-lock", "blocking", "futures-lite", ] [[package]] name = "async-io" -version = "1.1.5" +version = "1.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e727cebd055ab2861a854f79def078c4b99ea722d54c6800a0e274389882d4c" +checksum = "d54bc4c1c7292475efb2253227dbcfad8fe1ca4c02bc62c510cc2f3da5c4704e" dependencies = [ "concurrent-queue", "fastrand", "futures-lite", + "libc", "log", "nb-connect", "once_cell", @@ -131,34 +135,32 @@ dependencies = [ "polling", "vec-arena", "waker-fn", + "winapi 0.3.9", ] [[package]] name = "async-lock" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76000290eb3c67dfe4e2bdf6b6155847f8e16fc844377a7bd0b5e97622656362" +checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" dependencies = [ - "async-barrier", - "async-mutex", - "async-rwlock", - "async-semaphore", + "event-listener", ] [[package]] name = "async-mutex" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66941c2577c4fa351e4ce5fdde8f86c69b88d623f3b955be1bc7362a23434632" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" dependencies = [ "event-listener", ] [[package]] name = "async-net" -version = "1.4.6" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14a5335056541826f855bf95b936df9788adbacf94b15ef7104029f7fff3e82a" +checksum = "ee4c3668eb091d781e97f0026b5289b457c77d407a85749a9bb4c057456c428f" dependencies = [ "async-io", "blocking", @@ -168,13 +170,13 @@ dependencies = [ [[package]] name = "async-process" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb915df28b8309139bd9c9c700d84c20e5c21385d05378caa84912332d0f6a1" +checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" dependencies = [ "async-io", "blocking", - "cfg-if", + "cfg-if 0.1.10", "event-listener", "futures-lite", "once_cell", @@ -182,25 +184,6 @@ dependencies = [ "winapi 0.3.9", ] 
-[[package]] -name = "async-rwlock" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806b1cc0828c2b1611ccbdd743fc0cc7af09009e62c95a0501c1e5da7b142a22" -dependencies = [ - "async-mutex", - "event-listener", -] - -[[package]] -name = "async-semaphore" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538c756e85eb6ffdefaec153804afb6da84b033e2e5ec3e9d459c34b4bf4d3f6" -dependencies = [ - "event-listener", -] - [[package]] name = "async-stream" version = "0.2.1" @@ -217,17 +200,11 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] -[[package]] -name = "async-task" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3" - [[package]] name = "async-task" version = "4.0.2" @@ -249,13 +226,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.38" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1a4a2f97ce50c9d0282c1468816208588441492b40d813b2e0419c22c05e7f" +checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -289,12 +266,12 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "707b586e0e2f247cbde68cdd2c3ce69ea7b7be43e1c5b426e37c9319c4b9838e" dependencies = [ "addr2line", - 
"cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide", "object", @@ -337,13 +314,13 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.54.1" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4d49b80beb70d76cdac92f5681e666f9a697c737c4f4117a67229a0386dc736" +checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" dependencies = [ "bitflags", "cexpr", - "cfg-if", + "cfg-if 0.1.10", "clang-sys", "clap", "env_logger", @@ -351,7 +328,7 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", "regex", "rustc-hash", @@ -365,6 +342,17 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "blake2b_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + [[package]] name = "blkid" version = "0.2.1" @@ -406,16 +394,63 @@ dependencies = [ [[package]] name = "blocking" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2640778f8053e72c11f621b0a5175a0560a269282aa98ed85107773ab8e2a556" +checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" dependencies = [ "async-channel", + "async-task", "atomic-waker", "fastrand", "futures-lite", "once_cell", - "waker-fn", +] + +[[package]] +name = "bollard" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98e70e4f2f2dec6396a87cd2a9acc0ac14d5aa0941a5f4a287bb25ae3a9ca183" +dependencies = [ + "base64 0.12.3", + "bollard-stubs", + "bytes 0.5.6", + "chrono", + "ct-logs", + "dirs", + "futures-core", + "futures-util", + "hex", + "http 0.2.1", + "hyper", + 
"hyper-rustls", + "hyper-unix-connector", + "log", + "mio-named-pipes", + "pin-project", + "rustls", + "rustls-native-certs", + "serde", + "serde_derive", + "serde_json", + "serde_urlencoded", + "thiserror", + "tokio", + "tokio-util 0.3.1", + "url", + "webpki-roots", + "winapi 0.3.9", +] + +[[package]] +name = "bollard-stubs" +version = "1.40.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c0039b619b9795bb6203a1ad7156b8418e38d4fdb857bf60984746b5a0fdb04" +dependencies = [ + "chrono", + "serde", + "serde_with", ] [[package]] @@ -478,9 +513,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" +checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" [[package]] name = "cexpr" @@ -497,15 +532,24 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chrono" -version = "0.4.15" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ + "libc", "num-integer", "num-traits 0.2.12", + "serde", "time", + "winapi 0.3.9", ] [[package]] @@ -574,6 +618,12 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + [[package]] name = "core-foundation" version = "0.7.0" @@ -605,7 +655,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -615,12 +665,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ee0cc8804d5393478d743b035099520087a5186f3b93fa58cec08fa62407b6" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" dependencies = [ - "cfg-if", "crossbeam-utils", + "maybe-uninit", ] [[package]] @@ -641,7 +691,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg 1.0.1", - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "lazy_static", "maybe-uninit", @@ -655,7 +705,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "maybe-uninit", ] @@ -673,7 +723,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg 1.0.1", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", ] @@ -724,6 +774,15 @@ dependencies = [ "which", ] +[[package]] +name = "ct-logs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e" +dependencies = [ + "sct", +] + [[package]] name = "curve25519-dalek" version = "1.2.4" @@ -743,8 +802,18 @@ version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fcfbcb0c5961907597a7d1148e3af036268f2b773886b8bb3eeb1e1281d3d3d6" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.9.0", + "darling_macro 0.9.0", +] + +[[package]] +name = "darling" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" +dependencies = [ + "darling_core 0.10.2", + "darling_macro 0.10.2", ] [[package]] @@ -761,17 +830,42 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "darling_core" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.24", + "quote 1.0.7", + "strsim 0.9.3", + "syn 1.0.44", +] + [[package]] name = "darling_macro" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6d8dac1c6f1d29a41c4712b4400f878cb4fcc4c7628f298dd75038e024998d1" dependencies = [ - "darling_core", + "darling_core 0.9.0", "quote 0.6.13", "syn 0.15.44", ] +[[package]] +name = "darling_macro" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" +dependencies = [ + "darling_core 0.10.2", + "quote 1.0.7", + "syn 1.0.44", +] + [[package]] name = "data-encoding" version = "2.3.0" @@ -784,7 +878,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ac53fa6a3cda160df823a9346442525dcaf1e171999a1cf23e67067e4fd64d4" dependencies = [ - "darling", + "darling 0.9.0", "derive_builder_core", "proc-macro2 0.4.30", "quote 0.6.13", @@ -797,7 +891,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0288a23da9333c246bb18c143426074a6ae96747995c5819d2947b64cd942b37" 
dependencies = [ - "darling", + "darling 0.9.0", "proc-macro2 0.4.30", "quote 0.6.13", "syn 0.15.44", @@ -822,13 +916,33 @@ dependencies = [ "generic-array", ] +[[package]] +name = "dirs" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "142995ed02755914747cc6ca76fc7e4583cd18578746716d0508ea6ed558b9ff" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + [[package]] name = "dns-lookup" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f69635ffdfbaea44241d7cca30a5e3a2e1c892613a6a8ad8ef03deeb6803480" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "socket2", "winapi 0.3.9", @@ -871,9 +985,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "enum-primitive-derive" @@ -906,10 +1020,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22deed3a8124cff5fa835713fa105621e43bbdc46690c3a6b68328a012d350d4" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", "rustversion", - "syn 1.0.38", + "syn 1.0.44", "synstructure", ] @@ -936,9 +1050,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "2.4.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cd41440ae7e4734bbd42302f63eaba892afc93a3912dad84006247f0dedb0e" +checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" [[package]] name = "failure" @@ 
-956,9 +1070,9 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", "synstructure", ] @@ -970,9 +1084,12 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "1.3.5" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c85295147490b8fcf2ea3d104080a105a8b2c63f9c319e82c02d8e952388919" +checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" +dependencies = [ + "instant", +] [[package]] name = "fixedbitset" @@ -1020,9 +1137,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "5d8e3078b7b2a8a671cb7a3d17b4760e4181ea243227776ba83fd043b4ca034e" dependencies = [ "futures-channel", "futures-core", @@ -1035,9 +1152,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "a7a4d35f7401e948629c9c3d6638fb9bf94e0b2121e96c3b428cc4e631f3eb74" dependencies = [ "futures-core", "futures-sink", @@ -1045,15 +1162,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" +checksum = "d674eaa0056896d5ada519900dbf97ead2e46a7b6621e8160d79e2f2e1e2784b" [[package]] name = "futures-executor" -version = "0.3.5" +version = 
"0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "cc709ca1da6f66143b8c9bec8e6260181869893714e9b5a490b169b0414144ab" dependencies = [ "futures-core", "futures-task", @@ -1062,15 +1179,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "5fc94b64bb39543b4e432f1790b6bf18e3ee3b74653c5449f63310e9a74b123c" [[package]] name = "futures-lite" -version = "1.8.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0db18c5f58083b54b0c416638ea73066722c2815c1e54dd8ba85ee3def593c3a" +checksum = "381a7ad57b1bad34693f63f6f377e1abded7a9c85c9d3eb6771e11c60aaadab9" dependencies = [ "fastrand", "futures-core", @@ -1083,27 +1200,27 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "f57ed14da4603b2554682e9f2ff3c65d7567b53188db96cb71538217fc64581b" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "0d8764258ed64ebc5d9ed185cf86a95db5cac810269c5d20ececb32e0088abbd" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +checksum = 
"4dd26820a9f3637f1302da8bceba3ff33adbe53464b54ca24d4e2d4f1db30f94" dependencies = [ "once_cell", ] @@ -1116,9 +1233,9 @@ checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "8a894a0acddba51a2d49a6f4263b1e64b8c579ece8af50fa86503d52cd1eea34" dependencies = [ "futures-channel", "futures-core", @@ -1151,13 +1268,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -1183,9 +1300,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34a97a52fdee1870a34fa6e4b77570cba531b27d1838874fef4429a791a3d657" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -1215,12 +1332,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" -dependencies = [ - "autocfg 1.0.1", -] +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" @@ -1233,13 +1347,19 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = 
"5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" dependencies = [ "libc", ] +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + [[package]] name = "http" version = "0.1.21" @@ -1288,6 +1408,12 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + [[package]] name = "humantime" version = "1.3.0" @@ -1299,9 +1425,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.7" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835" dependencies = [ "bytes 0.5.6", "futures-channel", @@ -1311,16 +1437,48 @@ dependencies = [ "http 0.2.1", "http-body 0.3.1", "httparse", + "httpdate", "itoa", "pin-project", "socket2", - "time", "tokio", "tower-service", "tracing", "want", ] +[[package]] +name = "hyper-rustls" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +dependencies = [ + "bytes 0.5.6", + "ct-logs", + "futures-util", + "hyper", + "log", + "rustls", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "webpki", +] + +[[package]] +name = "hyper-unix-connector" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42b66be14087ec25c5150c9d1228a1e9bbbfe7fe2506ff85daed350724980319" +dependencies = [ + "anyhow", + "futures-util", + "hex", + "hyper", + "pin-project", + 
"tokio", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1340,14 +1498,23 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ "autocfg 1.0.1", "hashbrown", ] +[[package]] +name = "instant" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66" +dependencies = [ + "cfg-if 0.1.10", +] + [[package]] name = "io-uring" version = "0.3.5" @@ -1373,6 +1540,15 @@ dependencies = [ "libc", ] +[[package]] +name = "ipnetwork" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" +dependencies = [ + "serde", +] + [[package]] name = "itertools" version = "0.8.2" @@ -1399,9 +1575,9 @@ checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "js-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" +checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" dependencies = [ "wasm-bindgen", ] @@ -1451,9 +1627,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.77" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235" +checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" [[package]] name = "libloading" @@ -1487,7 +1663,7 @@ version = "0.4.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -1520,9 +1696,10 @@ name = "mayastor" version = "0.1.0" dependencies = [ "assert_matches", - "async-task 3.0.0", + "async-task", "async-trait", "bincode", + "bollard", "byte-unit", "bytes 0.4.12", "chrono", @@ -1538,6 +1715,7 @@ dependencies = [ "git-version", "io-uring", "ioctl-gen", + "ipnetwork", "jsonrpc", "libc", "log", @@ -1587,20 +1765,21 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memoffset" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ "autocfg 1.0.1", ] [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" dependencies = [ "adler", + "autocfg 1.0.1", ] [[package]] @@ -1609,7 +1788,7 @@ version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", @@ -1669,9 +1848,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "nats" @@ -1702,9 +1881,9 @@ dependencies = [ [[package]] 
name = "nb-connect" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "701f47aeb98466d0a7fea67e2c2f667c33efa1f2e4fd7f76743aac1153196f72" +checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" dependencies = [ "libc", "winapi 0.3.9", @@ -1712,11 +1891,11 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -1729,7 +1908,7 @@ checksum = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 0.1.10", "libc", "void", ] @@ -1742,7 +1921,7 @@ checksum = "dd0eaf8df8bab402257e0a5c17a254e4cc1f72a93588a1ddfb5d356c801aa7cb" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 0.1.10", "libc", "void", ] @@ -1839,9 +2018,9 @@ dependencies = [ [[package]] name = "object" -version = "0.20.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693" [[package]] name = "once_cell" @@ -1905,29 +2084,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.23" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" +checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.23" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "e555d9e657502182ac97b539fb3dae8b79cda19e3e4f8ffb5e8de4f18df93c95" [[package]] name = "pin-utils" @@ -1943,14 +2122,14 @@ checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "polling" -version = "1.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" +checksum = "ab773feb154f12c49ffcfd66ab8bdcf9a1843f950db48b0d8be9d4393783b058" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "log", - "wepoll-sys-stjepang", + "wepoll-sys", "winapi 0.3.9", ] @@ -1967,9 +2146,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", "version_check", ] @@ -1979,7 +2158,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", "version_check", ] @@ -2007,9 +2186,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.19" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +checksum = 
"1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" dependencies = [ "unicode-xid 0.2.1", ] @@ -2060,9 +2239,9 @@ checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" dependencies = [ "anyhow", "itertools 0.8.2", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -2102,7 +2281,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", ] [[package]] @@ -2277,11 +2456,22 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_users" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +dependencies = [ + "getrandom", + "redox_syscall", + "rust-argon2", +] + [[package]] name = "regex" -version = "1.3.9" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "36f45b719a674bf4b828ff318906d6c133264c793eff7a41e30074a45b5099e2" dependencies = [ "aho-corasick", "memchr", @@ -2301,9 +2491,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "c17be88d9eaa858870aa5e48cc406c206e4600e983fc4f06bbe5750d93d09761" [[package]] name = "remove_dir_all" @@ -2353,11 +2543,23 @@ dependencies = [ "fsio", ] +[[package]] +name = "rust-argon2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +dependencies = [ + "base64 0.12.3", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "b2610b7f643d18c87dff3b489950269617e6601a51f1f05aa5daefee36f64f0b" [[package]] name = "rustc-hash" @@ -2396,9 +2598,9 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -2458,35 +2660,69 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54c9a88f2da7238af84b5101443f0c0d0a3bbdc455e34a5c9497b1903ed55d5" +checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "609feed1d0a73cc36a0182a840a9b37b4a82f0b1150369f0536a9e3f2a31dc48" +checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] name = "serde_json" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c" +checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" dependencies = [ "itoa", "ryu", "serde", ] +[[package]] +name = "serde_urlencoded" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +dependencies = [ + "dtoa", + "itoa", + "serde", + "url", +] + +[[package]] +name = "serde_with" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bac272128fb3b1e98872dca27a05c18d8b78b9bd089d3edb7b5871501b50bce" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c747a9ab2e833b807f74f6b6141530655010bfa9c9c06d5508bce75c8f8072f" +dependencies = [ + "darling 0.10.2", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.44", +] + [[package]] name = "serde_yaml" version = "0.8.13" @@ -2603,9 +2839,9 @@ checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" [[package]] name = "smol" -version = "1.2.2" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca2722989073e89917a575862fb49dba3321af152f0cf4a4164d9482aabdf28" +checksum = "aaf8ded16994c0ae59596c6e4733c76faeb0533c26fd5ca1b1bc89271a049a66" dependencies = [ "async-channel", "async-executor", @@ -2621,9 +2857,9 @@ dependencies = [ [[package]] name = "snafu" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f5aed652511f5c9123cf2afbe9c244c29db6effa2abb05c866e965c82405ce" +checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09" dependencies = [ "doc-comment", "snafu-derive", @@ -2631,22 +2867,22 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf8f7d5720104a9df0f7076a8682024e958bba0fe9848767bb44f251f3648e9" +checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" dependencies = [ - "proc-macro2 1.0.19", 
+ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "redox_syscall", "winapi 0.3.9", @@ -2678,11 +2914,17 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" + [[package]] name = "structopt" -version = "0.3.16" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5472fb24d7e80ae84a7801b7978f95a19ec32cb1876faea59ab711eb901976" +checksum = "a7a7159e7d0dbcab6f9c980d7971ef50f3ff5753081461eeda120d5974a4ee95" dependencies = [ "clap", "lazy_static", @@ -2691,15 +2933,15 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0eb37335aeeebe51be42e2dc07f031163fbabfa6ac67d7ea68b5c2f68d5f99" +checksum = "8fc47de4dfba76248d1e9169ccff240eea2a4dc1e34e309b95b2393109b4b383" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -2741,11 +2983,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.38" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" 
dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", "unicode-xid 0.2.1", ] @@ -2765,9 +3007,9 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", "unicode-xid 0.2.1", ] @@ -2792,7 +3034,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand 0.7.3", "redox_syscall", @@ -2818,6 +3060,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "thiserror" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.44", +] + [[package]] name = "thread_local" version = "1.0.1" @@ -2829,11 +3091,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] @@ -2873,9 +3136,21 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 
1.0.7", - "syn 1.0.38", + "syn 1.0.44", +] + +[[package]] +name = "tokio-rustls" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +dependencies = [ + "futures-core", + "rustls", + "tokio", + "webpki", ] [[package]] @@ -2942,10 +3217,10 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0436413ba71545bcc6c2b9a0f9d78d72deb0123c6a75ccdfe7c056f9930f5e52" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "prost-build", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] @@ -3128,12 +3403,13 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d79ca061b032d6ce30c660fded31189ca0b9922bf483cd70759f13a2d86786c" +checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "log", + "pin-project-lite", "tracing-attributes", "tracing-core", ] @@ -3144,16 +3420,16 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", ] [[package]] name = "tracing-core" -version = "0.1.14" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db63662723c316b43ca36d833707cc93dff82a02ba3d7e354f342682cc8b3545" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" dependencies = [ "lazy_static", ] @@ -3191,9 +3467,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"abd165311cc4d7a555ad11cc77a37756df836182db0d81aac908c8184c584f40" +checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -3205,6 +3481,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", "tracing-serde", @@ -3362,36 +3639,42 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" dependencies = [ "bumpalo", "lazy_static", "log", - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" dependencies = [ "quote 1.0.7", "wasm-bindgen-macro-support", @@ -3399,28 +3682,28 @@ 
dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ - "proc-macro2 1.0.19", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.38", + "syn 1.0.44", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.67" +version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" +checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" [[package]] name = "web-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" +checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" dependencies = [ "js-sys", "wasm-bindgen", @@ -3446,10 +3729,10 @@ dependencies = [ ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "wepoll-sys" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "142bc2cba3fe88be1a8fcb55c727fa4cd5b0cf2d7438722792e22f26f04bc1e0" dependencies = [ "cc", ] @@ -3528,6 +3811,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" diff --git a/Cargo.toml b/Cargo.toml index 35a6fd066..09195831d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,9 @@ h2 = { git = 
"https://github.com/gila/h2", branch = "v0.2.6"} partition-identity = { git = "https://github.com/openebs/partition-identity.git" } +[profile.dev] +panic = "abort" + [workspace] members = [ "csi", diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 8397c0a4d..45f5d505a 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -33,7 +33,7 @@ name = "casperf" path = "src/bin/casperf.rs" [dependencies] -async-task = "3.0" +async-task = "4.0.2" async-trait = "0.1.36" bincode = "1.2" byte-unit = "3.0.1" @@ -77,7 +77,8 @@ udev = "0.4" url = "2.1" smol = "1.0.0" dns-lookup = "1.0.4" - +ipnetwork = "0.17.0" +bollard = "0.8.0" [dependencies.rpc] path = "../rpc" diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index cce09bfe1..7b3400edc 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -70,7 +70,7 @@ impl NexusChannelInner { info!( "{}(tid:{:?}), refreshing IO channels", nexus.name, - std::thread::current().name().unwrap() + std::thread::current().name().unwrap_or("none") ); trace!( diff --git a/mayastor/src/core/reactor.rs b/mayastor/src/core/reactor.rs index 1b38fd075..7251f462d 100644 --- a/mayastor/src/core/reactor.rs +++ b/mayastor/src/core/reactor.rs @@ -115,7 +115,7 @@ pub struct Reactor { thread_local! 
{ /// This queue holds any in coming futures from other cores - static QUEUE: (Sender>, Receiver>) = unbounded(); + static QUEUE: (Sender, Receiver) = unbounded(); } impl Reactors { @@ -304,7 +304,7 @@ impl Reactor { /// receive futures if any fn receive_futures(&self) { self.rx.try_iter().for_each(|m| { - self.spawn_local(m); + self.spawn_local(m).detach(); }); } @@ -316,8 +316,9 @@ impl Reactor { self.sx.send(Box::pin(future)).unwrap(); } - /// spawn a future locally on this core - pub fn spawn_local(&self, future: F) -> async_task::JoinHandle + /// spawn a future locally on this core; note that you can *not* use the + /// handle to complete the future with a different runtime. + pub fn spawn_local(&self, future: F) -> async_task::Task where F: Future + 'static, R: 'static, @@ -327,12 +328,12 @@ impl Reactor { // busy etc. let schedule = |t| QUEUE.with(|(s, _)| s.send(t).unwrap()); - let (task, handle) = async_task::spawn_local(future, schedule, ()); - task.schedule(); + let (runnable, task) = async_task::spawn_local(future, schedule); + runnable.schedule(); // the handler typically has no meaning to us unless we want to wait for // the spawned future to complete before we continue which is // done, in example with ['block_on'] - handle + task } /// spawn a future locally on the current core block until the future is @@ -345,17 +346,17 @@ impl Reactor { let _thread = Mthread::current(); Mthread::get_init().enter(); let schedule = |t| QUEUE.with(|(s, _)| s.send(t).unwrap()); - let (task, handle) = async_task::spawn_local(future, schedule, ()); + let (runnable, task) = async_task::spawn_local(future, schedule); - let waker = handle.waker(); + let waker = runnable.waker(); let cx = &mut Context::from_waker(&waker); - pin_utils::pin_mut!(handle); - task.schedule(); + pin_utils::pin_mut!(task); + runnable.schedule(); let reactor = Reactors::master(); loop { - match handle.as_mut().poll(cx) { + match task.as_mut().poll(cx) { Poll::Ready(output) => { 
Mthread::get_init().exit(); _thread.map(|t| { @@ -366,7 +367,7 @@ impl Reactor { ); t.enter() }); - return output; + return Some(output); } Poll::Pending => { reactor.poll_once(); diff --git a/mayastor/src/core/thread.rs b/mayastor/src/core/thread.rs index a97ef8723..071b30aeb 100644 --- a/mayastor/src/core/thread.rs +++ b/mayastor/src/core/thread.rs @@ -196,7 +196,7 @@ impl Mthread { }) } - fn unaffinitize() { + pub fn unaffinitize() { unsafe { let mut set: libc::cpu_set_t = std::mem::zeroed(); for i in 0 .. libc::sysconf(libc::_SC_NPROCESSORS_ONLN) { diff --git a/mayastor/src/grpc/bdev_grpc.rs b/mayastor/src/grpc/bdev_grpc.rs index 5953d5151..97b00fc81 100644 --- a/mayastor/src/grpc/bdev_grpc.rs +++ b/mayastor/src/grpc/bdev_grpc.rs @@ -135,7 +135,6 @@ impl BdevRpc for BdevSvc { _ => unreachable!(), } .await - .unwrap() .map(|share| { let bdev = Bdev::lookup_by_name(&name).unwrap(); Response::new(BdevShareReply { @@ -158,7 +157,7 @@ impl BdevRpc for BdevSvc { .map_err(|e| Status::internal(e.to_string())); }); - hdl.await.unwrap(); + hdl.await; Ok(Response::new(Null {})) }) .await diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 74efd70cd..41bc13cd2 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -11,7 +11,7 @@ use tonic::{Request, Response, Status}; use tracing::instrument; -use rpc::mayastor::*; +use ::rpc::mayastor::*; use crate::{ bdev::{ diff --git a/mayastor/src/grpc/mod.rs b/mayastor/src/grpc/mod.rs index 9c96e8cec..cfd0d0e3d 100644 --- a/mayastor/src/grpc/mod.rs +++ b/mayastor/src/grpc/mod.rs @@ -32,7 +32,7 @@ fn print_error_chain(err: &dyn std::error::Error) -> String { macro_rules! 
locally { ($body:expr) => {{ let hdl = crate::core::Reactors::current().spawn_local($body); - match hdl.await.unwrap() { + match hdl.await { Ok(res) => res, Err(err) => { error!("{}", crate::grpc::print_error_chain(&err)); @@ -56,8 +56,8 @@ pub fn rpc_call(future: G) -> Result, tonic::Status> where G: Future> + 'static, I: 'static, - A: 'static + From, L: Into + Error + 'static, + A: 'static + From, { assert_eq!(Cores::current(), Cores::first()); Reactor::block_on(future) diff --git a/mayastor/src/grpc/nexus_grpc.rs b/mayastor/src/grpc/nexus_grpc.rs index 8ffc361a1..ba4b0c2dc 100644 --- a/mayastor/src/grpc/nexus_grpc.rs +++ b/mayastor/src/grpc/nexus_grpc.rs @@ -1,6 +1,6 @@ //! Helpers related to nexus grpc methods. -use rpc::mayastor as rpc; +use ::rpc::mayastor as rpc; use std::convert::From; use uuid::Uuid; diff --git a/mayastor/src/pool.rs b/mayastor/src/pool.rs index 2f235326f..f2e69caa4 100644 --- a/mayastor/src/pool.rs +++ b/mayastor/src/pool.rs @@ -5,7 +5,7 @@ use std::{ffi::CStr, os::raw::c_char}; -use rpc::mayastor as rpc; +use ::rpc::mayastor as rpc; use spdk_sys::{ lvol_store_bdev, spdk_bs_free_cluster_count, diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index 6ae101436..af2bfe266 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -5,7 +5,7 @@ #![allow(dead_code)] use std::ffi::CStr; -use rpc::mayastor as rpc; +use ::rpc::mayastor as rpc; use snafu::{ResultExt, Snafu}; use spdk_sys::{spdk_lvol, vbdev_lvol_get_from_bdev}; diff --git a/mayastor/tests/common/bdev_io.rs b/mayastor/tests/common/bdev_io.rs index f2fd0c1e1..47da3aa84 100644 --- a/mayastor/tests/common/bdev_io.rs +++ b/mayastor/tests/common/bdev_io.rs @@ -5,7 +5,7 @@ pub async fn write_some( offset: u64, fill: u8, ) -> Result<(), CoreError> { - let h = BdevHandle::open(nexus_name, true, false).unwrap(); + let h = BdevHandle::open(nexus_name, true, false)?; let mut buf = h.dma_malloc(512).expect("failed to allocate buffer"); buf.fill(fill); @@ -21,7 +21,7 @@ 
pub async fn read_some( offset: u64, fill: u8, ) -> Result<(), CoreError> { - let h = BdevHandle::open(nexus_name, true, false).unwrap(); + let h = BdevHandle::open(nexus_name, true, false)?; let mut buf = h.dma_malloc(1024).expect("failed to allocate buffer"); let slice = buf.as_mut_slice(); diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs new file mode 100644 index 000000000..d9dad4812 --- /dev/null +++ b/mayastor/tests/common/compose.rs @@ -0,0 +1,658 @@ +use std::{ + collections::HashMap, + future::Future, + net::{Ipv4Addr, SocketAddr, TcpStream}, + thread, + time::Duration, +}; + +use crossbeam::crossbeam_channel::bounded; + +use bollard::{ + container::{ + Config, + CreateContainerOptions, + ListContainersOptions, + LogsOptions, + NetworkingConfig, + RemoveContainerOptions, + StopContainerOptions, + }, + errors::Error, + network::{CreateNetworkOptions, ListNetworksOptions}, + service::{ + ContainerSummaryInner, + EndpointIpamConfig, + EndpointSettings, + HostConfig, + Ipam, + Mount, + MountTypeEnum, + Network, + }, + Docker, +}; +use futures::TryStreamExt; +use ipnetwork::Ipv4Network; +use tokio::sync::oneshot::channel; +use tonic::transport::Channel; + +use crate::common; +use ::rpc::mayastor::{ + bdev_rpc_client::BdevRpcClient, + mayastor_client::MayastorClient, +}; +use bollard::models::ContainerInspectResponse; +use mayastor::core::{ + mayastor_env_stop, + MayastorCliArgs, + MayastorEnvironment, + Reactor, + Reactors, +}; + +#[derive(Clone)] +pub struct RpcHandle { + pub name: String, + pub endpoint: SocketAddr, + mayastor: MayastorClient, + pub bdev: BdevRpcClient, +} + +impl RpcHandle { + /// connect to the containers and construct a handle + async fn connect(name: String, endpoint: SocketAddr) -> Self { + loop { + if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) + .is_ok() + { + break; + } else { + thread::sleep(Duration::from_millis(101)); + } + } + + let mayastor = + 
MayastorClient::connect(format!("http://{}", endpoint.to_string())) + .await + .unwrap(); + let bdev = + BdevRpcClient::connect(format!("http://{}", endpoint.to_string())) + .await + .unwrap(); + + Self { + name, + mayastor, + bdev, + endpoint, + } + } +} + +pub struct Builder { + /// name of the experiment this name will be used as a network and labels + /// this way we can "group" all objects within docker to match this test + /// test. It is highly recommend you use a sane name for this as it will + /// help you during debugging + name: String, + /// containers we want to create, note these are mayastor containers + /// only + containers: Vec, + /// the network for the tests used + network: String, + /// delete the container and network when dropped + clean: bool, +} + +impl Default for Builder { + fn default() -> Self { + Builder::new() + } +} + +impl Builder { + /// construct a new builder for `[ComposeTest'] + pub fn new() -> Self { + Self { + name: "".to_string(), + containers: Default::default(), + network: "10.1.0.0".to_string(), + clean: false, + } + } + + /// set the network for this test + pub fn network(mut self, network: &str) -> Builder { + self.network = network.to_owned(); + self + } + + /// the name to be used as labels and network name + pub fn name(mut self, name: &str) -> Builder { + self.name = name.to_owned(); + self + } + + /// add a mayastor container with a name + pub fn add_container(mut self, name: &str) -> Builder { + self.containers.push(name.to_owned()); + self + } + + /// clean on drop? 
+ pub fn with_clean(mut self, enable: bool) -> Builder { + self.clean = enable; + self + } + + /// build the config and start the containers + pub async fn build( + self, + ) -> Result> { + let net: Ipv4Network = self.network.parse()?; + + let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + let srcdir = path.parent().unwrap().to_string_lossy().into(); + let binary = format!("{}/target/debug/mayastor", srcdir); + + let docker = Docker::connect_with_unix_defaults()?; + + let mut cfg = HashMap::new(); + cfg.insert( + "Subnet".to_string(), + format!("{}/{}", net.network().to_string(), net.prefix()), + ); + cfg.insert("Gateway".into(), net.nth(1).unwrap().to_string()); + + let ipam = Ipam { + driver: Some("default".into()), + config: Some(vec![cfg]), + options: None, + }; + + let mut compose = ComposeTest { + name: self.name.clone(), + srcdir, + binary, + docker, + network_id: "".to_string(), + containers: Default::default(), + ipam, + label: format!("io.mayastor.test.{}", self.name), + clean: self.clean, + }; + + compose.network_id = + compose.network_create().await.map_err(|e| e.to_string())?; + + // containers are created where the IPs are ordinal + for (i, name) in self.containers.iter().enumerate() { + compose + .create_container( + name, + &net.nth((i + 2) as u32).unwrap().to_string(), + ) + .await?; + } + + compose.start_all().await?; + Ok(compose) + } +} + +/// +/// Some types to avoid confusion when +/// +/// different networks are referred to, internally as networkId in docker +type NetworkId = String; +/// container name +type ContainerName = String; +/// container ID +type ContainerId = String; + +#[derive(Clone)] +pub struct ComposeTest { + /// used as the network name + name: String, + /// the source dir the tests are run in + srcdir: String, + /// the binary we are using relative to srcdir + binary: String, + /// handle to the docker daemon + docker: Docker, + /// the network id is used to attach containers to networks + network_id: 
NetworkId, + /// the name of containers and their (IDs, Ipv4) we have created + /// perhaps not an ideal data structure, but we can improve it later + /// if we need to + containers: HashMap, + /// the default network configuration we use for our test cases + ipam: Ipam, + /// set on containers and networks + label: String, + /// automatically clean up the things we have created for this test + clean: bool, +} + +impl Drop for ComposeTest { + /// destroy the containers and network. Notice that we use sync code here + fn drop(&mut self) { + if self.clean { + self.containers.keys().for_each(|c| { + std::process::Command::new("docker") + .args(&["stop", c]) + .output() + .unwrap(); + std::process::Command::new("docker") + .args(&["rm", c]) + .output() + .unwrap(); + }); + + std::process::Command::new("docker") + .args(&["network", "rm", &self.name]) + .output() + .unwrap(); + } + } +} + +impl ComposeTest { + /// Create a new network, with default settings. If a network with the same + /// name already exists it will be reused. 
Note that we do not check the + /// networking IP and/or subnets + async fn network_create(&mut self) -> Result { + let mut net = self.network_list().await?; + + if !net.is_empty() { + let first = net.pop().unwrap(); + self.network_id = first.id.unwrap(); + return Ok(self.network_id.clone()); + } + + let create_opts = CreateNetworkOptions { + name: self.name.as_str(), + check_duplicate: true, + driver: "bridge", + internal: true, + attachable: true, + ingress: false, + ipam: self.ipam.clone(), + enable_ipv6: false, + options: vec![("com.docker.network.bridge.name", "mayabridge0")] + .into_iter() + .collect(), + labels: vec![(self.label.as_str(), "true")].into_iter().collect(), + }; + + self.docker.create_network(create_opts).await.map(|r| { + self.network_id = r.id.unwrap(); + self.network_id.clone() + }) + } + + async fn network_remove(&mut self) -> Result<(), Error> { + // if the network is not found, it's not an error, any other error is + // reported as such. Networks can only be destroyed when all containers + // attached to it are removed. 
To get a list of attached + // containers, use network_list() + if let Err(e) = self.docker.remove_network(&self.name).await { + if !matches!(e, Error::DockerResponseNotFoundError{..}) { + return Err(e); + } + } + + Ok(()) + } + + /// list all the docker networks + pub async fn network_list(&self) -> Result, Error> { + self.docker + .list_networks(Some(ListNetworksOptions { + filters: vec![("name", vec![self.name.as_str()])] + .into_iter() + .collect(), + })) + .await + } + + /// list containers + pub async fn list_containers( + &self, + ) -> Result, Error> { + self.docker + .list_containers(Some(ListContainersOptions { + all: true, + filters: vec![( + "label", + vec![format!("{}=true", self.label).as_str()], + )] + .into_iter() + .collect(), + ..Default::default() + })) + .await + } + + /// remove a container from the configuration + async fn remove_container(&self, name: &str) -> Result<(), Error> { + self.docker + .remove_container( + name, + Some(RemoveContainerOptions { + v: true, + force: true, + link: false, + }), + ) + .await?; + + Ok(()) + } + + /// remove all containers + pub async fn remove_all(&mut self) -> Result<(), Error> { + for k in &self.containers { + self.stop(&k.0).await?; + self.remove_container(&k.0).await?; + } + self.network_remove().await?; + Ok(()) + } + + /// we need to construct several objects to create a setup that meets our + /// liking: + /// + /// (1) hostconfig: that configures the host side of the container, i.e what + /// features/settings from the host perspective do we want too setup + /// for the container. 
(2) endpoints: this allows us to plugin in the + /// container into our network configuration (3) config: the actual + /// config which includes the above objects + async fn create_container( + &mut self, + name: &str, + ipv4: &str, + ) -> Result<(), Error> { + let host_config = HostConfig { + binds: Some(vec![ + format!("{}:{}", self.srcdir, self.srcdir), + "/nix:/nix:ro".into(), + "/dev/hugepages:/dev/hugepages:rw".into(), + ]), + mounts: Some(vec![ + // DPDK needs to have a /tmp + Mount { + target: Some("/tmp".into()), + typ: Some(MountTypeEnum::TMPFS), + ..Default::default() + }, + // mayastor needs to have a /var/tmp + Mount { + target: Some("/var/tmp".into()), + typ: Some(MountTypeEnum::TMPFS), + ..Default::default() + }, + ]), + cap_add: Some(vec![ + "SYS_ADMIN".to_string(), + "IPC_LOCK".into(), + "SYS_NICE".into(), + ]), + security_opt: Some(vec!["seccomp:unconfined".into()]), + ..Default::default() + }; + + let mut endpoints_config = HashMap::new(); + endpoints_config.insert( + self.name.as_str(), + EndpointSettings { + network_id: Some(self.network_id.to_string()), + ipam_config: Some(EndpointIpamConfig { + ipv4_address: Some(ipv4.into()), + ..Default::default() + }), + ..Default::default() + }, + ); + + let env = format!("MY_POD_IP={}", ipv4); + + let config = Config { + cmd: Some(vec![self.binary.as_str(), "-g", "0.0.0.0"]), + env: Some(vec![&env]), + image: None, // notice we do not have a base image here + hostname: Some(name), + host_config: Some(host_config), + networking_config: Some(NetworkingConfig { + endpoints_config, + }), + working_dir: Some(self.srcdir.as_str()), + volumes: Some( + vec![ + ("/dev/hugepages", HashMap::new()), + ("/nix", HashMap::new()), + (self.srcdir.as_str(), HashMap::new()), + ] + .into_iter() + .collect(), + ), + labels: Some( + vec![(self.label.as_str(), "true")].into_iter().collect(), + ), + ..Default::default() + }; + + let container = self + .docker + .create_container( + Some(CreateContainerOptions { + name, + }), + 
config, + ) + .await + .unwrap(); + + self.containers + .insert(name.to_string(), (container.id, ipv4.parse().unwrap())); + + Ok(()) + } + + /// start the container + async fn start(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker + .start_container::<&str>(id.0.as_str(), None) + .await?; + + Ok(()) + } + + /// stop the container + async fn stop(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + if let Err(e) = self + .docker + .stop_container( + id.0.as_str(), + Some(StopContainerOptions { + t: 5, + }), + ) + .await + { + // it was already stopped + if !matches!(e, Error::DockerResponseNotModifiedError{..}) { + return Err(e); + } + } + + Ok(()) + } + + /// get the logs from the container. It would be nice to make it implicit + /// that is, when you make an rpc call, whatever logs were created due to + /// that are returned + pub async fn logs(&self, name: &str) -> Result<(), Error> { + let logs = self + .docker + .logs( + name, + Some(LogsOptions { + follow: false, + stdout: true, + stderr: true, + since: 0, // TODO log lines since last call? + until: 0, + timestamps: false, + tail: "all", + }), + ) + .try_collect::>() + .await?; + + logs.iter().for_each(|l| print!("{}:{}", name, l)); + Ok(()) + } + + /// start all the containers + async fn start_all(&mut self) -> Result<(), Error> { + for k in &self.containers { + self.start(&k.0).await?; + } + + Ok(()) + } + + /// inspect the given container + pub async fn inspect( + &self, + name: &str, + ) -> Result { + self.docker.inspect_container(name, None).await + } + + /// pause the container; unfortunately, when the API returns it does not + /// mean that the container indeed is frozen completely, in the sense + /// that it's not to be assumed that right after a call -- the container + /// stops responding. 
+ pub async fn pause(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker.pause_container(id.0.as_str()).await?; + + Ok(()) + } + + /// un_pause the container + pub async fn thaw(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker.unpause_container(id.0.as_str()).await + } + + /// return grpc handles to the containers + pub async fn grpc_handles(&self) -> Result, ()> { + let mut handles = Vec::new(); + for v in &self.containers { + handles.push( + RpcHandle::connect( + v.0.clone(), + format!("{}:10124", v.1.1).parse::().unwrap(), + ) + .await, + ); + } + + Ok(handles) + } +} + +/// Mayastor test structure that simplifies sending futures. Mayastor has +/// its own reactor, which is not tokio based, so we need to handle properly +pub struct MayastorTest<'a> { + reactor: &'a Reactor, + thdl: Option>, +} + +impl<'a> MayastorTest<'a> { + /// spawn a future on this mayastor instance collecting the output + /// notice that rust will not allow sending Bdevs as they are !Send + pub async fn spawn(&self, future: F) -> T + where + F: Future + 'static, + T: Send + 'static, + { + let (tx, rx) = channel::(); + self.reactor.send_future(async move { + let arg = future.await; + let _ = tx.send(arg); + }); + + rx.await.unwrap() + } + + pub fn send(&self, future: F) + where + F: Future + 'static, + { + self.reactor.send_future(future); + } + + /// starts mayastor, notice we start mayastor on a thread and return a + /// handle to the management core. 
This handle is used to spawn futures, + /// and the thread handle can be used to synchronize the shutdown + async fn _new(args: MayastorCliArgs) -> MayastorTest<'static> { + let (tx, rx) = channel::<&'static Reactor>(); + + let thdl = std::thread::spawn(|| { + MayastorEnvironment::new(args).init(); + tx.send(Reactors::master()).unwrap(); + Reactors::master().running(); + Reactors::master().poll_reactor(); + }); + + let reactor = rx.await.unwrap(); + MayastorTest { + reactor, + thdl: Some(thdl), + } + } + + pub fn new(args: MayastorCliArgs) -> MayastorTest<'static> { + common::mayastor_test_init(); + let (tx, rx) = bounded(1); + + let thdl = std::thread::Builder::new() + .name("mayastor_master".into()) + .spawn(move || { + MayastorEnvironment::new(args).init(); + tx.send(Reactors::master()).unwrap(); + Reactors::master().running(); + Reactors::master().poll_reactor(); + }) + .unwrap(); + + let reactor = rx.recv().unwrap(); + MayastorTest { + reactor, + thdl: Some(thdl), + } + } + + /// explicitly stop mayastor + pub async fn stop(mut self) { + self.spawn(async { mayastor_env_stop(0) }).await; + let hdl = self.thdl.take().unwrap(); + hdl.join().unwrap() + } +} + +impl<'a> Drop for MayastorTest<'a> { + fn drop(&mut self) { + self.reactor.send_future(async { mayastor_env_stop(0) }); + // wait for mayastor to stop + let hdl = self.thdl.take().unwrap(); + hdl.join().unwrap() + } +} diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 0b793d47b..b1bb6e9b8 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -4,7 +4,7 @@ //! panic macros. The caller can decide how to handle the error appropriately. //! Panics and asserts in this file are still ok for usage & programming errors. 
-use std::{env, io, io::Write, process::Command, time::Duration}; +use std::{io, io::Write, process::Command, time::Duration}; use crossbeam::channel::{after, select, unbounded}; use once_cell::sync::OnceCell; @@ -21,9 +21,10 @@ use mayastor::{ use spdk_sys::spdk_get_thread; pub mod bdev_io; +mod compose; pub mod error_bdev; pub mod ms_exec; - +pub use compose::{Builder, ComposeTest, MayastorTest, RpcHandle}; /// call F cnt times, and sleep for a duration between each invocation pub fn retry(mut cnt: u32, timeout: Duration, mut f: F) -> T where @@ -132,9 +133,7 @@ pub fn mayastor_test_init() { panic!("binary: {} not present in path", binary); } }); - logger::init("DEBUG"); - env::set_var("MAYASTOR_LOGLEVEL", "4"); mayastor::CPS_INIT!(); } diff --git a/mayastor/tests/mayastor_compose_basic.rs b/mayastor/tests/mayastor_compose_basic.rs new file mode 100644 index 000000000..699e55899 --- /dev/null +++ b/mayastor/tests/mayastor_compose_basic.rs @@ -0,0 +1,96 @@ +use mayastor::{ + bdev::nexus_create, + core::{Bdev, MayastorCliArgs}, + nexus_uri::bdev_create, +}; +use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; + +pub mod common; +use common::{Builder, MayastorTest}; + +#[tokio::test] +async fn compose_up_down() { + // create a new composeTest and run a basic example + let test = Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container("ms2") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + // get the handles if needed, to invoke methods to the containers + let mut hdls = test.grpc_handles().await.unwrap(); + + // create and share a bdev on each container + for h in &mut hdls { + h.bdev.list(Null {}).await.unwrap(); + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + h.bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap(); + + test.logs(&h.name).await.unwrap(); + } + + // start mayastor and do something the 
container bdev, this will shutdown + // on drop. The main thread will not block as it used too. + let mayastor = MayastorTest::new(MayastorCliArgs::default()); + + // create a nexus over the bdevs + mayastor + .spawn(async move { + nexus_create( + "foo", + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + ), + ], + ) + .await + }) + .await + .unwrap(); + + // why not + mayastor + .spawn(async { + bdev_create("malloc:///malloc0?size_mb=100").await.unwrap(); + }) + .await; + + // this will not compile: -- as it should not compile as bdev is not !Send + // let bdev = mayastor.spawn(async { Bdev::lookup_by_name("foo") }).await; + + let bdevs = mayastor + .spawn(async { + Bdev::bdev_first() + .unwrap() + .into_iter() + .map(|b| b.name()) + .collect::>() + }) + .await; + + // should return 4 bdevs + assert_eq!(bdevs.len(), 4); +} diff --git a/mayastor/tests/replica_timeout.rs b/mayastor/tests/replica_timeout.rs index 7be08ad5f..cbd2d5f7f 100644 --- a/mayastor/tests/replica_timeout.rs +++ b/mayastor/tests/replica_timeout.rs @@ -1,156 +1,101 @@ #![allow(unused_assignments)] -use std::{thread, time}; - -use common::{bdev_io, ms_exec::MayastorProcess}; +use common::{bdev_io, Builder, MayastorTest}; use mayastor::{ bdev::{nexus_create, nexus_lookup}, - core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor}, - subsys, - subsys::Config, + core::MayastorCliArgs, }; +use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; +use tokio::time::Duration; pub mod common; +static NXNAME: &str = "nexus"; + +#[tokio::test] +async fn replica_stop_cont() { + let test = Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container("ms2") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); -static DISKNAME1: &str = "/tmp/disk1.img"; -static BDEVNAME1: &str = 
"aio:///tmp/disk1.img?blk_size=512"; - -static DISKNAME2: &str = "/tmp/disk2.img"; -static BDEVNAME2: &str = "aio:///tmp/disk2.img?blk_size=512"; - -static DISKSIZE_KB: u64 = 64 * 1024; - -static CFGNAME1: &str = "/tmp/child1.yaml"; -static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756"; -static CFGNAME2: &str = "/tmp/child2.yaml"; -static UUID2: &str = "11111111-76b6-4fcf-864d-1027d4038756"; - -static NXNAME: &str = "replica_timeout_test"; - -fn generate_config() { - let mut config = Config::default(); - - let child1_bdev = subsys::BaseBdev { - uri: format!("{}&uuid={}", BDEVNAME1, UUID1), - }; - - let child2_bdev = subsys::BaseBdev { - uri: format!("{}&uuid={}", BDEVNAME2, UUID2), - }; - - config.base_bdevs = Some(vec![child1_bdev]); - config.implicit_share_base = true; - config.nexus_opts.iscsi_enable = false; - config.nexus_opts.nvmf_replica_port = 8430; - config.nexus_opts.nvmf_nexus_port = 8440; - config.write(CFGNAME1).unwrap(); - - config.base_bdevs = Some(vec![child2_bdev]); - config.nexus_opts.nvmf_replica_port = 8431; - config.nexus_opts.nvmf_nexus_port = 8441; - config.write(CFGNAME2).unwrap(); -} - -fn start_mayastor(cfg: &str) -> MayastorProcess { - let args = vec![ - "-s".to_string(), - "128".to_string(), - "-y".to_string(), - cfg.to_string(), - ]; - - MayastorProcess::new(Box::from(args)).unwrap() -} - -#[test] -#[ignore] -fn replica_stop_cont() { - generate_config(); - - common::truncate_file(DISKNAME1, DISKSIZE_KB); - - let mut ms = start_mayastor(CFGNAME1); - - test_init!(); + // get the handles if needed, to invoke methods to the containers + let mut hdls = test.grpc_handles().await.unwrap(); - Reactor::block_on(async { - create_nexus(true).await; - bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); - ms.sig_stop(); - let handle = thread::spawn(move || { - // Sufficiently long to cause a controller reset - // see NvmeBdevOpts::Defaults::timeout_us - 
thread::sleep(time::Duration::from_secs(3)); - ms.sig_cont(); - ms - }); - bdev_io::read_some(NXNAME, 0, 0xff) + // create and share a bdev on each container + for h in &mut hdls { + h.bdev.list(Null {}).await.unwrap(); + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) .await - .expect_err("should fail read after controller reset"); - ms = handle.join().unwrap(); - bdev_io::read_some(NXNAME, 0, 0xff) + .unwrap(); + h.bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) .await - .expect("should read again after Nexus child continued"); - nexus_lookup(NXNAME).unwrap().destroy().await.unwrap(); - assert!(nexus_lookup(NXNAME).is_none()); - }); - - common::delete_file(&[DISKNAME1.to_string()]); -} - -#[test] -#[ignore] -fn replica_term() { - generate_config(); - - common::truncate_file(DISKNAME1, DISKSIZE_KB); - common::truncate_file(DISKNAME2, DISKSIZE_KB); - - let mut ms1 = start_mayastor(CFGNAME1); - let mut ms2 = start_mayastor(CFGNAME2); - // Allow Mayastor processes to start listening on NVMf port - thread::sleep(time::Duration::from_millis(250)); - - test_init!(); + .unwrap(); + } - Reactor::block_on(async { - create_nexus(false).await; - bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); - }); - ms1.sig_term(); - thread::sleep(time::Duration::from_secs(1)); - Reactor::block_on(async { - bdev_io::read_some(NXNAME, 0, 0xff) - .await - .expect("should read with 1 Nexus child terminated"); - }); - ms2.sig_term(); - thread::sleep(time::Duration::from_secs(1)); - Reactor::block_on(async { - bdev_io::read_some(NXNAME, 0, 0xff) + let mayastor = MayastorTest::new(MayastorCliArgs::default()); + + mayastor + .spawn(async move { + nexus_create( + NXNAME, + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + 
), + ], + ) .await - .expect_err("should fail read with 2 Nexus children terminated"); + .unwrap(); + bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); + bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); + }) + .await; + + test.pause("ms1").await.unwrap(); + let mut ticker = tokio::time::interval(Duration::from_secs(1)); + for i in 1 .. 6 { + ticker.tick().await; + println!("waiting for the container to be fully suspended... {}/5", i); + } + + mayastor.send(async { + // we do not determine if the IO completed with an error or not just + // that it completes. + let _ = dbg!(bdev_io::read_some(NXNAME, 0, 0xff).await); + let _ = dbg!(bdev_io::read_some(NXNAME, 0, 0xff).await); }); - mayastor_env_stop(0); - common::delete_file(&[DISKNAME1.to_string(), DISKNAME2.to_string()]); -} + println!("IO submitted unfreezing container..."); -async fn create_nexus(single: bool) { - let mut ch = vec![ - "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:".to_string() - + &UUID1.to_string(), - ]; - if !single { - ch.push( - "nvmf://127.0.0.1:8431/nqn.2019-05.io.openebs:".to_string() - + &UUID2.to_string(), - ); + for i in 1 .. 6 { + ticker.tick().await; + println!("unfreeze delay... 
{}/5", i); } - - nexus_create(NXNAME, DISKSIZE_KB * 1024, None, &ch) - .await - .unwrap(); + test.thaw("ms1").await.unwrap(); + println!("container thawed"); + mayastor + .spawn(async { + let nexus = nexus_lookup(NXNAME).unwrap(); + nexus.destroy().await.unwrap(); + }) + .await; } diff --git a/mayastor/tests/reset.rs b/mayastor/tests/reset.rs index a28a15a09..2bae40148 100644 --- a/mayastor/tests/reset.rs +++ b/mayastor/tests/reset.rs @@ -1,102 +1,55 @@ -use common::ms_exec::MayastorProcess; -use mayastor::{ - bdev::nexus_create, - core::{ - mayastor_env_stop, - BdevHandle, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, - subsys, - subsys::Config, -}; +use mayastor::bdev::nexus_create; -pub mod common; - -static DISKNAME1: &str = "/tmp/disk1.img"; -static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; - -static DISKNAME2: &str = "/tmp/disk2.img"; -static BDEVNAME2: &str = "aio:///tmp/disk2.img?blk_size=512"; - -static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756"; -static UUID2: &str = "11111111-76b6-4fcf-864d-1027d4038756"; - -fn generate_config() { - let mut config = Config::default(); - - let child1_bdev = subsys::BaseBdev { - uri: format!("{}&uuid={}", BDEVNAME1, UUID1), - }; - - let child2_bdev = subsys::BaseBdev { - uri: format!("{}&uuid={}", BDEVNAME2, UUID2), - }; - - config.base_bdevs = Some(vec![child1_bdev]); - config.implicit_share_base = true; - config.nexus_opts.iscsi_enable = false; - config.nexus_opts.nvmf_replica_port = 8430; - config.nexus_opts.nvmf_nexus_port = 8440; - config.write("/tmp/child1.yaml").unwrap(); - - config.base_bdevs = Some(vec![child2_bdev]); - config.nexus_opts.nvmf_replica_port = 8431; - config.nexus_opts.nvmf_nexus_port = 8441; - config.write("/tmp/child2.yaml").unwrap(); -} - -#[test] -fn nexus_reset_mirror() { - generate_config(); +use mayastor::core::{BdevHandle, MayastorCliArgs}; +use rpc::mayastor::{BdevShareRequest, BdevUri}; - common::truncate_file(DISKNAME1, 64 * 1024); - 
common::truncate_file(DISKNAME2, 64 * 1024); - - let args = vec![ - "-s".to_string(), - "128".to_string(), - "-y".to_string(), - "/tmp/child1.yaml".to_string(), - "-g".to_string(), - "127.0.0.1:10124".to_string(), - ]; - - let _ms1 = MayastorProcess::new(Box::from(args)).unwrap(); - - let args = vec![ - "-s".to_string(), - "128".to_string(), - "-y".to_string(), - "/tmp/child2.yaml".to_string(), - "-g".to_string(), - "127.0.0.1:10125".to_string(), - ]; - - let _ms2 = MayastorProcess::new(Box::from(args)).unwrap(); - - test_init!(); - - Reactor::block_on(async { - create_nexus().await; - reset().await; - }); - mayastor_env_stop(0); -} - -async fn create_nexus() { - let ch = vec![ - "nvmf://127.0.0.1:8431/nqn.2019-05.io.openebs:11111111-76b6-4fcf-864d-1027d4038756".to_string(), - "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:00000000-76b6-4fcf-864d-1027d4038756".into() - ]; +pub mod common; - nexus_create("reset_test", 64 * 1024 * 1024, None, &ch) +#[tokio::test] +async fn nexus_reset_mirror() { + let test = common::Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container("ms2") + .add_container("ms1") + .with_clean(true) + .build() .await .unwrap(); -} -async fn reset() { - let bdev = BdevHandle::open("reset_test", true, true).unwrap(); - bdev.reset().await.unwrap(); + let mut hdls = test.grpc_handles().await.unwrap(); + + let mut children: Vec = Vec::new(); + for h in &mut hdls { + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + children.push( + h.bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap() + .into_inner() + .uri, + ) + } + let mayastor = common::MayastorTest::new(MayastorCliArgs::default()); + + // test the reset + mayastor + .spawn(async move { + nexus_create("reset_test", 1024 * 1024 * 50, None, &children) + .await + .unwrap(); + + let bdev = BdevHandle::open("reset_test", true, true).unwrap(); + bdev.reset().await.unwrap(); + 
}) + .await } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 867915a89..5498d9ff1 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -41,7 +41,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1m8097h48zz4d20gk9q1aw25548m2aqfxjlr6nck7chrqccvwr54"; + cargoSha256 = "0mr03cr6i1n5g4dx8v651rq3zblym71cb2vkvg5nqgb34li4br9j"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 0d90f995b..6fd2c5135 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -33,6 +33,7 @@ fn build_wrapper() { .compile("logwrapper"); cc::Build::new() .include("spdk/include") + .include(".") .file("nvme_helper.c") .compile("nvme_helper"); } From 16fec82988904503e5c58701cab092007ababf3e Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Thu, 15 Oct 2020 19:57:41 +0200 Subject: [PATCH 03/92] core: rework main.rs A lot of the "main" code is done with in the start closure of mayastor. This PR moves this into main.rs where it should be. The older .start() method is still present as they are used by tests but over time we can phase it out. 
The need for this - is mostly to avoid spawning runtimes during init --- mayastor/src/bin/casperf.rs | 60 +++++++++++++-------------- mayastor/src/bin/cli/context.rs | 7 +++- mayastor/src/bin/initiator.rs | 51 +++++++++++------------ mayastor/src/bin/main.rs | 43 +++++++++++++------ mayastor/src/core/env.rs | 46 ++++++++------------ mayastor/src/grpc/nexus_grpc.rs | 4 +- mayastor/src/subsys/mbus/mbus_nats.rs | 3 +- 7 files changed, 109 insertions(+), 105 deletions(-) diff --git a/mayastor/src/bin/casperf.rs b/mayastor/src/bin/casperf.rs index f9a27cd4f..be4b2d7c4 100644 --- a/mayastor/src/bin/casperf.rs +++ b/mayastor/src/bin/casperf.rs @@ -368,36 +368,34 @@ fn main() { args.reactor_mask = "0x2".to_string(); //args.grpc_endpoint = Some("0.0.0.0".to_string()); - let ms = MayastorEnvironment::new(args); - ms.start(move || { - sig_override(); - - Reactors::master().send_future(async move { - let jobs = uris - .iter_mut() - .map(|u| Job::new(u, io_size, qd)) - .collect::>(); - - for j in jobs { - let job = j.await; - let thread = - Mthread::new(job.bdev.name(), Cores::current()).unwrap(); - thread.msg(job, |job| { - job.run(); - }); - } + MayastorEnvironment::new(args).init(); + sig_override(); + Reactors::master().send_future(async move { + let jobs = uris + .iter_mut() + .map(|u| Job::new(u, io_size, qd)) + .collect::>(); + + for j in jobs { + let job = j.await; + let thread = + Mthread::new(job.bdev.name(), Cores::current()).unwrap(); + thread.msg(job, |job| { + job.run(); + }); + } - unsafe { - PERF_TICK.with(|p| { - *p.borrow_mut() = - NonNull::new(spdk_sys::spdk_poller_register( - Some(perf_tick), - std::ptr::null_mut(), - 1_000_000, - )) - }); - } - }); - }) - .unwrap(); + unsafe { + PERF_TICK.with(|p| { + *p.borrow_mut() = NonNull::new(spdk_sys::spdk_poller_register( + Some(perf_tick), + std::ptr::null_mut(), + 1_000_000, + )) + }); + } + }); + + Reactors::master().running(); + Reactors::master().poll_reactor(); } diff --git a/mayastor/src/bin/cli/context.rs 
b/mayastor/src/bin/cli/context.rs index 350462afa..36422e27f 100644 --- a/mayastor/src/bin/cli/context.rs +++ b/mayastor/src/bin/cli/context.rs @@ -74,8 +74,11 @@ impl Context { headers .iter() .map(|h| { - (if h.starts_with('>') { &h[1 ..] } else { h }) - .to_string() + if let Some(stripped) = h.strip_prefix('>') { + stripped.to_string() + } else { + h.to_string() + } }) .collect(), ); diff --git a/mayastor/src/bin/initiator.rs b/mayastor/src/bin/initiator.rs index 47642f9f3..4ffb909ac 100644 --- a/mayastor/src/bin/initiator.rs +++ b/mayastor/src/bin/initiator.rs @@ -180,33 +180,30 @@ fn main() { cfg.nexus_opts.nvmf_enable = false; cfg }); - ms.start(move || { - let fut = async move { - let res = if let Some(matches) = matches.subcommand_matches("read") - { - read(&uri, offset, matches.value_of("FILE").unwrap()).await - } else if let Some(matches) = matches.subcommand_matches("write") { - write(&uri, offset, matches.value_of("FILE").unwrap()).await - } else if matches.subcommand_matches("create-snapshot").is_some() { - create_snapshot(&uri).await - } else { - connect(&uri).await - }; - if let Err(err) = res { - error!("{}", err); - -1 - } else { - 0 - } - }; - Reactor::block_on(async move { - let rc = fut.await; - info!("{}", rc); - std::process::exit(rc); - }); + ms.init(); + let fut = async move { + let res = if let Some(matches) = matches.subcommand_matches("read") { + read(&uri, offset, matches.value_of("FILE").unwrap()).await + } else if let Some(matches) = matches.subcommand_matches("write") { + write(&uri, offset, matches.value_of("FILE").unwrap()).await + } else if matches.subcommand_matches("create-snapshot").is_some() { + create_snapshot(&uri).await + } else { + connect(&uri).await + }; + if let Err(err) = res { + error!("{}", err); + -1 + } else { + 0 + } + }; - mayastor_env_stop(0) - }) - .unwrap(); + Reactor::block_on(async move { + let rc = fut.await; + info!("{}", rc); + mayastor_env_stop(0); + std::process::exit(rc); + }); } diff --git 
a/mayastor/src/bin/main.rs b/mayastor/src/bin/main.rs index b3f16f387..656442b73 100644 --- a/mayastor/src/bin/main.rs +++ b/mayastor/src/bin/main.rs @@ -1,21 +1,26 @@ #[macro_use] extern crate tracing; -use std::path::Path; - -use structopt::StructOpt; - +use futures::FutureExt; use mayastor::{ bdev::util::uring, - core::{MayastorCliArgs, MayastorEnvironment}, + core::{MayastorCliArgs, MayastorEnvironment, Reactors}, + grpc, logger, + subsys, }; - +use std::path::Path; +use structopt::StructOpt; mayastor::CPS_INIT!(); - -fn main() -> Result<(), std::io::Error> { +fn main() -> Result<(), Box> { let args = MayastorCliArgs::from_args(); + let mut rt = tokio::runtime::Builder::new() + .basic_scheduler() + .enable_all() + .build() + .unwrap(); + // setup our logger first if -L is passed, raise the log level // automatically. trace maps to debug at FFI level. If RUST_LOG is // passed, we will use it regardless. @@ -44,10 +49,22 @@ fn main() -> Result<(), std::io::Error> { if uring_supported { "yes" } else { "no" } ); info!("free_pages: {} nr_pages: {}", free_pages, nr_pages); - let env = MayastorEnvironment::new(args); - env.start(|| { - info!("Mayastor started {} ...", '\u{1F680}'); - }) - .unwrap(); + + let grpc_endpoint = grpc::endpoint(args.grpc_endpoint.clone()); + + let ms = rt.enter(|| MayastorEnvironment::new(args).init()); + + let master = Reactors::master(); + master.send_future(async { info!("Mayastor started {} ...", '\u{1F680}') }); + let mut futures = Vec::new(); + + futures.push(master.boxed_local()); + futures.push(subsys::Registration::run().boxed_local()); + futures.push(grpc::MayastorGrpcServer::run(grpc_endpoint).boxed_local()); + + rt.block_on(futures::future::try_join_all(futures)) + .expect_err("reactor exit in abnormal state"); + + ms.fini(); Ok(()) } diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index c5ab7ee7b..74f9d06fd 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -17,7 +17,7 @@ use 
futures::{channel::oneshot, future}; use once_cell::sync::{Lazy, OnceCell}; use snafu::Snafu; use structopt::StructOpt; -use tokio::{runtime::Builder, task}; +use tokio::runtime::Builder; use spdk_sys::{ maya_log, @@ -671,11 +671,6 @@ impl MayastorEnvironment { // bootstrap DPDK and its magic self.initialize_eal(); - if self.enable_coredump { - //TODO - warn!("rlimit configuration not implemented"); - } - info!( "Total number of cores available: {}", Cores::count().into_iter().count() @@ -733,7 +728,7 @@ impl MayastorEnvironment { } // finalize our environment - fn fini() { + pub fn fini(&self) { unsafe { spdk_trace_cleanup(); spdk_thread_lib_fini(); @@ -749,7 +744,7 @@ impl MayastorEnvironment { { type FutureResult = Result<(), ()>; let grpc_endpoint = self.grpc_endpoint; - self.init(); + let ms = self.init(); let mut rt = Builder::new() .basic_scheduler() @@ -757,27 +752,22 @@ impl MayastorEnvironment { .build() .unwrap(); - let local = task::LocalSet::new(); rt.block_on(async { - local - .run_until(async { - let master = Reactors::current(); - master.send_future(async { f() }); - let mut futures: Vec< - Pin>>, - > = Vec::new(); - if let Some(grpc_endpoint) = grpc_endpoint { - futures.push(Box::pin(grpc::MayastorGrpcServer::run( - grpc_endpoint, - ))); - } - futures.push(Box::pin(subsys::Registration::run())); - futures.push(Box::pin(master)); - let _out = future::try_join_all(futures).await; - info!("reactors stopped"); - Self::fini(); - }) - .await + let master = Reactors::current(); + master.send_future(async { f() }); + let mut futures: Vec< + Pin>>, + > = Vec::new(); + if let Some(grpc_endpoint) = grpc_endpoint { + futures.push(Box::pin(grpc::MayastorGrpcServer::run( + grpc_endpoint, + ))); + } + futures.push(Box::pin(subsys::Registration::run())); + futures.push(Box::pin(master)); + let _out = future::try_join_all(futures).await; + info!("reactors stopped"); + ms.fini(); }); Ok(*GLOBAL_RC.lock().unwrap()) diff --git a/mayastor/src/grpc/nexus_grpc.rs 
b/mayastor/src/grpc/nexus_grpc.rs index ba4b0c2dc..f45cbd6cf 100644 --- a/mayastor/src/grpc/nexus_grpc.rs +++ b/mayastor/src/grpc/nexus_grpc.rs @@ -80,8 +80,8 @@ impl Nexus { /// unconventional name that likely means it was not created using nexus /// rpc api, we return the whole name without modifications as it is. fn name_to_uuid(name: &str) -> &str { - if name.starts_with("nexus-") { - &name[6 ..] + if let Some(stripped) = name.strip_prefix("nexus-") { + stripped } else { name } diff --git a/mayastor/src/subsys/mbus/mbus_nats.rs b/mayastor/src/subsys/mbus/mbus_nats.rs index 4ef904164..2aa5c2c34 100644 --- a/mayastor/src/subsys/mbus/mbus_nats.rs +++ b/mayastor/src/subsys/mbus/mbus_nats.rs @@ -12,8 +12,7 @@ pub(super) static NATS_MSG_BUS: OnceCell = OnceCell::new(); pub(super) fn message_bus_init(server: String) { NATS_MSG_BUS.get_or_init(|| { // Waits for the message bus to become ready - tokio::runtime::Runtime::new() - .unwrap() + tokio::runtime::Handle::current() .block_on(async { NatsMessageBus::new(&server).await }) }); } From e9950472c679d81c6ed8142de96d37b8ca76a6a9 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 12 Oct 2020 15:20:48 +0100 Subject: [PATCH 04/92] Add message bus library Common api to be used by both mayastor and rust control plane services. The Message trait is expected to be used to easily publish/request messages with default channel and message bus. The Publish/Request traits are more flexible to allow the less typical use of the non default channel and specify a particular message bus. Added traits can be implemented for other types by using exported macros All the different message request/replies are kept in this library for simplicity. No timeout/retry mechanism is provided yet so request/reply may hang if the receiver does not reply back... 
--- Cargo.lock | 46 +++ Cargo.toml | 1 + csi/moac/mbus.js | 13 +- csi/moac/nats.js | 67 ++--- csi/moac/test/nats_test.js | 21 +- mayastor-test/test_nats.js | 15 +- mayastor/Cargo.toml | 2 + mayastor/src/core/env.rs | 18 +- mayastor/src/subsys/mbus/mod.rs | 64 +---- mayastor/src/subsys/mbus/registration.rs | 37 +-- mayastor/src/subsys/mod.rs | 1 - mbus-api/Cargo.toml | 22 ++ mbus-api/examples/client/main.rs | 84 ++++++ mbus-api/examples/server/main.rs | 125 ++++++++ mbus-api/src/lib.rs | 242 ++++++++++++++++ .../subsys/mbus => mbus-api/src}/mbus_nats.rs | 84 +++--- mbus-api/src/receive.rs | 161 +++++++++++ mbus-api/src/send.rs | 266 ++++++++++++++++++ 18 files changed, 1093 insertions(+), 176 deletions(-) create mode 100644 mbus-api/Cargo.toml create mode 100644 mbus-api/examples/client/main.rs create mode 100644 mbus-api/examples/server/main.rs create mode 100644 mbus-api/src/lib.rs rename {mayastor/src/subsys/mbus => mbus-api/src}/mbus_nats.rs (50%) create mode 100644 mbus-api/src/receive.rs create mode 100644 mbus-api/src/send.rs diff --git a/Cargo.lock b/Cargo.lock index fe6170c3e..0b855205e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -960,6 +960,33 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.44", +] + +[[package]] +name = "dyn-clone" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" + [[package]] name = "ed25519" version = "1.0.0-pre.1" @@ -1719,6 +1746,7 @@ dependencies = [ "jsonrpc", "libc", "log", + "mbus_api", "nats", "nix 0.16.1", "once_cell", @@ -1757,6 +1785,24 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "mbus_api" +version = "0.1.0" +dependencies = [ + "async-trait", + "dyn-clonable", + "env_logger", + "log", + "nats", + "once_cell", + "serde", + "serde_json", + "smol", + "snafu", + "structopt", + "tokio", +] + [[package]] name = "memchr" version = "2.3.3" diff --git a/Cargo.toml b/Cargo.toml index 09195831d..937a2731f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,4 +14,5 @@ members = [ "nvmeadm", "rpc", "sysfs", + "mbus-api" ] diff --git a/csi/moac/mbus.js b/csi/moac/mbus.js index 13daf71db..a00e2a0f1 100755 --- a/csi/moac/mbus.js +++ b/csi/moac/mbus.js @@ -51,17 +51,26 @@ const opts = yargs const nc = nats.connect(opts.s); nc.on('connect', () => { if (opts._[0] === 'register') { - nc.publish('register', JSON.stringify({ + nc.publish('registry', JSON.stringify({ + id: "register", + sender: "moac", + data: { id: opts.node, grpcEndpoint: opts.grpc + } })); } else if (opts._[0] === 'deregister') { - nc.publish('deregister', JSON.stringify({ + nc.publish('registry', JSON.stringify({ + id: "deregister", + sender: "moac", + data: { id: opts.node + } })); } else if (opts._[0] === 'raw') { nc.publish(opts.name, opts.payload); } + nc.flush(); nc.close(); process.exit(0); }); diff --git a/csi/moac/nats.js b/csi/moac/nats.js index cfcaa5644..154b1b240 100644 --- a/csi/moac/nats.js +++ b/csi/moac/nats.js @@ -92,46 +92,49 @@ class MessageBus { } } + _registrationReceived (data) { + const ep = data.grpcEndpoint; + if (typeof ep !== 'string' || ep.length === 0) { + log.error('Invalid grpc endpoint in registration message'); + 
return; + } + const id = data.id; + if (typeof id !== 'string' || id.length === 0) { + log.error('Invalid node name in registration message'); + return; + } + log.trace(`"${id}" with "${ep}" requested registration`); + this.registry.addNode(id, ep); + } + _deregistrationReceived (data) { + const id = data.id; + if (typeof id !== 'string' || id.length === 0) { + log.error('Invalid node name in deregistration message'); + return; + } + log.trace(`"${id}" requested deregistration`); + this.registry.removeNode(id); + } + _subscribe () { - this.nc.subscribe('register', (err, msg) => { + this.nc.subscribe('registry', (err, msg) => { if (err) { - log.error(`Error receiving a registration message: ${err}`); - return; - } - const data = this._parsePayload(msg); - if (!data) { - return; - } - const ep = data.grpcEndpoint; - if (typeof ep !== 'string' || ep.length === 0) { - log.error('Invalid grpc endpoint in registration message'); + log.error(`Error receiving a registry message: ${err}`); return; } - const id = data.id; - if (typeof id !== 'string' || id.length === 0) { - log.error('Invalid node name in registration message'); + const payload = this._parsePayload(msg); + if (!payload) { return; } - log.trace(`"${id}" with "${ep}" requested registration`); - this.registry.addNode(id, ep); - }); - this.nc.subscribe('deregister', (err, msg) => { - if (err) { - log.error(`Error receiving a deregistration message: ${err}`); - return; - } - const data = this._parsePayload(msg); - if (!data) { - return; - } - const id = data.id; - if (typeof id !== 'string' || id.length === 0) { - log.error('Invalid node name in deregistration message'); - return; + if (payload.id == "register") { + this._registrationReceived(payload.data); + } else if (payload.id == "deregister") { + this._deregistrationReceived(payload.data); + } else { + const id = payload.id; + log.error(`Unknown registry message: ${id}`); } - log.trace(`"${id}" requested deregistration`); - this.registry.removeNode(id); }); 
} } diff --git a/csi/moac/test/nats_test.js b/csi/moac/test/nats_test.js index 388bc5e0e..cbce5f69f 100644 --- a/csi/moac/test/nats_test.js +++ b/csi/moac/test/nats_test.js @@ -90,9 +90,9 @@ module.exports = function () { }); it('should register a node', async () => { - nc.publish('register', JSON.stringify({ - id: NODE_NAME, - grpcEndpoint: GRPC_ENDPOINT + nc.publish('registry', JSON.stringify({ + id: 'register', + data: { id: NODE_NAME, grpcEndpoint: GRPC_ENDPOINT } })); await waitUntil(async () => { return registry.getNode(NODE_NAME); @@ -103,16 +103,18 @@ module.exports = function () { }); it('should ignore register request with missing node name', async () => { - nc.publish('register', JSON.stringify({ - grpcEndpoint: GRPC_ENDPOINT + nc.publish('registry', JSON.stringify({ + id: 'register', + data: { grpcEndpoint: GRPC_ENDPOINT } })); // small delay to wait for a possible crash of moac await sleep(10); }); it('should ignore register request with missing grpc endpoint', async () => { - nc.publish('register', JSON.stringify({ - id: NODE_NAME + nc.publish('registry', JSON.stringify({ + id: 'register', + data: { id: NODE_NAME } })); // small delay to wait for a possible crash of moac await sleep(10); @@ -125,8 +127,9 @@ module.exports = function () { }); it('should deregister a node', async () => { - nc.publish('deregister', JSON.stringify({ - id: NODE_NAME + nc.publish('registry', JSON.stringify({ + id: 'deregister', + data: { id: NODE_NAME } })); await waitUntil(async () => { return !registry.getNode(NODE_NAME); diff --git a/mayastor-test/test_nats.js b/mayastor-test/test_nats.js index 9fbb65c3d..89f73e0dc 100644 --- a/mayastor-test/test_nats.js +++ b/mayastor-test/test_nats.js @@ -6,6 +6,7 @@ const assert = require('chai').assert; const { spawn } = require('child_process'); const common = require('./test_common'); const nats = require('nats'); +const util = require('util') const HB_INTERVAL = 1; const NATS_PORT = 14222; @@ -52,7 +53,8 @@ function stopNats 
(done) { } function assertRegisterMessage (msg) { - const args = JSON.parse(msg); + assert(JSON.parse(msg).id == "register" ); + const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id', 'grpcEndpoint']); assert.strictEqual(args.id, NODE_NAME); assert.strictEqual(args.grpcEndpoint, common.grpcEndpoint); @@ -89,7 +91,7 @@ describe('nats', function () { MAYASTOR_HB_INTERVAL: HB_INTERVAL }); // wait for the register message - const sid = client.subscribe('register', (msg) => { + const sid = client.subscribe('registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -98,7 +100,7 @@ describe('nats', function () { }); it('should keep sending registration messages', (done) => { - const sid = client.subscribe('register', (msg) => { + const sid = client.subscribe('registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -109,7 +111,7 @@ describe('nats', function () { // simulate outage of NATS server for a duration of two heartbeats stopNats(() => { setTimeout(() => { - const sid = client.subscribe('register', (msg) => { + const sid = client.subscribe('registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -122,9 +124,10 @@ describe('nats', function () { }); it('should send a deregistration message when mayastor is shut down', (done) => { - const sid = client.subscribe('deregister', (msg) => { + const sid = client.subscribe('registry', (msg) => { client.unsubscribe(sid); - const args = JSON.parse(msg); + assert(JSON.parse(msg).id == "deregister" ); + const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id']); assert.strictEqual(args.id, NODE_NAME); done(); diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 45f5d505a..f6f1d6693 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -79,6 +79,8 @@ smol = "1.0.0" dns-lookup = "1.0.4" ipnetwork = "0.17.0" bollard = "0.8.0" +mbus_api = { path = "../mbus-api" } + [dependencies.rpc] path = "../rpc" diff --git 
a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 74f9d06fd..94b610cb6 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -14,6 +14,7 @@ use std::{ use byte_unit::{Byte, ByteUnit}; use futures::{channel::oneshot, future}; +use mbus_api::{ConfigGetCurrent, Message, ReplyConfig}; use once_cell::sync::{Lazy, OnceCell}; use snafu::Snafu; use structopt::StructOpt; @@ -601,7 +602,7 @@ impl MayastorEnvironment { } } - /// start the JSON rpc server which listens only to a local path + /// start the JSON rpc server which listens only to a local path extern "C" fn start_rpc(rc: i32, arg: *mut c_void) { let ctx = unsafe { Box::from_raw(arg as *mut SubsystemCtx) }; @@ -639,6 +640,21 @@ impl MayastorEnvironment { cfg.apply(); } + #[allow(dead_code)] + async fn get_service_config(&self) -> ReplyConfig { + if self.mbus_endpoint.is_some() { + ConfigGetCurrent { + kind: mbus_api::Config::MayastorConfig, + } + .request() + .await + // we need the library to be able to retry + .unwrap() + } else { + Default::default() + } + } + // load the child status file fn load_child_status(&self) { ChildStatusConfig::get_or_init(|| { diff --git a/mayastor/src/subsys/mbus/mod.rs b/mayastor/src/subsys/mbus/mod.rs index 8be611238..09ecc8fc5 100644 --- a/mayastor/src/subsys/mbus/mod.rs +++ b/mayastor/src/subsys/mbus/mod.rs @@ -5,16 +5,11 @@ //! A Registration subsystem is used to keep moac in the loop //! about the lifecycle of mayastor instances. 
-pub mod mbus_nats; pub mod registration; use crate::core::MayastorEnvironment; -use async_trait::async_trait; use dns_lookup::{lookup_addr, lookup_host}; -use mbus_nats::NATS_MSG_BUS; use registration::Registration; -use serde::Serialize; -use smol::io; use spdk_sys::{ spdk_add_subsystem, spdk_subsystem, @@ -95,65 +90,8 @@ impl MessageBusSubsystem { } } -/// Available Message Bus channels -pub enum Channel { - /// Registration of mayastor with the control plane - Register, - /// DeRegistration of mayastor with the control plane - DeRegister, -} - -impl std::fmt::Display for Channel { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match *self { - Channel::Register => write!(f, "register"), - Channel::DeRegister => write!(f, "deregister"), - } - } -} - -#[async_trait] -pub trait MessageBus { - /// publish a message - not guaranteed to be sent or received (fire and - /// forget) - async fn publish( - &self, - channel: Channel, - message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, - ) -> std::io::Result<()>; - /// Send a message and wait for it to be received by the target component - async fn send( - &self, - channel: Channel, - message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, - ) -> Result<(), ()>; - /// Send a message and request a reply from the target component - async fn request( - &self, - channel: Channel, - message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, - ) -> Result, ()>; - /// Flush queued messages to the server - async fn flush(&self) -> io::Result<()>; -} - pub fn message_bus_init() { if let Some(nats) = MayastorEnvironment::global_or_default().mbus_endpoint { - mbus_nats::message_bus_init(nats); + mbus_api::message_bus_init_tokio(nats); } } - -pub fn message_bus() -> &'static impl MessageBus { - NATS_MSG_BUS - .get() - .expect("Should be initialised before use") -} diff --git a/mayastor/src/subsys/mbus/registration.rs 
b/mayastor/src/subsys/mbus/registration.rs index 213384c7c..538388927 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -7,11 +7,9 @@ //! but can be overridden by the `MAYASTOR_HB_INTERVAL` environment variable. //! containing the node name and the grpc endpoint. -use super::MessageBus; -use crate::subsys::mbus::Channel; use futures::{select, FutureExt, StreamExt}; +use mbus_api::*; use once_cell::sync::OnceCell; -use serde::{Deserialize, Serialize}; use snafu::Snafu; use std::{env, time::Duration}; @@ -45,20 +43,6 @@ pub enum Error { QueueDeregister { cause: std::io::Error }, } -/// Register message payload -#[derive(Serialize, Deserialize, Debug)] -struct RegisterArgs { - id: String, - #[serde(rename = "grpcEndpoint")] - grpc_endpoint: String, -} - -/// Deregister message payload -#[derive(Serialize, Deserialize, Debug)] -struct DeregisterArgs { - id: String, -} - #[derive(Clone)] struct Configuration { /// Name of the node that mayastor is running on @@ -156,12 +140,13 @@ impl Registration { /// Send a register message to the MessageBus. async fn register(&self) -> Result<(), Error> { - let payload = RegisterArgs { + let payload = Register { id: self.config.node.clone(), grpc_endpoint: self.config.grpc_endpoint.clone(), }; - super::message_bus() - .publish(Channel::Register, &payload) + + payload + .publish() .await .map_err(|cause| Error::QueueRegister { cause, @@ -180,16 +165,18 @@ impl Registration { /// Send a deregister message to the MessageBus. 
async fn deregister(&self) -> Result<(), Error> { - let payload = DeregisterArgs { + let payload = Deregister { id: self.config.node.clone(), }; - super::message_bus() - .publish(Channel::DeRegister, &payload) + + payload + .publish() .await - .map_err(|cause| Error::QueueDeregister { + .map_err(|cause| Error::QueueRegister { cause, })?; - if let Err(e) = super::message_bus().flush().await { + + if let Err(e) = bus().flush().await { error!("Failed to explicitly flush: {}", e); } diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs index 7210dd270..fc298c14d 100644 --- a/mayastor/src/subsys/mod.rs +++ b/mayastor/src/subsys/mod.rs @@ -28,7 +28,6 @@ pub use mbus::{ mbus_endpoint, message_bus_init, registration::Registration, - MessageBus, MessageBusSubsystem, }; diff --git a/mbus-api/Cargo.toml b/mbus-api/Cargo.toml new file mode 100644 index 000000000..dd67505d6 --- /dev/null +++ b/mbus-api/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "mbus_api" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" + +[dependencies] +nats = "0.8" +structopt = "0.3.15" +log = "0.4.11" +tokio = { version = "0.2", features = ["full"] } +env_logger = "0.7" +serde_json = "1.0" +async-trait = "0.1.36" +dyn-clonable = "0.9.0" +smol = "1.0.0" +once_cell = "1.4.1" +snafu = "0.6" + +[dependencies.serde] +features = ["derive"] +version = "1.0" \ No newline at end of file diff --git a/mbus-api/examples/client/main.rs b/mbus-api/examples/client/main.rs new file mode 100644 index 000000000..c17ce0082 --- /dev/null +++ b/mbus-api/examples/client/main.rs @@ -0,0 +1,84 @@ +use log::info; +use mbus_api::{Message, *}; +use serde::{Deserialize, Serialize}; +use structopt::StructOpt; +use tokio::stream::StreamExt; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, + + /// Channel to 
send to + #[structopt(long, short, default_value = "default")] + channel: Channel, + + /// With server in this binary + #[structopt(long, short)] + server: bool, +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct DummyRequest {} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct DummyReply { + name: String, +} + +// note: in this example we use the default message id +// because we're adding the message types outside of the +// library which should not be done so we have to fake +// out the message id as `Default`. +bus_impl_message_all!(DummyRequest, Default, DummyReply, Default); + +async fn start_server_side() { + let cli_args = CliArgs::from_args(); + + let mut sub = bus().subscribe(cli_args.channel).await.unwrap(); + + tokio::spawn(async move { + // server side + let mut count = 1; + loop { + let message = &sub.next().await.unwrap(); + let message: ReceivedRawMessage = message.into(); + message + .respond(DummyReply { + name: format!("example {}", count), + }) + .await + .unwrap(); + count += 1; + } + }); +} + +#[tokio::main] +async fn main() { + env_logger::init_from_env( + env_logger::Env::default() + .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + let cli_args = CliArgs::from_args(); + message_bus_init(cli_args.url).await; + + if cli_args.server { + // server side needs to subscribe first, unless a streaming model is + // used + start_server_side().await; + } + + let reply = DummyRequest {}.request().await.unwrap(); + info!("Received reply: {:?}", reply); + + // We can also use the following api to specify a different channel and bus + let reply = + DummyRequest::Request(&DummyRequest {}, Channel::Default, bus()) + .await + .unwrap(); + info!("Received reply: {:?}", reply); +} diff --git a/mbus-api/examples/server/main.rs b/mbus-api/examples/server/main.rs new file mode 100644 index 000000000..802eda80d --- /dev/null +++ b/mbus-api/examples/server/main.rs @@ -0,0 +1,125 @@ +use mbus_api::*; +use 
serde::{Deserialize, Serialize}; +use std::{convert::TryInto, str::FromStr}; +use structopt::StructOpt; +use tokio::stream::StreamExt; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, + + /// Channel to listen on + #[structopt(long, short, default_value = "default")] + channel: Channel, + + /// Receiver version + #[structopt(long, short, default_value = "1")] + version: Version, +} + +#[derive(Clone, Debug)] +enum Version { + V1, + V2, + V3, +} + +impl FromStr for Version { + type Err = String; + + fn from_str(source: &str) -> Result { + match source { + "1" => Ok(Self::V1), + "2" => Ok(Self::V2), + "3" => Ok(Self::V3), + _ => Err(format!("Could not parse the version: {}", source)), + } + } +} + +impl Default for Version { + fn default() -> Self { + Version::V1 + } +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct DummyRequest {} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct DummyReply { + name: String, +} + +// note: in this example we use the default message id +// because we're adding the message types outside of the +// library which should not be done so we have to fake +// out the message id as `Default`. 
+bus_impl_message_all!(DummyRequest, Default, DummyReply, Default); + +#[tokio::main] +async fn main() { + env_logger::init_from_env( + env_logger::Env::default() + .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + let cli_args = CliArgs::from_args(); + message_bus_init(cli_args.url).await; + + let mut sub = bus().subscribe(cli_args.channel).await.unwrap(); + + let mut count = 1; + loop { + match cli_args.version { + Version::V1 => receive_v1(&mut sub, count).await, + Version::V2 => receive_v2(&mut sub, count).await, + Version::V3 => receive_v3(&mut sub, count).await, + } + count += 1; + } +} + +async fn receive_v1(sub: &mut nats::asynk::Subscription, count: u64) { + let message = &sub.next().await.unwrap(); + let message: ReceivedRawMessage = message.into(); + // notice that there is no type validation until we + // use something like: + // let data: DummyRequest = message.payload().unwrap(); + message + .respond(DummyReply { + name: format!("example {}", count), + }) + .await + .unwrap(); +} + +async fn receive_v2(sub: &mut nats::asynk::Subscription, count: u64) { + let message = &sub.next().await.unwrap(); + // notice that try_into can fail if the received type does not + // match the received message + let message: ReceivedMessage = + message.try_into().unwrap(); + message + .reply(DummyReply { + name: format!("example {}", count), + }) + .await + .unwrap(); +} + +async fn receive_v3(sub: &mut nats::asynk::Subscription, count: u64) { + let message = &sub.next().await.unwrap(); + let message: ReceivedMessage = + message.try_into().unwrap(); + message + // same function can receive an error + .reply(Err(Error::WithMessage { + message: format!("Fake Error {}", count), + })) + .await + .unwrap(); +} diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs new file mode 100644 index 000000000..2c748d2c0 --- /dev/null +++ b/mbus-api/src/lib.rs @@ -0,0 +1,242 @@ +#![warn(missing_docs)] +//! 
All the different messages which can be sent/received to/from the control +//! plane services and mayastor +//! We could split these out further into categories when they start to grow + +mod mbus_nats; +/// received message traits +pub mod receive; +/// send messages traits +pub mod send; + +use async_trait::async_trait; +use dyn_clonable::clonable; +pub use mbus_nats::{bus, message_bus_init, message_bus_init_tokio}; +pub use receive::*; +pub use send::*; +use serde::{Deserialize, Serialize}; +use smol::io; +use snafu::Snafu; +use std::{fmt::Debug, marker::PhantomData, str::FromStr}; + +/// Available Message Bus channels +#[derive(Clone, Debug)] +pub enum Channel { + /// Default + Default, + /// Registration of mayastor instances with the control plane + Registry, + /// Keep it In Sync Service + Kiiss, + /// Reply to requested Channel + Reply(String), +} + +impl FromStr for Channel { + type Err = String; + + fn from_str(source: &str) -> Result { + match source { + "default" => Ok(Self::Default), + "registry" => Ok(Self::Registry), + "kiiss" => Ok(Self::Kiiss), + _ => Err(format!("Could not parse the channel: {}", source)), + } + } +} + +impl Default for Channel { + fn default() -> Self { + Channel::Default + } +} + +impl std::fmt::Display for Channel { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Channel::Default => write!(f, "default"), + Channel::Registry => write!(f, "registry"), + Channel::Kiiss => write!(f, "kiiss"), + Channel::Reply(ch) => write!(f, "{}", ch), + } + } +} + +/// Message id which uniquely identifies every type of unsolicited message +/// The solicited (replies) message do not currently carry an id as they +/// are sent to a specific requested channel +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub enum MessageId { + /// Default + Default, + /// Update Config + ConfigUpdate, + /// Request current Config + ConfigGetCurrent, + /// Register mayastor + 
Register, + /// Deregister mayastor + Deregister, +} + +/// Sender identification (eg which mayastor instance sent the message) +pub type SenderId = String; + +/// Mayastor configurations +/// Currently, we have the global mayastor config and the child states config +#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] +pub enum Config { + /// Mayastor global config + MayastorConfig, + /// Mayastor child states config + ChildStatesConfig, +} +impl Default for Config { + fn default() -> Self { + Config::MayastorConfig + } +} + +/// Config Messages + +/// Update mayastor configuration +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct ConfigUpdate { + /// type of config being updated + pub kind: Config, + /// actual config data + pub data: Vec, +} +bus_impl_message_all!(ConfigUpdate, ConfigUpdate, (), Kiiss); + +/// Request message configuration used by mayastor to request configuration +/// from a control plane service +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct ConfigGetCurrent { + /// type of config requested + pub kind: Config, +} +/// Reply message configuration returned by a control plane service to mayastor +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct ReplyConfig { + /// config data + pub config: Vec, +} +bus_impl_message_all!( + ConfigGetCurrent, + ConfigGetCurrent, + ReplyConfig, + Kiiss, + GetConfig +); + +/// Registration + +/// Register message payload +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct Register { + /// id of the mayastor instance + pub id: String, + /// grpc_endpoint of the mayastor instance + #[serde(rename = "grpcEndpoint")] + pub grpc_endpoint: String, +} +bus_impl_message_all!(Register, Register, (), Registry); + +/// Deregister message payload +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct Deregister { + /// id of the mayastor instance + pub id: String, +} +bus_impl_message_all!(Deregister, Deregister, (), 
Registry); + +/// This trait defines all Bus Messages which must: +/// 1 - be uniquely identifiable via MessageId +/// 2 - have a default Channel on which they are sent/received +#[async_trait(?Send)] +pub trait Message { + /// type which is sent back in response to a request + type Reply; + + /// identification of this object according to the `MessageId` + fn id(&self) -> MessageId; + /// default channel where this object is sent to + fn channel(&self) -> Channel; + + /// publish a message with no delivery guarantees + async fn publish(&self) -> io::Result<()>; + /// publish a message with a request for a `Self::Reply` reply + async fn request(&self) -> io::Result; +} + +/// The preamble is used to peek into messages so allowing for them to be routed +/// by their identifier +#[derive(Serialize, Deserialize, Debug)] +struct Preamble { + pub(crate) id: MessageId, +} + +/// Unsolicited (send) messages carry the message identifier, the sender +/// identifier and finally the message payload itself +#[derive(Serialize, Deserialize)] +struct SendPayload { + pub(crate) id: MessageId, + pub(crate) sender: SenderId, + pub(crate) data: T, +} + +/// Error type which is returned over the bus +/// todo: Use this Error not just for the "transport" but also +/// for any other operation +#[derive(Serialize, Deserialize, Debug, Snafu)] +#[allow(missing_docs)] +pub enum Error { + #[snafu(display("Generic Failure, message={}", message))] + WithMessage { message: String }, + #[snafu(display("Ill formed request when deserializing the request"))] + InvalidFormat, +} + +/// Payload returned to the sender +/// Includes an error as the operations may be fallible +#[derive(Serialize, Deserialize)] +pub struct ReplyPayload(pub Result); + +// todo: implement thin wrappers on these +/// MessageBus raw Message +pub type BusMessage = nats::asynk::Message; +/// MessageBus subscription +pub type BusSubscription = nats::asynk::Subscription; +/// MessageBus configuration options +pub type 
BusOptions = nats::Options; +/// Save on typing +pub type DynBus = Box; + +/// Messaging Bus trait with "generic" publish and request/reply semantics +#[async_trait] +#[clonable] +pub trait Bus: Clone + Send + Sync { + /// publish a message - not guaranteed to be sent or received (fire and + /// forget) + async fn publish( + &self, + channel: Channel, + message: &[u8], + ) -> std::io::Result<()>; + /// Send a message and wait for it to be received by the target component + async fn send(&self, channel: Channel, message: &[u8]) -> io::Result<()>; + /// Send a message and request a reply from the target component + async fn request( + &self, + channel: Channel, + message: &[u8], + ) -> io::Result; + /// Flush queued messages to the server + async fn flush(&self) -> io::Result<()>; + /// Create a subscription on the given channel which can be + /// polled for messages until it is either explicitly closed or + /// when the bus is closed + async fn subscribe(&self, channel: Channel) -> io::Result; +} diff --git a/mayastor/src/subsys/mbus/mbus_nats.rs b/mbus-api/src/mbus_nats.rs similarity index 50% rename from mayastor/src/subsys/mbus/mbus_nats.rs rename to mbus-api/src/mbus_nats.rs index 2aa5c2c34..833a7c088 100644 --- a/mayastor/src/subsys/mbus/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -1,24 +1,42 @@ -//! NATS implementation of the `MessageBus` connecting mayastor to the control -//! plane components. 
- -use super::{Channel, MessageBus}; -use async_trait::async_trait; +use super::*; +use log::{info, warn}; use nats::asynk::Connection; use once_cell::sync::OnceCell; -use serde::Serialize; use smol::io; -pub(super) static NATS_MSG_BUS: OnceCell = OnceCell::new(); -pub(super) fn message_bus_init(server: String) { +static NATS_MSG_BUS: OnceCell = OnceCell::new(); +/// Initialise the Nats Message Bus with the current tokio runtime +/// (the runtime MUST be setup already or we will panic) +pub fn message_bus_init_tokio(server: String) { NATS_MSG_BUS.get_or_init(|| { // Waits for the message bus to become ready - tokio::runtime::Handle::current() - .block_on(async { NatsMessageBus::new(&server).await }) + tokio::runtime::Handle::current().block_on(async { + NatsMessageBus::new(&server, BusOptions::new()).await + }) }); } +/// Initialise the Nats Message Bus +pub async fn message_bus_init(server: String) { + let nc = NatsMessageBus::new(&server, BusOptions::new()).await; + NATS_MSG_BUS + .set(nc) + .ok() + .expect("Expect to be initialised only once"); +} + +/// Get the static `NatsMessageBus` as a boxed `MessageBus` +pub fn bus() -> DynBus { + Box::new( + NATS_MSG_BUS + .get() + .expect("Should be initialised before use") + .clone(), + ) +} // Would we want to have both sync and async clients? 
-pub struct NatsMessageBus { +#[derive(Clone)] +struct NatsMessageBus { connection: Connection, } impl NatsMessageBus { @@ -29,7 +47,11 @@ impl NatsMessageBus { let interval = std::time::Duration::from_millis(500); let mut log_error = true; loop { - match nats::asynk::connect(server).await { + match BusOptions::new() + .max_reconnects(None) + .connect_async(server) + .await + { Ok(connection) => { info!( "Successfully connected to the nats server {}", @@ -52,7 +74,7 @@ impl NatsMessageBus { } } - async fn new(server: &str) -> Self { + async fn new(server: &str, _options: BusOptions) -> Self { Self { connection: Self::connect(server).await, } @@ -60,44 +82,32 @@ impl NatsMessageBus { } #[async_trait] -impl MessageBus for NatsMessageBus { +impl Bus for NatsMessageBus { async fn publish( &self, channel: Channel, - message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, + message: &[u8], ) -> std::io::Result<()> { - let payload = serde_json::to_vec(&message)?; - self.connection - .publish(&channel.to_string(), &payload) - .await + self.connection.publish(&channel.to_string(), message).await } - async fn send( - &self, - _channel: Channel, - _message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, - ) -> Result<(), ()> { + async fn send(&self, _channel: Channel, _message: &[u8]) -> io::Result<()> { unimplemented!() } async fn request( &self, - _channel: Channel, - _message: impl Serialize - + std::marker::Send - + std::marker::Sync - + 'async_trait, - ) -> Result, ()> { - unimplemented!() + channel: Channel, + message: &[u8], + ) -> io::Result { + self.connection.request(&channel.to_string(), message).await } async fn flush(&self) -> io::Result<()> { self.connection.flush().await } + + async fn subscribe(&self, channel: Channel) -> io::Result { + self.connection.subscribe(&channel.to_string()).await + } } diff --git a/mbus-api/src/receive.rs b/mbus-api/src/receive.rs new file mode 100644 index 
000000000..1507fd30d --- /dev/null +++ b/mbus-api/src/receive.rs @@ -0,0 +1,161 @@ +use super::*; + +/// Type safe wrapper over a message bus message which decodes the raw +/// message into the actual payload `S` and allows only for a response type `R`. +/// +/// # Example: +/// ``` +/// let raw_msg = &subscriber.next().await?; +/// let msg: ReceivedMessage = +/// raw_msg.try_into()?; +/// +/// msg.respond(ReplyConfig {}).await.unwrap(); +/// // or we can also use the same fn to return an error +/// msg.respond(Err(Error::Message("failure".into()))).await.unwrap(); +/// ``` +pub struct ReceivedMessage<'a, S, R> { + request: SendPayload, + bus_message: &'a BusMessage, + reply_type: PhantomData, +} + +impl<'a, S, R> ReceivedMessage<'a, S, R> +where + for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, + R: Serialize, +{ + /// Get a clone of the actual payload data which was received. + pub fn inner(&self) -> S { + self.request.data.clone() + } + /// Get the sender identifier + pub fn sender(&self) -> SenderId { + self.request.sender.clone() + } + + /// Reply back to the sender with the `reply` payload wrapped by + /// a Result-like type. + /// May fail if serialization of the reply fails or if the + /// message bus fails to respond. + /// Can receive either `R`, `Err()` or `ReplyPayload`. + pub async fn reply>>( + &self, + reply: T, + ) -> io::Result<()> { + let reply: ReplyPayload = reply.into(); + let payload = serde_json::to_vec(&reply)?; + self.bus_message.respond(&payload).await + } + + /// Create a new received message object which wraps the send and + /// receive types around a raw bus message. 
+ fn new(bus_message: &'a BusMessage) -> Result { + let request: SendPayload = + serde_json::from_slice(&bus_message.data)?; + if request.id == request.data.id() { + log::info!( + "We have a message from '{}': {:?}", + request.sender, + request.data + ); + Ok(Self { + request, + bus_message, + reply_type: Default::default(), + }) + } else { + Err(io::Error::new( + io::ErrorKind::InvalidInput, + "invalid message id!", + )) + } + } +} + +/// Message received over the message bus with a reply serialization wrapper +/// For type safety refer to `ReceivedMessage<'a,S,R>`. +pub struct ReceivedRawMessage<'a> { + bus_msg: &'a BusMessage, +} + +impl<'a> ReceivedRawMessage<'a> { + /// Get a copy of the actual payload data which was sent + /// May fail if the raw data cannot be deserialized into `S` + pub fn inner>(&self) -> io::Result { + let request: SendPayload = + serde_json::from_slice(&self.bus_msg.data)?; + Ok(request.data) + } + + /// Get the identifier of this message. + /// May fail if the raw data cannot be deserialized into the preamble. + pub fn id(&self) -> io::Result { + let preamble: Preamble = serde_json::from_slice(&self.bus_msg.data)?; + Ok(preamble.id) + } + + /// Channel where this message traversed + pub fn channel(&self) -> Channel { + self.bus_msg.subject.clone().parse().unwrap() + } + + /// Respond back to the sender with the `reply` payload wrapped by + /// a Result-like type. + /// May fail if serialization of the reply fails or if the + /// message bus fails to respond. + /// Can receive either `Serialize`, `Err()` or `ReplyPayload`. 
+ pub async fn respond>>( + &self, + reply: R, + ) -> io::Result<()> { + let reply: ReplyPayload = reply.into(); + let payload = serde_json::to_vec(&reply)?; + self.bus_msg.respond(&payload).await + } +} + +impl<'a> std::convert::From<&'a BusMessage> for ReceivedRawMessage<'a> { + fn from(value: &'a BusMessage) -> Self { + Self { + bus_msg: value, + } + } +} + +impl<'a, S, R> std::convert::TryFrom<&'a BusMessage> + for ReceivedMessage<'a, S, R> +where + for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, + R: Serialize, +{ + type Error = io::Error; + + fn try_from(value: &'a BusMessage) -> Result { + ReceivedMessage::::new(value) + } +} + +impl<'a, S, R> std::convert::TryFrom> + for ReceivedMessage<'a, S, R> +where + for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, + R: Serialize, +{ + type Error = io::Error; + + fn try_from(value: ReceivedRawMessage<'a>) -> Result { + ReceivedMessage::::new(value.bus_msg) + } +} + +impl From for ReplyPayload { + fn from(val: T) -> Self { + ReplyPayload(Ok(val)) + } +} + +impl From> for ReplyPayload { + fn from(val: Result) -> Self { + ReplyPayload(val) + } +} diff --git a/mbus-api/src/send.rs b/mbus-api/src/send.rs new file mode 100644 index 000000000..d82a56ef9 --- /dev/null +++ b/mbus-api/src/send.rs @@ -0,0 +1,266 @@ +use super::*; + +// todo: replace with proc-macros + +/// Main Message trait, which should tipically be used to send +/// MessageBus messages. +/// Implements Message trait for the type `S` with the reply type +/// `R`, the message id `I`, the default channel `C`. +/// If specified it makes use of the Request/Publish traits exported +/// by type `T`, otherwise it defaults to using `S`. +/// Also implements the said Request/Publish traits for type `T`, if +/// specified, otherwise it implements them for type `S`. 
+/// +/// # Example +/// ``` +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyRequest {} +/// bus_impl_message_all!(DummyRequest, DummyId, (), DummyChan); +/// +/// let reply = DummyRequest { }.request().await.unwrap(); +/// ``` +#[macro_export] +macro_rules! bus_impl_message_all { + ($S:ident, $I:ident, $R:tt, $C:ident) => { + bus_impl_all!($S, $R); + bus_impl_message!($S, $I, $R, $C); + }; + ($S:ident, $I:ident, $R:tt, $C:ident, $T:ident) => { + bus_impl_all!($T, $S, $R); + bus_impl_message!($S, $I, $R, $C, $T); + }; +} + +/// Implement Request/Reply traits for type `S`. +/// Otherwise, if `T` is specified, then it creates `T` and +/// implements said types for `T`. +/// `S` is the request payload and `R` is the reply payload. +/// # Example +/// ``` +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyRequest {} +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyReply {} +/// +/// bus_impl_all!(DummyRequest,DummyReply); +/// +/// let reply = DummyRequest::request(DummyRequest {}, channel, &bus) +/// .await +/// .unwrap(); +/// +/// bus_impl_all!(Dummy, DummyRequest,DummyReply); +/// +/// let reply = Dummy::request(DummyRequest {}, channel, &bus) +/// .await +/// .unwrap(); +/// ``` +#[macro_export] +macro_rules! bus_impl_all { + ($S:ident,$R:ty) => { + bus_impl_request!($S, $R); + bus_impl_publish!($S); + }; + ($T:ident,$S:ident,$R:ty) => { + /// place holder for the message traits, example: + /// $T::request(..).await + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct $T {} + + bus_impl_request!($T, $S, $R); + bus_impl_publish!($T, $S); + }; +} + +/// Implement the bus trait for requesting a response back from `T` where +/// `S` is the payload request type and `R` is the reply payload type. +/// Can optionally implement the trait for `S`. 
+/// # Example +/// ``` +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyRequest {} +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyReply {} +/// +/// bus_impl_request!(DummyRequest,DummyReply); +/// +/// let reply = DummyRequest::request(DummyRequest {}, channel, &bus) +/// .await +/// .unwrap(); +/// ``` +#[macro_export] +macro_rules! bus_impl_request { + ($S:ident,$R:ty) => { + impl<'a> MessageRequest<'a, $S, $R> for $S {} + }; + ($T:ty,$S:ident,$R:ty) => { + impl<'a> MessageRequest<'a, $S, $R> for $T {} + }; +} + +/// Implement the publish bus trait for type `T` which +/// publishes the payload type `S`. +/// Can optionally implement the trait for `S`. +/// # Example +/// ``` +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyPublish {} +/// +/// bus_impl_publish!(DummyPublish); +/// +/// DummyPublish::request(DummyPublish {}, channel, &bus).await.unwrap() +/// ``` +#[macro_export] +macro_rules! bus_impl_publish { + ($S:ty) => { + bus_impl_publish!($S, $S); + }; + ($T:ty,$S:tt) => { + impl<'a> MessagePublish<'a, $S, ()> for $T {} + }; +} + +/// Implement Message trait for the type `S` with the reply type +/// `R`, the message id `I`, the default channel `C`. +/// If specified it makes use of the Request/Publish traits exported +/// by type `T`, otherwise it defaults to using `S`. +/// # Example +/// ``` +/// #[derive(Serialize, Deserialize, Debug, Default, Clone)] +/// struct DummyRequest {} +/// bus_impl_message!(DummyRequest, DummyId, (), DummyChan); +/// ``` +#[macro_export] +macro_rules! 
bus_impl_message { + ($S:ident, $I:ident, $R:tt, $C:ident) => { + bus_impl_message!($S, $I, $R, $C, $S); + }; + ($S:ident, $I:ident, $R:tt, $C:ident, $T:ident) => { + #[async_trait::async_trait(?Send)] + impl Message for $S { + type Reply = $R; + + fn id(&self) -> MessageId { + MessageId::$I + } + fn channel(&self) -> Channel { + Channel::$C + } + async fn publish(&self) -> smol::io::Result<()> { + $T::Publish(self, self.channel(), bus()).await + } + async fn request(&self) -> smol::io::Result<$R> { + $T::Request(self, self.channel(), bus()).await + } + } + }; +} + +/// Trait to send a message `bus` request with the `payload` type `S` via a +/// a `channel` and requesting a response back with the payload type `R` +/// via a specific reply channel. +/// Trait can be implemented using the macro helper `bus_impl_request`. +#[async_trait(?Send)] +pub trait MessageRequest<'a, S, R> +where + S: 'a + Sync + Message + Serialize, + for<'de> R: Deserialize<'de> + Default + 'a + Sync, +{ + /// Sends the message and requests a reply + /// May fail if the bus fails to publish the message. + #[allow(non_snake_case)] + async fn Request( + payload: &'a S, + channel: Channel, + bus: DynBus, + ) -> io::Result { + let msg = SendMessage::::new(payload, channel, bus); + msg.request().await + } +} + +/// Trait to send a message `bus` publish with the `payload` type `S` via a +/// a `channel`. No reply is requested. +/// Trait can be implemented using the macro helper `bus_impl_publish`. 
+#[async_trait(?Send)] +pub trait MessagePublish<'a, S, R> +where + S: 'a + Sync + Message + Serialize, + for<'de> R: Deserialize<'de> + Default + 'a + Sync, +{ + /// Publishes the Message - not guaranteed to be sent or received (fire and + /// forget) + /// May fail if the bus fails to publish the message + #[allow(non_snake_case)] + async fn Publish( + payload: &'a S, + channel: Channel, + bus: DynBus, + ) -> io::Result<()> { + let msg = SendMessage::::new(payload, channel, bus); + msg.publish().await + } +} + +/// Type specific Message Bus api used to send a message of type `S` over the +/// message bus with an additional type `R` use for request/reply semantics +/// # Example: +/// ``` +/// let msg = RequestToSend::::new(payload, channel, bus); +/// msg.request().await.unwrap(); +/// ``` +struct SendMessage<'a, S, R> { + payload: SendPayload<&'a S>, + bus: DynBus, + channel: Channel, + reply_type: PhantomData, +} + +impl<'a, S, R> SendMessage<'a, S, R> +where + S: Message + Serialize, + for<'de> R: Deserialize<'de> + 'a, +{ + /// each client needs a unique identification + /// should this be a creation argument? + fn name() -> SenderId { + match std::env::var("NODE_NAME") { + Ok(val) => val, + _ => "default".into(), + } + } + + /// Creates a new request `Message` with the required payload + /// using an existing `bus` which is used to sent the payload + /// via the `channel`. + pub(crate) fn new(payload: &'a S, channel: Channel, bus: DynBus) -> Self { + Self { + payload: SendPayload { + id: payload.id(), + data: payload, + sender: Self::name(), + }, + reply_type: Default::default(), + bus, + channel, + } + } + + /// Publishes the Message - not guaranteed to be sent or received (fire and + /// forget). + pub(crate) async fn publish(&self) -> io::Result<()> { + let payload = serde_json::to_vec(&self.payload)?; + self.bus.publish(self.channel.clone(), &payload).await + } + + /// Sends the message and requests a reply. + /// todo: add timeout with retry logic? 
+ pub(crate) async fn request(&self) -> io::Result { + let payload = serde_json::to_vec(&self.payload)?; + let reply = + self.bus.request(self.channel.clone(), &payload).await?.data; + let reply: ReplyPayload = serde_json::from_slice(&reply)?; + reply.0.map_err(|error| { + io::Error::new(io::ErrorKind::Other, format!("{:?}", error)) + }) + } +} From d013d52c1fce4be0d41da5f015526bb36c694f4b Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 12 Oct 2020 15:51:52 +0100 Subject: [PATCH 05/92] Add control plane services library This library is specifically for running services over the message bus. It uses a builder pattern allowing for multiple service subscribers to be registered. Each ServiceSubscriber must implement a handlerFn which will receive the message as an argument as well as a filterFn to filter in messages with the desired message identifiers. Added example service to showcase how to use the service library --- Cargo.lock | 19 +++ Cargo.toml | 1 + services/Cargo.toml | 28 ++++ services/common/src/lib.rs | 253 ++++++++++++++++++++++++++++++ services/examples/service/main.rs | 80 ++++++++++ 5 files changed, 381 insertions(+) create mode 100644 services/Cargo.toml create mode 100644 services/common/src/lib.rs create mode 100644 services/examples/service/main.rs diff --git a/Cargo.lock b/Cargo.lock index 0b855205e..e45f14199 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2781,6 +2781,25 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "services" +version = "0.1.0" +dependencies = [ + "async-trait", + "dyn-clonable", + "env_logger", + "futures", + "log", + "mbus_api", + "nats", + "serde", + "serde_json", + "smol", + "snafu", + "structopt", + "tokio", +] + [[package]] name = "sha2" version = "0.8.2" diff --git a/Cargo.toml b/Cargo.toml index 937a2731f..bfabe1b8d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,5 +14,6 @@ members = [ "nvmeadm", "rpc", "sysfs", + "services", "mbus-api" ] diff --git a/services/Cargo.toml b/services/Cargo.toml new file mode 
100644 index 000000000..acff9d350 --- /dev/null +++ b/services/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "services" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" + +[lib] +name = "common" +path = "common/src/lib.rs" + +[dependencies] +mbus_api = { path = "../mbus-api" } +nats = "0.8" +structopt = "0.3.15" +log = "0.4.11" +tokio = { version = "0.2", features = ["full"] } +futures = "0.3.6" +env_logger = "0.7" +serde_json = "1.0" +async-trait = "0.1.36" +dyn-clonable = "0.9.0" +smol = "1.0.0" +snafu = "0.6" + + +[dependencies.serde] +features = ["derive"] +version = "1.0" \ No newline at end of file diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs new file mode 100644 index 000000000..04c414130 --- /dev/null +++ b/services/common/src/lib.rs @@ -0,0 +1,253 @@ +#![warn(missing_docs)] +//! Control Plane Services library with emphasis on the message bus interaction. +//! +//! It's meant to facilitate the creation of services with a helper builder to +//! subscribe handlers for different message identifiers. 
+ +use async_trait::async_trait; +use dyn_clonable::clonable; +use futures::{future::join_all, stream::StreamExt}; +use mbus_api::*; +use smol::io; +use snafu::{OptionExt, ResultExt, Snafu}; +use std::collections::HashMap; + +#[derive(Debug, Snafu)] +#[allow(missing_docs)] +pub enum ServiceError { + #[snafu(display("Channel {} has been closed.", channel))] + GetMessage { + channel: Channel, + }, + #[snafu(display("Failed to subscribe on Channel {}", channel))] + Subscribe { + channel: Channel, + source: io::Error, + }, + GetMessageId { + channel: Channel, + source: io::Error, + }, + FindSubscription { + channel: Channel, + id: MessageId, + }, + HandleMessage { + channel: Channel, + id: MessageId, + source: io::Error, + }, +} + +/// Runnable service with N subscriptions which listen on a given +/// message bus channel on a specific ID +#[derive(Default)] +pub struct Service { + server: String, + channel: Channel, + subscriptions: HashMap>>, +} + +/// Service Arguments for the service handler callback +pub struct Arguments<'a> { + /// Service context, like access to the message bus + pub context: Context<'a>, + /// Access to the actual message bus request + pub request: Request<'a>, +} + +impl<'a> Arguments<'a> { + /// Returns a new Service Argument to be use by a Service Handler + pub fn new(bus: &'a DynBus, msg: &'a BusMessage) -> Self { + Self { + context: bus.into(), + request: msg.into(), + } + } +} + +/// Service handling context +/// the message bus which triggered the service callback +#[derive(Clone)] +pub struct Context<'a> { + bus: &'a DynBus, +} + +impl<'a> From<&'a DynBus> for Context<'a> { + fn from(bus: &'a DynBus) -> Self { + Self { + bus, + } + } +} + +impl<'a> Context<'a> { + /// get the message bus from the context + pub fn get_bus_as_ref(&self) -> &'a DynBus { + self.bus + } +} + +/// Service Request received via the message bus +pub type Request<'a> = ReceivedRawMessage<'a>; + +#[async_trait] +#[clonable] +/// Trait which must be implemented by 
each subscriber with the handler +/// which processes the messages and a filter to match message types +pub trait ServiceSubscriber: Clone + Send + Sync { + /// async handler which processes the messages + async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error>; + /// filter which identifies which messages may be routed to the handler + fn filter(&self) -> Vec; +} + +impl Service { + /// Setup default service connecting to `server` on subject `channel` + pub fn builder(server: String, channel: Channel) -> Self { + Self { + server, + channel, + ..Default::default() + } + } + + /// Setup default `channel` + pub fn with_channel(mut self, channel: Channel) -> Self { + self.channel = channel; + self + } + + /// Add a new subscriber on the default channel + pub fn with_subscription( + self, + service_subscriber: impl ServiceSubscriber + 'static, + ) -> Self { + let channel = self.channel.clone(); + self.with_subscription_channel(channel, service_subscriber) + } + + /// Add a new subscriber on the given `channel` + pub fn with_subscription_channel( + mut self, + channel: Channel, + service_subscriber: impl ServiceSubscriber + 'static, + ) -> Self { + match self.subscriptions.get_mut(&channel.to_string()) { + Some(entry) => { + entry.push(Box::from(service_subscriber)); + } + None => { + self.subscriptions.insert( + channel.to_string(), + vec![Box::from(service_subscriber)], + ); + } + }; + self + } + + async fn run_channel( + bus: DynBus, + channel: Channel, + subscriptions: &[Box], + ) -> Result<(), ServiceError> { + let mut handle = + bus.subscribe(channel.clone()).await.context(Subscribe { + channel: channel.clone(), + })?; + + loop { + let message = handle.next().await.context(GetMessage { + channel: channel.clone(), + })?; + let args = Arguments::new(&bus, &message); + + if let Err(error) = + Self::process_message(args, &subscriptions).await + { + log::error!("Error processing message: {}", error); + } + } + } + + async fn process_message<'a>( + 
arguments: Arguments<'a>, + subscriptions: &[Box], + ) -> Result<(), ServiceError> { + let channel = arguments.request.channel(); + let id = &arguments.request.id().context(GetMessageId { + channel: channel.clone(), + })?; + + let subscription = subscriptions + .iter() + .find(|&subscriber| { + subscriber.filter().iter().any(|find_id| find_id == id) + }) + .context(FindSubscription { + channel: channel.clone(), + id: id.clone(), + })?; + + let result = + subscription + .handler(arguments) + .await + .context(HandleMessage { + channel: channel.clone(), + id: id.clone(), + }); + + if let Err(error) = result.as_ref() { + // todo: should an error be returned to the sender? + log::error!( + "Error handling message id {:?}: {:?}", + subscription.filter(), + error + ); + } + + result + } + + /// Runs the server which services all subscribers asynchronously until all + /// subscribers are closed + /// + /// subscribers are sorted according to the channel they subscribe on + /// each channel benefits from a tokio thread which routes messages + /// accordingly todo: only one subscriber per message id supported at + /// the moment + pub async fn run(&self) { + let mut threads = vec![]; + // todo: parse connection options when nats has better support for it + mbus_api::message_bus_init(self.server.clone()).await; + let bus = mbus_api::bus(); + + for subscriptions in self.subscriptions.iter() { + let bus = bus.clone(); + let channel = subscriptions.0.clone(); + let subscriptions = subscriptions.1.clone(); + + let handle = tokio::spawn(async move { + Self::run_channel(bus, channel.parse().unwrap(), &subscriptions) + .await + }); + + threads.push(handle); + } + + join_all(threads) + .await + .iter() + .for_each(|result| match result { + Err(error) => { + log::error!("Failed to wait for thread: {:?}", error) + } + Ok(Err(error)) => { + log::error!("Error running channel thread: {:?}", error) + } + _ => {} + }); + } +} diff --git a/services/examples/service/main.rs 
b/services/examples/service/main.rs new file mode 100644 index 000000000..1eaa92222 --- /dev/null +++ b/services/examples/service/main.rs @@ -0,0 +1,80 @@ +use async_trait::async_trait; +use common::*; +use mbus_api::*; +use serde::{Deserialize, Serialize}; +use smol::io; +use std::{convert::TryInto, marker::PhantomData}; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, + + /// Act as a Server or a test client + #[structopt(long, short)] + client: bool, +} + +/// Needed so we can implement the ServiceSubscriber trait for +/// the message types external to the crate +#[derive(Clone, Default)] +struct ServiceHandler { + data: PhantomData, +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct GetSvcName {} + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +struct SvcName(String); + +bus_impl_message_all!(GetSvcName, Default, SvcName, Default); + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + let msg: ReceivedMessage = + args.request.try_into()?; + + let reply = SvcName("example".into()); + + println!("Received {:?} and replying {:?}", msg.inner(), reply); + + msg.reply(reply).await + } + fn filter(&self) -> Vec { + vec![GetSvcName::default().id()] + } +} + +#[tokio::main] +async fn main() { + let cli_args = CliArgs::from_args(); + + if cli_args.client { + client().await; + } else { + server().await; + } +} + +async fn client() { + let cli_args = CliArgs::from_args(); + message_bus_init(cli_args.url).await; + + let svc_name = GetSvcName {}.request().await.unwrap().0; + println!("Svc Name: {}", svc_name); +} + +async fn server() { + let cli_args = CliArgs::from_args(); + + Service::builder(cli_args.url, Channel::Default) + 
.with_subscription(ServiceHandler::::default()) + .run() + .await; +} From 994acfff5ac362f1272bb75b4b793dee6f3442c5 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 12 Oct 2020 15:58:32 +0100 Subject: [PATCH 06/92] Add initial mayastor config service skeleton Add kiiss config service code and the nix container image. This limited service is simply listening on config update requests and stores them in volatile memory. --- Cargo.lock | 1 + nix/pkgs/images/default.nix | 23 ++++ nix/pkgs/mayastor/default.nix | 7 +- services/Cargo.toml | 6 +- services/examples/kiiss-client/main.rs | 50 ++++++++ services/kiiss/src/server.rs | 151 +++++++++++++++++++++++++ 6 files changed, 235 insertions(+), 3 deletions(-) create mode 100644 services/examples/kiiss-client/main.rs create mode 100644 services/kiiss/src/server.rs diff --git a/Cargo.lock b/Cargo.lock index e45f14199..e98dfbe0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2789,6 +2789,7 @@ dependencies = [ "dyn-clonable", "env_logger", "futures", + "lazy_static", "log", "mbus_api", "nats", diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index 0963f07e3..a1eac0769 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -47,6 +47,17 @@ let mkdir -p var/tmp ''; }; + servicesImageProps = { + tag = version; + created = "now"; + config = { + Env = [ "PATH=${env}" ]; + }; + extraCommands = '' + mkdir tmp + mkdir -p var/tmp + ''; + }; in rec { mayastor-image = dockerTools.buildImage (mayastorImageProps // { @@ -99,4 +110,16 @@ rec { chmod u-w bin ''; }; + + services-kiiss-image = dockerTools.buildLayeredImage (servicesImageProps // { + name = "mayadata/services-kiiss"; + contents = [ busybox mayastor ]; + config = { Entrypoint = [ "/bin/kiiss" ]; }; + }); + + services-kiiss-dev-image = dockerTools.buildImage (servicesImageProps // { + name = "mayadata/services-kiiss-dev"; + contents = [ busybox mayastor ]; + config = { Entrypoint = [ "/bin/kiiss" ]; }; + }); } diff --git 
a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 5498d9ff1..5f91e0557 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -41,7 +41,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0mr03cr6i1n5g4dx8v651rq3zblym71cb2vkvg5nqgb34li4br9j"; + cargoSha256 = "0r6cix3s60c3qaj3lsr3bbr6by28rhapclcs4jw5d68s882i2lh8"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" @@ -55,6 +55,8 @@ let "rpc" "spdk-sys" "sysfs" + "mbus-api" + "services" ]; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; @@ -92,7 +94,7 @@ in buildInputs = buildProps.buildInputs ++ [ libspdk-dev ]; SPDK_PATH = "${libspdk-dev}"; }); - # this is for image that does not do a build of mayastor + # this is for an image that does not do a build of mayastor adhoc = stdenv.mkDerivation { name = "mayastor-adhoc"; inherit version; @@ -101,6 +103,7 @@ in ../../../target/debug/mayastor-csi ../../../target/debug/mayastor-client ../../../target/debug/jsonrpc + ../../../target/debug/kiiss ]; buildInputs = [ diff --git a/services/Cargo.toml b/services/Cargo.toml index acff9d350..bcd6ded66 100644 --- a/services/Cargo.toml +++ b/services/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Tiago Castro "] edition = "2018" +[[bin]] +name = "kiiss" +path = "kiiss/src/server.rs" + [lib] name = "common" path = "common/src/lib.rs" @@ -21,7 +25,7 @@ async-trait = "0.1.36" dyn-clonable = "0.9.0" smol = "1.0.0" snafu = "0.6" - +lazy_static = "1.4.0" [dependencies.serde] features = ["derive"] diff --git a/services/examples/kiiss-client/main.rs b/services/examples/kiiss-client/main.rs new file mode 100644 index 000000000..98190fd65 --- /dev/null +++ b/services/examples/kiiss-client/main.rs @@ -0,0 +1,50 @@ +use log::info; +use mbus_api::*; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats 
schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, +} + +#[tokio::main] +async fn main() { + env_logger::init_from_env( + env_logger::Env::default() + .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + client().await; +} + +async fn client() { + let cli_args = CliArgs::from_args(); + mbus_api::message_bus_init(cli_args.url).await; + + ConfigUpdate { + kind: Config::MayastorConfig, + data: "My config...".into(), + } + .request() + .await + .unwrap(); + + let config = GetConfig::Request( + &ConfigGetCurrent { + kind: Config::MayastorConfig, + }, + Channel::Kiiss, + bus(), + ) + .await + .unwrap(); + + info!( + "Received config: {:?}", + std::str::from_utf8(&config.config).unwrap() + ); +} diff --git a/services/kiiss/src/server.rs b/services/kiiss/src/server.rs new file mode 100644 index 000000000..6b6c77b88 --- /dev/null +++ b/services/kiiss/src/server.rs @@ -0,0 +1,151 @@ +#[macro_use] +extern crate lazy_static; + +use async_trait::async_trait; +use common::*; +use log::info; +use mbus_api::*; +use smol::io; +use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; +use structopt::StructOpt; +use tokio::sync::Mutex; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, +} + +/// Needed so we can implement the ServiceSubscriber trait for +/// the message types external to the crate +#[derive(Clone, Default)] +struct ServiceHandler { + data: PhantomData, +} + +#[derive(Default)] +struct ConfigState { + state: Mutex>>>, +} + +lazy_static! 
{ + static ref CONFIGS: ConfigState = Default::default(); +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + let data: ConfigUpdate = args.request.inner()?; + info!("Received: {:?}", data); + + let msg: ReceivedMessage = args.request.try_into()?; + let config = msg.inner(); + + let mut state = CONFIGS.state.lock().await; + + match state.get_mut(&msg.sender()) { + Some(map) => { + map.insert(config.kind, config.data); + } + None => { + let mut config_map = HashMap::new(); + config_map.insert(config.kind, config.data); + state.insert(msg.sender(), config_map); + } + } + + msg.reply(()).await + } + fn filter(&self) -> Vec { + vec![ConfigUpdate::default().id()] + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + let data: ConfigGetCurrent = args.request.inner()?; + info!("Received: {:?}", data); + + let msg: ReceivedMessage = + args.request.try_into()?; + let request = msg.inner(); + + let state = CONFIGS.state.lock().await; + + match state.get(&msg.sender()) { + Some(config) => match config.get(&request.kind) { + Some(data) => { + msg.reply(ReplyConfig { + config: data.clone(), + }) + .await + } + None => { + msg.reply(Err(Error::WithMessage { + message: "Config is missing".into(), + })) + .await + } + }, + None => { + msg.reply(Err(Error::WithMessage { + message: "Config is missing".into(), + })) + .await + } + } + } + fn filter(&self) -> Vec { + vec![ConfigGetCurrent::default().id()] + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + let _: ReceivedMessage = args.request.try_into()?; + Ok(()) + } + fn filter(&self) -> Vec { + vec![Register::default().id()] + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), 
io::Error> { + let _: ReceivedMessage = args.request.try_into()?; + Ok(()) + } + fn filter(&self) -> Vec { + vec![Deregister::default().id()] + } +} + +#[tokio::main] +async fn main() { + env_logger::init_from_env( + env_logger::Env::default() + .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let cli_args = CliArgs::from_args(); + info!("Using options: {:?}", &cli_args); + + server(cli_args).await; +} + +async fn server(cli_args: CliArgs) { + Service::builder(cli_args.url, Channel::Kiiss) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_channel(Channel::Registry) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .run() + .await; +} From f2385236448a52b1208edecfdca5f09c482f41a3 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Tue, 13 Oct 2020 18:55:19 +0100 Subject: [PATCH 07/92] snapshots: Implement create snapshots for local replicas Local replicas connected over loopback have an 'lvol' bdev, which does not support NVMe Admin commands so explicitly allow it for any bdev so that the call makes it as far as nexus_bdev::nvme_admin. From there, have a separate codepath for lvol bdevs that creates the snapshot. This requires a separate Nexus::io_completion_local for this case where there is no actual child IO so that the parent IO records the status and completes correctly. Improve error logging and introduce value-only versions of the cdw10/11 helpers to tidy up the snapshot time retrieval. 
--- mayastor-test/test_snapshot.js | 115 +++++++++++++++--- mayastor/src/bdev/nexus/nexus_bdev.rs | 65 +++++++++- .../src/bdev/nexus/nexus_bdev_snapshot.rs | 5 +- mayastor/src/bdev/nexus/nexus_fn_table.rs | 12 +- mayastor/src/core/mod.rs | 4 +- mayastor/src/lvs/lvol.rs | 43 ++++++- mayastor/src/subsys/mod.rs | 1 + mayastor/src/subsys/nvmf/admin_cmd.rs | 23 +++- mayastor/src/subsys/nvmf/mod.rs | 2 +- mayastor/tests/replica_snapshot.rs | 110 +++++++++-------- spdk-sys/nvme_helper.c | 10 ++ spdk-sys/nvme_helper.h | 2 + 12 files changed, 302 insertions(+), 90 deletions(-) diff --git a/mayastor-test/test_snapshot.js b/mayastor-test/test_snapshot.js index 586f08400..2ffc8768d 100644 --- a/mayastor-test/test_snapshot.js +++ b/mayastor-test/test_snapshot.js @@ -10,9 +10,11 @@ const enums = require('./grpc_enums'); const UUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff21'; const replicaUuid = '00000000-76b6-4fcf-864d-1027d4038756'; -const poolName = 'pool0'; +const poolName = 'pool1'; +const pool2Name = 'pool2'; // backend file for pool -const poolFile = '/tmp/pool-backend'; +const poolFile = '/tmp/pool1-backend'; +const pool2File = '/tmp/pool2-backend'; // 128MB is the size of pool const diskSize = 128 * 1024 * 1024; // 64MB is the size of replica @@ -31,14 +33,14 @@ nexus_opts: iscsi_nexus_port: 3260 iscsi_replica_port: 3262 pools: - - name: pool0 + - name: pool2 disks: - - aio:///tmp/pool-backend + - aio:///tmp/pool2-backend replicas: [] `; var client, client2; -var disks; +var disks, disks2; // URI of Nexus published over NVMf var nexusUri; @@ -56,6 +58,7 @@ describe('snapshot', function () { return done(new Error('Failed to initialize grpc client for 2nd Mayastor instance')); } disks = [poolFile]; + disks2 = [pool2File]; async.series( [ @@ -65,6 +68,12 @@ describe('snapshot', function () { (next) => { fs.truncate(poolFile, diskSize, next); }, + (next) => { + fs.writeFile(pool2File, '', next); + }, + (next) => { + fs.truncate(pool2File, diskSize, next); + }, // start this as 
early as possible to avoid mayastor getting connection refused. (next) => { // Start another mayastor instance for the remote nvmf target of the @@ -109,6 +118,12 @@ describe('snapshot', function () { if (err) console.log('unlink failed:', poolFile, err); next(); }); + }, + (next) => { + fs.unlink(pool2File, (err) => { + if (err) console.log('unlink failed:', pool2File, err); + next(); + }); } ], (err) => { @@ -125,7 +140,7 @@ describe('snapshot', function () { it('should destroy the pool loaded from yaml', (done) => { client2.destroyPool( - { name: poolName }, + { name: pool2Name }, (err, res) => { if (err) return done(err); done(); @@ -133,9 +148,9 @@ describe('snapshot', function () { ); }); - it('should create a pool with aio bdevs', (done) => { + it('should create a local pool with aio bdevs', (done) => { // explicitly specify aio as that always works - client2.createPool( + client.createPool( { name: poolName, disks: disks.map((d) => `aio://${d}`) }, (err, res) => { if (err) return done(err); @@ -151,12 +166,46 @@ describe('snapshot', function () { ); }); - it('should create a replica exported over nvmf', (done) => { - client2.createReplica( + it('should create a remote pool with aio bdevs', (done) => { + client2.createPool( + { name: pool2Name, disks: disks2.map((d) => `aio://${d}`) }, + (err, res) => { + if (err) return done(err); + assert.equal(res.name, pool2Name); + assert.equal(res.used, 0); + assert.equal(res.state, 'POOL_ONLINE'); + assert.equal(res.disks.length, disks2.length); + for (let i = 0; i < res.disks.length; ++i) { + assert.equal(res.disks[i].includes(disks2[i]), true); + } + done(); + } + ); + }); + + it('should create a local replica', (done) => { + client.createReplica( { uuid: replicaUuid, pool: poolName, thin: true, + share: 'REPLICA_NONE', + size: replicaSize + }, + (err, res) => { + if (err) return done(err); + assert.match(res.uri, /^bdev:\/\//); + done(); + } + ); + }); + + it('should create a remote replica exported over nvmf', 
(done) => { + client2.createReplica( + { + uuid: replicaUuid, + pool: pool2Name, + thin: true, share: 'REPLICA_NVMF', size: replicaSize }, @@ -168,11 +217,12 @@ describe('snapshot', function () { ); }); - it('should create a nexus with 1 nvmf replica', (done) => { + it('should create a nexus with a local replica and 1 remote nvmf replica', (done) => { const args = { uuid: UUID, size: 131072, - children: ['nvmf://' + common.getMyIp() + ':8430/nqn.2019-05.io.openebs:' + replicaUuid] + children: ['loopback:///' + replicaUuid, + 'nvmf://' + common.getMyIp() + ':8430/nqn.2019-05.io.openebs:' + replicaUuid] }; client.createNexus(args, (err) => { @@ -187,7 +237,7 @@ describe('snapshot', function () { assert.lengthOf(res.nexus_list, 1); const nexus = res.nexus_list[0]; - const expectedChildren = 1; + const expectedChildren = 2; assert.equal(nexus.uuid, UUID); assert.equal(nexus.state, 'NEXUS_ONLINE'); assert.lengthOf(nexus.children, expectedChildren); @@ -218,14 +268,29 @@ describe('snapshot', function () { }); }); - it('should list the snapshot as a replica', (done) => { - client2.listReplicas({}, (err, res) => { + it('should list the snapshot as a local replica', (done) => { + client.listReplicas({}, (err, res) => { if (err) return done(err); res = res.replicas.filter((ent) => ent.pool === poolName); assert.lengthOf(res, 2); res = res[1]; + assert.equal(res.uuid.startsWith(replicaUuid + '-snap-'), true); + assert.equal(res.share, 'REPLICA_NONE'); + assert.match(res.uri, /^bdev:\/\/\//); + done(); + }); + }); + + it('should list the snapshot as a remote replica', (done) => { + client2.listReplicas({}, (err, res) => { + if (err) return done(err); + + res = res.replicas.filter((ent) => ent.pool === pool2Name); + assert.lengthOf(res, 2); + res = res[1]; + assert.equal(res.uuid.startsWith(replicaUuid + '-snap-'), true); assert.equal(res.share, 'REPLICA_NONE'); assert.match(res.uri, /^bdev:\/\/\//); @@ -243,8 +308,8 @@ describe('snapshot', function () { ); }); - it('should 
list the 2 snapshots as replicas', (done) => { - client2.listReplicas({}, (err, res) => { + it('should list the 2 snapshots as local replicas', (done) => { + client.listReplicas({}, (err, res) => { if (err) return done(err); res = res.replicas.filter((ent) => ent.pool === poolName); @@ -259,6 +324,22 @@ describe('snapshot', function () { }); }); + it('should list the 2 snapshots as remote replicas', (done) => { + client2.listReplicas({}, (err, res) => { + if (err) return done(err); + + res = res.replicas.filter((ent) => ent.pool === pool2Name); + assert.lengthOf(res, 3); + var i; + for (i = 1; i < 3; i++) { + assert.equal(res[i].uuid.startsWith(replicaUuid + '-snap-'), true); + assert.equal(res[i].share, 'REPLICA_NONE'); + assert.match(res[i].uri, /^bdev:\/\/\//); + } + done(); + }); + }); + it('should remove the nexus', (done) => { const args = { uuid: UUID }; diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index be1e3574f..98d589f2b 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -5,6 +5,7 @@ //! application needs synchronous mirroring may be required. 
use std::{ + convert::TryFrom, fmt, fmt::{Display, Formatter}, os::raw::c_void, @@ -48,10 +49,12 @@ use crate::{ nexus_nvmf::{NexusNvmfError, NexusNvmfTarget}, }, }, - core::{Bdev, DmaError, Share}, + core::{Bdev, CoreError, DmaError, Share}, ffihelper::errno_result_from_i32, + lvs::Lvol, nexus_uri::{bdev_destroy, NexusBdevError}, rebuild::RebuildError, + subsys, subsys::Config, }; @@ -235,8 +238,8 @@ pub enum Error { }, #[snafu(display("Failed to get BdevHandle for snapshot operation"))] FailedGetHandle, - #[snafu(display("Failed to create snapshot"))] - FailedCreateSnapshot, + #[snafu(display("Failed to create snapshot on nexus {}", name))] + FailedCreateSnapshot { name: String, source: CoreError }, } impl From for tonic::Status { @@ -666,6 +669,32 @@ impl Nexus { chio.free(); } + /// IO completion for local replica + pub fn io_completion_local(success: bool, parent_io: *mut c_void) { + let mut pio = Bio::from(parent_io); + let pio_ctx = pio.ctx_as_mut_ref(); + + if !success { + pio_ctx.status = io_status::FAILED; + } + + // As there is no child IO, perform the IO accounting that Bio::assess + // does here, without error recording or retries. 
+ pio_ctx.in_flight -= 1; + debug_assert!(pio_ctx.in_flight >= 0); + + if pio_ctx.in_flight == 0 { + if pio_ctx.status == io_status::FAILED { + pio_ctx.io_attempts -= 1; + if pio_ctx.io_attempts == 0 { + pio.fail(); + } + } else { + pio.ok(); + } + } + } + /// callback when the IO has buffer associated with itself extern "C" fn nexus_get_buf_cb( ch: *mut spdk_io_channel, @@ -861,12 +890,40 @@ impl Nexus { // FIXME: pause IO before dispatching debug!("Passing thru create snapshot as NVMe Admin command"); } - // for pools, pass thru only works with our vendor commands as the + // for replicas, passthru only works with our vendor commands as the // underlying bdev is not nvmf let results = channels .ch .iter() .map(|c| unsafe { + debug!("nvme_admin on {}", c.get_bdev().driver()); + if c.get_bdev().driver() == "lvol" { + // Local replica, vbdev_lvol does not support NVMe Admin + // so call function directly + let lvol = Lvol::try_from(c.get_bdev()).unwrap(); + match io.nvme_cmd().opc() as u8 { + nvme_admin_opc::CREATE_SNAPSHOT => { + subsys::create_snapshot( + lvol, + &io.nvme_cmd(), + io.as_ptr(), + ); + } + _ => { + error!( + "{}: Unsupported NVMe Admin command {:x}h from IO {:?}", + io.nexus_as_ref().name, + io.nvme_cmd().opc(), + io.as_ptr() + ); + Self::io_completion_local( + false, + io.as_ptr().cast(), + ); + } + } + return 0; + } let (desc, chan) = c.io_tuple(); spdk_bdev_nvme_admin_passthru( desc, diff --git a/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs b/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs index 0bbf7973f..881f4072e 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs @@ -16,7 +16,10 @@ impl Nexus { Ok(t) => Ok(CreateSnapshotReply { name: Lvol::format_snapshot_name(&self.bdev.name(), t), }), - Err(_e) => Err(Error::FailedCreateSnapshot), + Err(e) => Err(Error::FailedCreateSnapshot { + name: self.bdev.name(), + source: e, + }), } } else { Err(Error::FailedGetHandle) diff --git 
a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index ac12913fc..6c6affc93 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -61,11 +61,11 @@ impl NexusFnTable { let nexus = unsafe { Nexus::from_raw(ctx) }; match io_type { // we always assume the device supports read/write commands - io_type::READ | io_type::WRITE => true, + // allow NVMe Admin as it is needed for local replicas + io_type::READ | io_type::WRITE | io_type::NVME_ADMIN => true, io_type::FLUSH | io_type::RESET | io_type::UNMAP - | io_type::NVME_ADMIN | io_type::WRITE_ZEROES => { let supported = nexus.io_is_supported(io_type); if !supported { @@ -145,13 +145,7 @@ impl NexusFnTable { nio.fail() } } - io_type::NVME_ADMIN => { - if nexus.io_is_supported(io_type) { - nexus.nvme_admin(&nio, &ch) - } else { - nio.fail() - } - } + io_type::NVME_ADMIN => nexus.nvme_admin(&nio, &ch), _ => panic!( "{} Received unsupported IO! type {}", nexus.name, io_type diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index e9d8e6956..443b35962 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -79,7 +79,7 @@ pub enum CoreError { ResetDispatch { source: Errno, }, - #[snafu(display("Failed to dispatch NVMe Admin",))] + #[snafu(display("Failed to dispatch NVMe Admin command {:x}h", opcode))] NvmeAdminDispatch { source: Errno, opcode: u16, @@ -96,7 +96,7 @@ pub enum CoreError { }, #[snafu(display("Reset failed"))] ResetFailed {}, - #[snafu(display("NVMe Admin failed"))] + #[snafu(display("NVMe Admin command {:x}h failed", opcode))] NvmeAdminFailed { opcode: u16, }, diff --git a/mayastor/src/lvs/lvol.rs b/mayastor/src/lvs/lvol.rs index b7f345e78..84031e5e1 100644 --- a/mayastor/src/lvs/lvol.rs +++ b/mayastor/src/lvs/lvol.rs @@ -25,7 +25,8 @@ use spdk_sys::{ }; use crate::{ - core::{Bdev, CoreError, Protocol, Share}, + bdev::nexus::nexus_bdev::Nexus, + core::{Bdev, CoreError, Mthread, Protocol, Share}, 
ffihelper::{ cb_arg, errno_result_from_i32, @@ -377,7 +378,7 @@ impl Lvol { nvme_status.set_sc(match errno { 0 => 0, _ => { - debug!("vbdev_lvol_create_snapshot errno {}", errno); + error!("vbdev_lvol_create_snapshot errno {}", errno); 0x06 // SPDK_NVME_SC_INTERNAL_DEVICE_ERROR } }); @@ -398,6 +399,42 @@ impl Lvol { ) }; - info!("Creating snapshot {}", snapshot_name); + info!("Creating snapshot {} on {}", snapshot_name, &self); + } + + /// Create snapshot for local replica + pub async fn create_snapshot_local( + &self, + io: *mut spdk_sys::spdk_bdev_io, + snapshot_name: &str, + ) { + extern "C" fn snapshot_done_cb( + bio_ptr: *mut c_void, + _lvol_ptr: *mut spdk_lvol, + errno: i32, + ) { + if errno != 0 { + error!("vbdev_lvol_create_snapshot errno {}", errno); + } + // Must complete IO on thread IO was submitted from + let thread = Mthread::from_null_checked(unsafe { + spdk_sys::spdk_bdev_io_get_thread(bio_ptr.cast()) + }) + .expect("bio must have been submitted from an spdk_thread"); + thread.enter(); + Nexus::io_completion_local(errno == 0, bio_ptr); + } + + let c_snapshot_name = snapshot_name.into_cstring(); + unsafe { + vbdev_lvol_create_snapshot( + self.0.as_ptr(), + c_snapshot_name.as_ptr(), + Some(snapshot_done_cb), + io.cast(), + ) + }; + + info!("Creating snapshot {} on {}", snapshot_name, &self); } } diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs index fc298c14d..34af5c67e 100644 --- a/mayastor/src/subsys/mod.rs +++ b/mayastor/src/subsys/mod.rs @@ -10,6 +10,7 @@ pub use config::{ Pool, }; pub use nvmf::{ + create_snapshot, set_snapshot_time, Error as NvmfError, NvmeCpl, diff --git a/mayastor/src/subsys/nvmf/admin_cmd.rs b/mayastor/src/subsys/nvmf/admin_cmd.rs index 19930ee27..a2441ce3d 100644 --- a/mayastor/src/subsys/nvmf/admin_cmd.rs +++ b/mayastor/src/subsys/nvmf/admin_cmd.rs @@ -104,7 +104,6 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { } let bd = Bdev::from(bdev); - let base_name = bd.name(); if 
bd.driver() == nexus_module::NEXUS_NAME { // Received command on a published Nexus set_snapshot_time(unsafe { @@ -117,11 +116,11 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { // Received command on a shared replica (lvol) let cmd = unsafe { spdk_sys::spdk_nvmf_request_get_cmd(req) }; let snapshot_time = unsafe { - *spdk_sys::nvme_cmd_cdw10_get(cmd) as u64 - | (*spdk_sys::nvme_cmd_cdw11_get(cmd) as u64) << 32 + spdk_sys::nvme_cmd_cdw10_get_val(cmd) as u64 + | (spdk_sys::nvme_cmd_cdw11_get_val(cmd) as u64) << 32 }; let snapshot_name = - Lvol::format_snapshot_name(&base_name, snapshot_time); + Lvol::format_snapshot_name(&lvol.name(), snapshot_time); let nvmf_req = NvmfReq(NonNull::new(req).unwrap()); // Blobfs operations must be on md_thread Reactors::master().send_future(async move { @@ -134,6 +133,22 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { } } +pub fn create_snapshot( + lvol: Lvol, + cmd: &spdk_sys::spdk_nvme_cmd, + io: *mut spdk_sys::spdk_bdev_io, +) { + let snapshot_time = unsafe { + spdk_sys::nvme_cmd_cdw10_get_val(&*cmd) as u64 + | (spdk_sys::nvme_cmd_cdw11_get_val(&*cmd) as u64) << 32 + }; + let snapshot_name = Lvol::format_snapshot_name(&lvol.name(), snapshot_time); + // Blobfs operations must be on md_thread + Reactors::master().send_future(async move { + lvol.create_snapshot_local(io, &snapshot_name).await; + }); +} + /// Register custom NVMe admin command handler pub fn setup_create_snapshot_hdlr() { unsafe { diff --git a/mayastor/src/subsys/nvmf/mod.rs b/mayastor/src/subsys/nvmf/mod.rs index b964e9353..3a3e51122 100644 --- a/mayastor/src/subsys/nvmf/mod.rs +++ b/mayastor/src/subsys/nvmf/mod.rs @@ -13,7 +13,7 @@ use std::cell::RefCell; use nix::errno::Errno; use snafu::Snafu; -pub use admin_cmd::{set_snapshot_time, NvmeCpl, NvmfReq}; +pub use admin_cmd::{create_snapshot, set_snapshot_time, NvmeCpl, NvmfReq}; use poll_groups::PollGroup; use spdk_sys::{ spdk_subsystem, diff --git 
a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs index 408ea4a52..6d2a2263e 100644 --- a/mayastor/tests/replica_snapshot.rs +++ b/mayastor/tests/replica_snapshot.rs @@ -11,7 +11,7 @@ use mayastor::{ MayastorEnvironment, Reactor, }, - lvs::Lvol, + lvs::{Lvol, Lvs}, subsys, subsys::Config, }; @@ -19,10 +19,12 @@ use mayastor::{ pub mod common; static DISKNAME1: &str = "/tmp/disk1.img"; +static DISKNAME2: &str = "/tmp/disk2.img"; static DISKSIZE_KB: u64 = 128 * 1024; static CFGNAME1: &str = "/tmp/child1.yaml"; +static CFGNAME2: &str = "/tmp/child2.yaml"; static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756"; static NXNAME: &str = "replica_snapshot_test"; @@ -31,17 +33,23 @@ static NXNAME_SNAP: &str = "replica_snapshot_test-snap"; fn generate_config() { let mut config = Config::default(); - config.implicit_share_base = true; config.nexus_opts.iscsi_enable = false; - config.nexus_opts.nvmf_replica_port = 8430; - config.nexus_opts.nvmf_nexus_port = 8440; - let pool = subsys::Pool { - name: "pool0".to_string(), + let pool1 = subsys::Pool { + name: "pool1".to_string(), disks: vec!["aio://".to_string() + &DISKNAME1.to_string()], replicas: Default::default(), }; - config.pools = Some(vec![pool]); + config.pools = Some(vec![pool1]); config.write(CFGNAME1).unwrap(); + config.nexus_opts.nvmf_replica_port = 8430; + config.nexus_opts.nvmf_nexus_port = 8440; + let pool2 = subsys::Pool { + name: "pool2".to_string(), + disks: vec!["aio://".to_string() + &DISKNAME2.to_string()], + replicas: Default::default(), + }; + config.pools = Some(vec![pool2]); + config.write(CFGNAME2).unwrap(); } fn start_mayastor(cfg: &str) -> MayastorProcess { @@ -57,68 +65,68 @@ fn start_mayastor(cfg: &str) -> MayastorProcess { MayastorProcess::new(Box::from(args)).unwrap() } -fn conf_mayastor() { - // configuration yaml does not yet support creating replicas +fn conf_mayastor(msc_args: &[&str]) { let msc = "../target/debug/mayastor-client"; let output = Command::new(msc) - 
.args(&[ - "-p", - "10125", - "replica", - "create", - "--protocol", - "nvmf", - "pool0", - UUID1, - "--size", - "64M", - ]) + .args(&*msc_args) .output() .expect("could not exec mayastor-client"); - if !output.status.success() { io::stderr().write_all(&output.stderr).unwrap(); panic!("failed to configure mayastor"); } } -fn share_snapshot(t: u64) { - let msc = "../target/debug/mayastor-client"; - let output = Command::new(msc) - .args(&[ - "-p", - "10125", - "replica", - "share", - &Lvol::format_snapshot_name(UUID1, t), - "nvmf", - ]) - .output() - .expect("could not exec mayastor-client"); +fn create_replica() { + // configuration yaml does not yet support creating replicas + conf_mayastor(&[ + "-p", + "10125", + "replica", + "create", + "--protocol", + "nvmf", + "pool2", + UUID1, + "--size", + "64M", + ]); +} - if !output.status.success() { - io::stderr().write_all(&output.stderr).unwrap(); - panic!("failed to configure mayastor"); - } +fn share_snapshot(t: u64) { + conf_mayastor(&[ + "-p", + "10125", + "replica", + "share", + &Lvol::format_snapshot_name(UUID1, t), + "nvmf", + ]); } #[test] fn replica_snapshot() { generate_config(); - // Start with a fresh pool + // Start with fresh pools common::delete_file(&[DISKNAME1.to_string()]); common::truncate_file(DISKNAME1, DISKSIZE_KB); + common::delete_file(&[DISKNAME2.to_string()]); + common::truncate_file(DISKNAME2, DISKSIZE_KB); - let _ms1 = start_mayastor(CFGNAME1); + let _ms2 = start_mayastor(CFGNAME2); // Allow Mayastor process to start listening on NVMf port thread::sleep(time::Duration::from_millis(250)); - conf_mayastor(); + create_replica(); - test_init!(); + test_init!(CFGNAME1); Reactor::block_on(async { + let pool = Lvs::lookup("pool1").unwrap(); + pool.create_lvol(UUID1, 64 * 1024 * 1024, true) + .await + .unwrap(); create_nexus(0).await; bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); // Issue an unimplemented vendor command @@ -148,20 +156,24 @@ fn replica_snapshot() { mayastor_env_stop(0); 
common::delete_file(&[DISKNAME1.to_string()]); + common::delete_file(&[DISKNAME2.to_string()]); } async fn create_nexus(t: u64) { - let mut child_name = "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:" - .to_string() - + &UUID1.to_string(); + let mut children = vec![ + "loopback:///".to_string() + &UUID1.to_string(), + "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:".to_string() + + &UUID1.to_string(), + ]; let mut nexus_name = NXNAME; if t > 0 { - child_name = Lvol::format_snapshot_name(&child_name, t); + children + .iter_mut() + .for_each(|c| *c = Lvol::format_snapshot_name(&c, t)); nexus_name = NXNAME_SNAP; } - let ch = vec![child_name]; - nexus_create(&nexus_name, 64 * 1024 * 1024, None, &ch) + nexus_create(&nexus_name, 64 * 1024 * 1024, None, &children) .await .unwrap(); } diff --git a/spdk-sys/nvme_helper.c b/spdk-sys/nvme_helper.c index 4e97c8ee1..3c2a3ea39 100644 --- a/spdk-sys/nvme_helper.c +++ b/spdk-sys/nvme_helper.c @@ -15,6 +15,16 @@ nvme_cmd_cdw11_get(struct spdk_nvme_cmd *cmd) { return &cmd->cdw11; } +uint32_t +nvme_cmd_cdw10_get_val(const struct spdk_nvme_cmd *cmd) { + return cmd->cdw10; +} + +uint32_t +nvme_cmd_cdw11_get_val(const struct spdk_nvme_cmd *cmd) { + return cmd->cdw11; +} + struct spdk_nvme_status * nvme_status_get(struct spdk_nvme_cpl *cpl) { return &cpl->status; diff --git a/spdk-sys/nvme_helper.h b/spdk-sys/nvme_helper.h index 1f43d4fe6..cb7b3c0b1 100644 --- a/spdk-sys/nvme_helper.h +++ b/spdk-sys/nvme_helper.h @@ -7,6 +7,8 @@ struct spdk_nvme_cmd; struct spdk_nvme_cpl; struct spdk_nvme_status; +uint32_t nvme_cmd_cdw10_get_val(const struct spdk_nvme_cmd *cmd); +uint32_t nvme_cmd_cdw11_get_val(const struct spdk_nvme_cmd *cmd); uint32_t *nvme_cmd_cdw10_get(struct spdk_nvme_cmd *cmd); uint32_t *nvme_cmd_cdw11_get(struct spdk_nvme_cmd *cmd); From 1912bded45085e3c5bd92bd1483c51ccaab6a022 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Mon, 19 Oct 2020 18:36:57 +0100 Subject: [PATCH 08/92] 
uring: Re-enable bdev and pool tests The tests can be re-enabled as Ubuntu 20.04 has updated its kernel 5.4.0-50 which includes the fix for a NULL pointer dereference in io_sq_wq_submit_work(). The regression was originally introudced in 5.4.0-45, the corresponding upstream fix and first regressed versions are 5.4.62 and 5.4.51, respectively. Since the tests were last run, SPDK was upgraded to 20.07 and the uring bdev module no longer uses IORING_SETUP_IOPOLL so remove the restrictions on O_DIRECT and filesystem type from bdev::util::uring and the uring-support CLI. Also bump liburing to 0.7 and the io-uring crate to 0.4.0. --- Cargo.lock | 4 +- mayastor-test/test_nexus.js | 6 +-- mayastor-test/test_replica.js | 4 +- mayastor/Cargo.toml | 2 +- mayastor/src/bdev/util/uring.rs | 88 +------------------------------ mayastor/src/bin/uring-support.rs | 12 ++--- mayastor/tests/core.rs | 8 +-- nix/pkgs/mayastor/default.nix | 2 +- nix/sources.json | 8 +-- 9 files changed, 18 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e98dfbe0a..54a3fbd5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1544,9 +1544,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31e11f8867575fc79a3e73e5f554d0b7386bc4a6f469039e8a83136c724fd81" +checksum = "2f7589adca0ddd74f56ed83a5098b45e3abf264dc27e150a8bec3397fcc34338" dependencies = [ "bitflags", "libc", diff --git a/mayastor-test/test_nexus.js b/mayastor-test/test_nexus.js index b32d3b863..d37dc826f 100644 --- a/mayastor-test/test_nexus.js +++ b/mayastor-test/test_nexus.js @@ -187,13 +187,11 @@ var doUring = (function () { 'debug', 'uring-support' ); - const CMD = URING_SUPPORT_CMD + ' ' + uringFile; - exec(CMD, (error) => { + exec(URING_SUPPORT_CMD, (error) => { if (error) { return; } - // FIXME enable once a fixed Ubuntu kernel 5.4 is released - supportsUring = false; + supportsUring = true; }); } return 
supportsUring; diff --git a/mayastor-test/test_replica.js b/mayastor-test/test_replica.js index f276496f0..e570c450c 100644 --- a/mayastor-test/test_replica.js +++ b/mayastor-test/test_replica.js @@ -445,9 +445,7 @@ describe('replica', function () { if (error) { self.skip(); } - self.skip(); - // FIXME enable once a fixed Ubuntu kernel 5.4 is released - // done(); + done(); }); }); diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index f6f1d6693..e9c2ca402 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -48,7 +48,7 @@ env_logger = "0.7" futures = "0.3" futures-timer = "2.0" git-version = "0.3" -io-uring = "0.3.4" +io-uring = "0.4.0" ioctl-gen = "0.1.1" jsonrpc = { path = "../jsonrpc"} libc = "0.2" diff --git a/mayastor/src/bdev/util/uring.rs b/mayastor/src/bdev/util/uring.rs index 2786fe27f..096ab56b6 100644 --- a/mayastor/src/bdev/util/uring.rs +++ b/mayastor/src/bdev/util/uring.rs @@ -1,90 +1,6 @@ -use std::{ - fs, - fs::{File, OpenOptions}, - io::{BufRead, BufReader, ErrorKind}, - os::unix::fs::{FileTypeExt, OpenOptionsExt}, -}; - -pub fn fs_supports_direct_io(path: &str) -> bool { - // SPDK uring bdev uses IORING_SETUP_IOPOLL which is usable only on a file - // descriptor opened with O_DIRECT. The file system or block device must - // also support polling. 
- // This works on at least XFS filesystems - match OpenOptions::new() - .read(true) - .write(true) - .custom_flags(libc::O_DIRECT) - .open(path) - { - Ok(_f) => true, - Err(e) => { - assert_eq!(e.kind(), ErrorKind::InvalidInput); - println!("Skipping uring bdev, open: {:?}", e); - false - } - } -} - -fn get_mount_filesystem(path: &str) -> Option { - let mut path = std::path::Path::new(path); - loop { - let f = match File::open("/etc/mtab") { - Ok(f) => f, - Err(e) => { - eprintln!("open: {}", e); - return None; - } - }; - let reader = BufReader::new(f); - - let d = path.to_str().unwrap(); - for line in reader.lines() { - let l = match line { - Ok(l) => l, - Err(e) => { - eprintln!("line: {}", e); - return None; - } - }; - let parts: Vec<&str> = l.split_whitespace().collect(); - if !parts.is_empty() && parts[1] == d { - return Some(parts[2].to_string()); - } - } - - path = match path.parent() { - None => return None, - Some(p) => p, - } - } -} - -pub fn fs_type_supported(path: &str) -> bool { - let metadata = match fs::metadata(path) { - Ok(m) => m, - Err(e) => { - eprintln!("metadata: {}", e); - return false; - } - }; - if metadata.file_type().is_block_device() { - return true; - } - match get_mount_filesystem(path) { - None => { - println!("Skipping uring bdev, unknown fs"); - false - } - Some(d) => match d.as_str() { - "xfs" => true, - _ => { - println!("Skipping uring bdev, fs: {}", d); - false - } - }, - } -} +//! 
Utility functions for io_uring support +/// Returns true if the running kernel supports io_uring pub fn kernel_support() -> bool { // Match SPDK_URING_QUEUE_DEPTH let queue_depth = 512; diff --git a/mayastor/src/bin/uring-support.rs b/mayastor/src/bin/uring-support.rs index acc25fdae..6f3b3ccd9 100644 --- a/mayastor/src/bin/uring-support.rs +++ b/mayastor/src/bin/uring-support.rs @@ -1,23 +1,17 @@ extern crate clap; -use clap::{App, Arg}; +use clap::App; use mayastor::bdev::util::uring; fn main() { - let matches = App::new("io_uring support") + let _matches = App::new("io_uring support") .version("0.1.0") .author("Jonathan Teh ") .about("Determines io_uring support") - .arg(Arg::with_name("uring-path").help("Path to file").index(1)) .get_matches(); - let supported = match matches.value_of("uring-path") { - None => true, - Some(path) => { - uring::fs_supports_direct_io(path) && uring::fs_type_supported(path) - } - } && uring::kernel_support(); + let supported = uring::kernel_support(); std::process::exit(!supported as i32) } diff --git a/mayastor/tests/core.rs b/mayastor/tests/core.rs index 1ad074dcd..29f87952d 100644 --- a/mayastor/tests/core.rs +++ b/mayastor/tests/core.rs @@ -33,13 +33,9 @@ pub mod common; fn do_uring() -> bool { unsafe { INIT.call_once(|| { - DO_URING = uring::fs_supports_direct_io(DISKNAME3) - && uring::fs_type_supported(DISKNAME3) - && uring::kernel_support(); + DO_URING = uring::kernel_support(); }); - false - // FIXME enable once a fixed Ubuntu kernel 5.4 is released - // DO_URING + DO_URING } } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 5f91e0557..ea4c92033 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -41,7 +41,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0r6cix3s60c3qaj3lsr3bbr6by28rhapclcs4jw5d68s882i2lh8"; + cargoSha256 = 
"1al1gx0yn37fmp42hgyl9gn9cw91xd93aappdn84j2sa1nd7mgp0"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" diff --git a/nix/sources.json b/nix/sources.json index d9cb0ef32..70a2e26d7 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -12,15 +12,15 @@ "url_template": "https://github.com///archive/.tar.gz" }, "liburing": { - "branch": "liburing-0.6", + "branch": "liburing-0.7", "description": null, "homepage": null, "owner": "axboe", "repo": "liburing", - "rev": "f0c5c54945ae92a00cdbb43bdf3abaeab6bd3a23", - "sha256": "06lrqx0ch8yszy6ck5y0kj8wn7i1bnjlrdgxbmr3g32ymic1hyln", + "rev": "45f0735219a615ae848033c47c7e2d85d101d43e", + "sha256": "15z44l7y4c6s6dlf7v8lq4znlsjbja2r4ifbni0l8cdcnq0w3zh3", "type": "tarball", - "url": "https://github.com/axboe/liburing/archive/f0c5c54945ae92a00cdbb43bdf3abaeab6bd3a23.tar.gz", + "url": "https://github.com/axboe/liburing/archive/45f0735219a615ae848033c47c7e2d85d101d43e.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "niv": { From 3bb8d630af478e3dc7a66c1f554cd2080c583fdf Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Wed, 14 Oct 2020 14:41:49 +0100 Subject: [PATCH 09/92] Create service associated with CSI plugin. CAS-480 Create service associated with CSI plugin to service filesystem freeze unfreeze requests. To properly snapshot an filesystem volume, the FS must be frozen prior to creating the lvol snapshot and then be unfrozen. To do this use the linux utililty fsfreeze which must be run on the node on which the volume has been mounted. MOAC will make remote call to the service running on the node to do this. Add file missed out in initial commit. Fix error handling code. Use tokio::process::Command not std::.. rustacean fixes + review fixes Make service port configurable. Fixes for some reviews. tokio fixes Review fixes. Another review fix. Review request, fix a couple of typos. 
Review fix: use expect Remove trailing `.` from .expect --- csi/build.rs | 6 ++- csi/proto/mayastornodeplugin.proto | 44 ++++++++++++++++ csi/src/freezefs.rs | 75 ++++++++++++++++++++++++++ csi/src/nodeplugin_grpc.rs | 85 ++++++++++++++++++++++++++++++ csi/src/server.rs | 66 ++++++++++++++++++----- deploy/csi-daemonset.yaml | 10 ++++ mayastor-test/test_common.js | 4 +- 7 files changed, 275 insertions(+), 15 deletions(-) create mode 100644 csi/proto/mayastornodeplugin.proto create mode 100644 csi/src/freezefs.rs create mode 100644 csi/src/nodeplugin_grpc.rs diff --git a/csi/build.rs b/csi/build.rs index 78c9462bd..362d8edcd 100644 --- a/csi/build.rs +++ b/csi/build.rs @@ -4,5 +4,9 @@ fn main() { tonic_build::configure() .build_server(true) .compile(&["proto/csi.proto"], &["proto"]) - .unwrap_or_else(|e| panic!("csi protobuf compilation failed: {}", e)); + .expect("csi protobuf compilation failed"); + tonic_build::configure() + .build_server(true) + .compile(&["proto/mayastornodeplugin.proto"], &["proto"]) + .expect("mayastor node grpc service protobuf compilation failed"); } diff --git a/csi/proto/mayastornodeplugin.proto b/csi/proto/mayastornodeplugin.proto new file mode 100644 index 000000000..b02ae3676 --- /dev/null +++ b/csi/proto/mayastornodeplugin.proto @@ -0,0 +1,44 @@ +// The definition of mayastor node plugin gRPC interface. +// The node plugin service runs on all nodes running +// the Mayastor CSI node plugin, and is complementary +// to the CSI node plugin service. + +// This interface is supposed to be independent on particular computing +// environment (i.e. kubernetes). + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.mayastornodeplugin"; +option java_outer_classname = "MayastorNodePluginProto"; + +package mayastornodeplugin; + +// Service for freezing and unfreezing file systems. 
+service MayastorNodePlugin { + // Freeze the file system identified by the volume ID + // no check is made if the file system had been previously frozen. + rpc FreezeFS (FreezeFSRequest) returns (FreezeFSReply) {} + // Unfreeze the file system identified by the volume ID, + // no check is made if the file system had been previously frozen. + rpc UnfreezeFS (UnfreezeFSRequest) returns (UnfreezeFSReply) {} +} + +// The request message containing ID of the volume to be frozen +message FreezeFSRequest { + string volume_id = 1; +} + +// The response message for the freeze request. +message FreezeFSReply { +} + +// The request message containing ID of the volume to be unfrozen +message UnfreezeFSRequest { + + string volume_id = 1; +} + +// The response message for the unfreeze request. +message UnfreezeFSReply { +} diff --git a/csi/src/freezefs.rs b/csi/src/freezefs.rs new file mode 100644 index 000000000..0624edaf6 --- /dev/null +++ b/csi/src/freezefs.rs @@ -0,0 +1,75 @@ +//! The files system freeze support using linux utility fsfreeze +use crate::{ + dev::{Device, DeviceError}, + mount, +}; +use snafu::{ResultExt, Snafu}; +use tokio::process::Command; +use uuid::Uuid; + +#[derive(Debug, Snafu)] +#[snafu(visibility = "pub(crate)")] +pub enum FreezeFsError { + #[snafu(display("Cannot find volume: volume ID: {}", volid))] + VolumeNotFound { volid: String }, + #[snafu(display("Invalid volume ID: {}, {}", volid, source))] + InvalidVolumeId { + source: uuid::parser::ParseError, + volid: String, + }, + #[snafu(display("fsfreeze failed: volume ID: {}, {}", volid, error))] + FsfreezeFailed { volid: String, error: String }, + #[snafu(display("Internal failure: volume ID:{}, {}", volid, source))] + InternalFailure { source: DeviceError, volid: String }, + #[snafu(display("IO error: volume ID: {}, {}", volid, source))] + IOError { + source: std::io::Error, + volid: String, + }, +} + +const FSFREEZE: &str = "fsfreeze"; + +async fn fsfreeze( + volume_id: &str, + freeze_op: &str, +) 
-> Result<(), FreezeFsError> { + let uuid = Uuid::parse_str(volume_id).context(InvalidVolumeId { + volid: volume_id.to_string(), + })?; + + if let Some(device) = + Device::lookup(&uuid).await.context(InternalFailure { + volid: volume_id.to_string(), + })? + { + let device_path = device.devname(); + if let Some(mnt) = mount::find_mount(Some(&device_path), None) { + let args = [freeze_op, &mnt.dest]; + let output = + Command::new(FSFREEZE).args(&args).output().await.context( + IOError { + volid: volume_id.to_string(), + }, + )?; + if output.status.success() { + return Ok(()); + } else { + return Err(FreezeFsError::FsfreezeFailed { + volid: volume_id.to_string(), + error: String::from_utf8(output.stderr).unwrap(), + }); + } + } + } + Err(FreezeFsError::VolumeNotFound { + volid: volume_id.to_string(), + }) +} +pub async fn freeze_volume(volume_id: &str) -> Result<(), FreezeFsError> { + fsfreeze(volume_id, "--freeze").await +} + +pub async fn unfreeze_volume(volume_id: &str) -> Result<(), FreezeFsError> { + fsfreeze(volume_id, "--unfreeze").await +} diff --git a/csi/src/nodeplugin_grpc.rs b/csi/src/nodeplugin_grpc.rs new file mode 100644 index 000000000..4df9433e0 --- /dev/null +++ b/csi/src/nodeplugin_grpc.rs @@ -0,0 +1,85 @@ +//! The mayastor node plugin gRPC service +//! This provides access to functionality that needs to be executed on the same +//! node as a Mayastor CSI node plugin, but it is not possible to do so within +//! the CSI framework. This service must be deployed on all nodes the +//! Mayastor CSI node plugin is deployed. 
+use crate::freezefs; +use freezefs::{freeze_volume, unfreeze_volume, FreezeFsError}; +use mayastor_node_plugin::*; +use tonic::{transport::Server, Code, Request, Response, Status}; + +pub mod mayastor_node_plugin { + tonic::include_proto!("mayastornodeplugin"); +} + +#[derive(Debug, Default)] +pub struct MayastorNodePluginSvc {} + +impl From for Status { + fn from(err: FreezeFsError) -> Self { + match err { + FreezeFsError::VolumeNotFound { + .. + } => Status::new(Code::NotFound, err.to_string()), + FreezeFsError::FsfreezeFailed { + .. + } => Status::new(Code::Internal, err.to_string()), + FreezeFsError::InvalidVolumeId { + .. + } => Status::new(Code::InvalidArgument, err.to_string()), + FreezeFsError::InternalFailure { + .. + } => Status::new(Code::Internal, err.to_string()), + FreezeFsError::IOError { + .. + } => Status::new(Code::Unknown, err.to_string()), + } + } +} + +#[tonic::async_trait] +impl mayastor_node_plugin_server::MayastorNodePlugin for MayastorNodePluginSvc { + async fn freeze_fs( + &self, + request: Request, + ) -> Result, Status> { + let volume_id = request.into_inner().volume_id; + debug!("freeze_fs({})", volume_id); + freeze_volume(&volume_id).await?; + Ok(Response::new(mayastor_node_plugin::FreezeFsReply {})) + } + + async fn unfreeze_fs( + &self, + request: Request, + ) -> Result, Status> { + let volume_id = request.into_inner().volume_id; + debug!("unfreeze_fs({})", volume_id); + unfreeze_volume(&volume_id).await?; + Ok(Response::new(mayastor_node_plugin::UnfreezeFsReply {})) + } +} + +pub struct MayastorNodePluginGrpcServer {} + +impl MayastorNodePluginGrpcServer { + pub async fn run(endpoint: std::net::SocketAddr) -> Result<(), ()> { + info!( + "Mayastor node plugin gRPC server configured at address {:?}", + endpoint + ); + if let Err(e) = Server::builder() + .add_service( + mayastor_node_plugin_server::MayastorNodePluginServer::new( + MayastorNodePluginSvc {}, + ), + ) + .serve(endpoint) + .await + { + error!("gRPC server failed with 
error: {}", e); + return Err(()); + } + Ok(()) + } +} diff --git a/csi/src/server.rs b/csi/src/server.rs index aa11c8061..117e9745c 100644 --- a/csi/src/server.rs +++ b/csi/src/server.rs @@ -19,6 +19,7 @@ use clap::{App, Arg}; use csi::{identity_server::IdentityServer, node_server::NodeServer}; use env_logger::{Builder, Env}; use futures::stream::TryStreamExt; +use nodeplugin_grpc::MayastorNodePluginGrpcServer; use std::{ path::Path, pin::Pin, @@ -38,16 +39,17 @@ pub mod csi { tonic::include_proto!("csi.v1"); } +mod block_vol; mod dev; mod error; - -mod block_vol; mod filesystem_vol; mod format; +mod freezefs; mod identity; mod match_dev; mod mount; mod node; +mod nodeplugin_grpc; use snafu::Snafu; @@ -109,6 +111,8 @@ impl AsyncWrite for UnixStream { } } +const GRPC_PORT: u16 = 10199; + #[tokio::main] async fn main() -> Result<(), String> { let matches = App::new("Mayastor CSI plugin") @@ -135,6 +139,15 @@ async fn main() -> Result<(), String> { .required(true) .takes_value(true), ) + .arg( + Arg::with_name("grpc-endpoint") + .short("g") + .long("grpc-endpoint") + .value_name("NAME") + .help("ip address where this instance runs, and optionally the gRPC port") + .required(true) + .takes_value(true), + ) .arg( Arg::with_name("v") .short("v") @@ -144,6 +157,7 @@ async fn main() -> Result<(), String> { .get_matches(); let node_name = matches.value_of("node-name").unwrap(); + let endpoint = matches.value_of("grpc-endpoint").unwrap(); let csi_socket = matches .value_of("csi-socket") .unwrap_or("/var/tmp/csi.sock"); @@ -191,16 +205,42 @@ async fn main() -> Result<(), String> { } } - let mut uds_sock = UnixListener::bind(csi_socket).unwrap(); - info!("CSI plugin bound to {}", csi_socket); - - let uds = Server::builder() - .add_service(NodeServer::new(Node { - node_name: node_name.into(), - filesystems: probe_filesystems(), - })) - .add_service(IdentityServer::new(Identity {})) - .serve_with_incoming(uds_sock.incoming().map_ok(UnixStream)); - let _ = uds.await; + let 
sock_addr = if endpoint.contains(':') { + endpoint.to_string() + } else { + format!("{}:{}", endpoint, GRPC_PORT) + }; + + let _ = tokio::join!( + CSIServer::run(csi_socket, node_name), + MayastorNodePluginGrpcServer::run( + sock_addr.parse().expect("Invalid gRPC endpoint") + ), + ); + Ok(()) } + +struct CSIServer {} + +impl CSIServer { + pub async fn run(csi_socket: &str, node_name: &str) -> Result<(), ()> { + let mut uds_sock = UnixListener::bind(csi_socket).unwrap(); + info!("CSI plugin bound to {}", csi_socket); + + if let Err(e) = Server::builder() + .add_service(NodeServer::new(Node { + node_name: node_name.into(), + filesystems: probe_filesystems(), + })) + .add_service(IdentityServer::new(Identity {})) + .serve_with_incoming(uds_sock.incoming().map_ok(UnixStream)) + .await + { + error!("CSI server failed with error: {}", e); + return Err(()); + } + + Ok(()) + } +} diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml index dcf68c04e..a8c91b687 100644 --- a/deploy/csi-daemonset.yaml +++ b/deploy/csi-daemonset.yaml @@ -38,11 +38,16 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: RUST_BACKTRACE value: "1" args: - "--csi-socket=/csi/csi.sock" - "--node-name=$(MY_NODE_NAME)" + - "--grpc-endpoint=$(MY_POD_IP):10199" - "-v" volumeMounts: - name: device @@ -87,6 +92,11 @@ spec: requests: cpu: "100m" memory: "50Mi" + # Mayastor node plugin gRPC server + ports: + - containerPort: 10199 + protocol: TCP + name: mayastor-node volumes: - name: device hostPath: diff --git a/mayastor-test/test_common.js b/mayastor-test/test_common.js index 044db6f28..519cf9274 100644 --- a/mayastor-test/test_common.js +++ b/mayastor-test/test_common.js @@ -203,7 +203,9 @@ function startMayastorCsi () { '-n', 'test-node-id', '-c', - CSI_ENDPOINT + CSI_ENDPOINT, + '-g', + LOCALHOST ]); } From 7e3b08210a779fda61e820b3ac9a78935619c5d6 Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Thu, 15 Oct 
2020 17:03:35 +0100 Subject: [PATCH 10/92] code clean up --- jsonrpc/src/error.rs | 38 ++++++++---------- jsonrpc/src/lib.rs | 80 +++++++++++++++----------------------- services/common/src/lib.rs | 4 +- 3 files changed, 49 insertions(+), 73 deletions(-) diff --git a/jsonrpc/src/error.rs b/jsonrpc/src/error.rs index 02f44cb72..6763074d8 100644 --- a/jsonrpc/src/error.rs +++ b/jsonrpc/src/error.rs @@ -26,26 +26,25 @@ pub enum Error { GenericError(String), } -impl Error { - /// Conversion from jsonrpc error to grpc status. - /// - /// NOTE: normally we would have a From trait for Status type, but - /// we can't since both Status type and From trait are external. - pub fn into_status(self) -> Status { - match self { +impl From for Code { + fn from(code: RpcCode) -> Code { + match code { + RpcCode::InvalidParams => Code::InvalidArgument, + RpcCode::NotFound => Code::NotFound, + RpcCode::AlreadyExists => Code::AlreadyExists, + _ => Code::Internal, + } + } +} + +impl From for Status { + fn from(error: Error) -> Status { + match error { Error::RpcError { code, msg, - } => { - let code = match code { - RpcCode::InvalidParams => Code::InvalidArgument, - RpcCode::NotFound => Code::NotFound, - RpcCode::AlreadyExists => Code::AlreadyExists, - _ => Code::Internal, - }; - Status::new(code, msg) - } - _ => Status::new(Code::Internal, self.to_string()), + } => Status::new(code.into(), msg), + _ => Status::new(Code::Internal, error.to_string()), } } } @@ -70,11 +69,6 @@ impl fmt::Display for Error { } } -impl From for Status { - fn from(e: Error) -> Self { - e.into_status() - } -} // Automatic conversion functions for simply using .into() on various return // types follow diff --git a/jsonrpc/src/lib.rs b/jsonrpc/src/lib.rs index e27717a31..aede4506b 100644 --- a/jsonrpc/src/lib.rs +++ b/jsonrpc/src/lib.rs @@ -59,7 +59,7 @@ pub struct RpcError { pub data: Option, } -/// Make json-rpc request and parse reply and return user data to caller. 
+/// Make json-rpc request, parse reply, and return user data to caller. pub async fn call( sock_path: &str, method: &str, @@ -69,21 +69,15 @@ where A: serde::ser::Serialize, R: 'static + serde::de::DeserializeOwned + Send, { - let params = match args { - Some(val) => Some(serde_json::to_value(val).unwrap()), - None => None, - }; - let request = Request { method, - params, + params: args.map(serde_json::to_value).transpose()?, id: From::from(0), jsonrpc: Some("2.0"), }; let mut buf = serde_json::to_vec(&request)?; - let sock = sock_path.to_string(); - let mut socket = UnixStream::connect(sock).await?; + let mut socket = UnixStream::connect(String::from(sock_path)).await?; trace!("JSON request: {}", String::from_utf8_lossy(&buf)); @@ -94,10 +88,7 @@ where socket.read_to_end(&mut buf).await?; socket.shutdown(Shutdown::Both)?; - match parse_reply::(&buf) { - Ok(val) => Ok(val), - Err(err) => Err(err), - } + parse_reply(&buf) } /// Parse json-rpc reply (defined by spec) and return user data embedded in @@ -110,52 +101,43 @@ where match serde_json::from_slice::(reply_raw) { Ok(reply) => { - if let Some(vers) = reply.jsonrpc { - if vers != "2.0" { + if let Some(version) = reply.jsonrpc { + if version != "2.0" { return Err(Error::InvalidVersion); } } + if !reply.id.is_number() || reply.id.as_i64().unwrap() != 0 { return Err(Error::InvalidReplyId); } - if let Some(err) = reply.error { - Err(Error::RpcError { - code: match err.code { - -32700 => RpcCode::ParseError, - -32600 => RpcCode::InvalidRequest, - -32601 => RpcCode::MethodNotFound, - -32602 => RpcCode::InvalidParams, - -32603 => RpcCode::InternalError, - val => { - if val == -(Errno::ENOENT as i32) { - RpcCode::NotFound - } else if val == -(Errno::EEXIST as i32) { - RpcCode::AlreadyExists - } else { - error!("Unknown json-rpc error code {}", val); - RpcCode::InternalError - } + if let Some(error) = reply.error { + let code = match -error.code { + 32700 => RpcCode::ParseError, + 32600 => RpcCode::InvalidRequest, + 
32601 => RpcCode::MethodNotFound, + 32602 => RpcCode::InvalidParams, + 32603 => RpcCode::InternalError, + value => match Errno::from_i32(value) { + Errno::ENOENT => RpcCode::NotFound, + Errno::EEXIST => RpcCode::AlreadyExists, + _ => { + error!("Unknown json-rpc error code {}", value); + RpcCode::InternalError } }, - msg: err.message, - }) - } else { - match reply.result { - Some(result) => match serde_json::from_value::(result) { - Ok(val) => Ok(val), - Err(err) => Err(Error::ParseError(err)), - }, - // if there is no result fabricate null value == () - None => match serde_json::from_value::( - serde_json::value::Value::Null, - ) { - Ok(val) => Ok(val), - Err(err) => Err(Error::ParseError(err)), - }, - } + }; + return Err(Error::RpcError { + code, + msg: error.message, + }); } + + serde_json::from_value( + reply.result.unwrap_or(serde_json::value::Value::Null), + ) + .map_err(Error::ParseError) } - Err(err) => Err(Error::ParseError(err)), + Err(error) => Err(Error::ParseError(error)), } } diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index 04c414130..d6cbac12f 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -171,8 +171,8 @@ impl Service { } } - async fn process_message<'a>( - arguments: Arguments<'a>, + async fn process_message( + arguments: Arguments<'_>, subscriptions: &[Box], ) -> Result<(), ServiceError> { let channel = arguments.request.channel(); From 6a34dc65983a6d46d59d9c4bb978e9037635ce99 Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Fri, 16 Oct 2020 00:44:05 +0100 Subject: [PATCH 11/92] CAS-418 support for json-rpc proxy/passthrough --- mayastor/src/bin/cli/cli.rs | 7 +++ mayastor/src/bin/cli/context.rs | 9 +++- mayastor/src/bin/cli/jsonrpc_cli.rs | 42 ++++++++++++++++ mayastor/src/bin/main.rs | 5 +- mayastor/src/core/env.rs | 2 + mayastor/src/grpc/bdev_grpc.rs | 2 +- mayastor/src/grpc/json_grpc.rs | 56 +++++++++++++++++++++ mayastor/src/grpc/mayastor_grpc.rs | 2 +- mayastor/src/grpc/mod.rs | 1 + 
mayastor/src/grpc/server.rs | 21 ++++++-- rpc/proto/mayastor.proto | 76 +++++++++++++++++------------ 11 files changed, 183 insertions(+), 40 deletions(-) create mode 100644 mayastor/src/bin/cli/jsonrpc_cli.rs create mode 100644 mayastor/src/grpc/json_grpc.rs diff --git a/mayastor/src/bin/cli/cli.rs b/mayastor/src/bin/cli/cli.rs index 6ea82f30a..1ab573b20 100644 --- a/mayastor/src/bin/cli/cli.rs +++ b/mayastor/src/bin/cli/cli.rs @@ -7,6 +7,7 @@ use tonic::{transport::Channel, Status}; use ::rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, + json_rpc_client::JsonRpcClient, mayastor_client::MayastorClient, }; @@ -15,6 +16,7 @@ use crate::context::Context; mod bdev_cli; mod context; mod device_cli; +mod jsonrpc_cli; mod nexus_child_cli; mod nexus_cli; mod pool_cli; @@ -24,6 +26,7 @@ mod snapshot_cli; type MayaClient = MayastorClient; type BdevClient = BdevRpcClient; +type JsonClient = JsonRpcClient; pub(crate) fn parse_size(src: &str) -> Result { Byte::from_str(src).map_err(|_| src.to_string()) @@ -81,6 +84,7 @@ async fn main() -> Result<(), Status> { .subcommand(device_cli::subcommands()) .subcommand(rebuild_cli::subcommands()) .subcommand(snapshot_cli::subcommands()) + .subcommand(jsonrpc_cli::subcommands()) .get_matches(); let ctx = Context::new(&matches).await; @@ -93,6 +97,9 @@ async fn main() -> Result<(), Status> { ("replica", Some(args)) => replica_cli::handler(ctx, args).await?, ("rebuild", Some(args)) => rebuild_cli::handler(ctx, args).await?, ("snapshot", Some(args)) => snapshot_cli::handler(ctx, args).await?, + ("jsonrpc", Some(args)) => { + jsonrpc_cli::json_rpc_call(ctx, args).await? 
+ } _ => eprintln!("Internal Error: Not implemented"), }; diff --git a/mayastor/src/bin/cli/context.rs b/mayastor/src/bin/cli/context.rs index 36422e27f..8cd1b2a9b 100644 --- a/mayastor/src/bin/cli/context.rs +++ b/mayastor/src/bin/cli/context.rs @@ -1,4 +1,4 @@ -use crate::{BdevClient, MayaClient}; +use crate::{BdevClient, JsonClient, MayaClient}; use byte_unit::Byte; use clap::ArgMatches; use std::cmp::max; @@ -6,6 +6,7 @@ use std::cmp::max; pub struct Context { pub(crate) client: MayaClient, pub(crate) bdev: BdevClient, + pub(crate) json: JsonClient, verbosity: u64, units: char, } @@ -32,9 +33,13 @@ impl Context { } let client = MayaClient::connect(uri.clone()).await.unwrap(); + let bdev = BdevClient::connect(uri.clone()).await.unwrap(); + let json = JsonClient::connect(uri).await.unwrap(); + Context { client, - bdev: BdevClient::connect(uri).await.unwrap(), + bdev, + json, verbosity, units, } diff --git a/mayastor/src/bin/cli/jsonrpc_cli.rs b/mayastor/src/bin/cli/jsonrpc_cli.rs new file mode 100644 index 000000000..6935cfe7b --- /dev/null +++ b/mayastor/src/bin/cli/jsonrpc_cli.rs @@ -0,0 +1,42 @@ +use super::context::Context; +use ::rpc::mayastor as rpc; +use clap::{App, Arg, ArgMatches, SubCommand}; +use colored_json::ToColoredJson; +use tonic::Status; + +pub fn subcommands<'a, 'b>() -> App<'a, 'b> { + SubCommand::with_name("jsonrpc") + .about("Call a json-rpc method with a raw JSON payload") + .arg( + Arg::with_name("method") + .required(true) + .index(1) + .help("Name of method to call"), + ) + .arg( + Arg::with_name("params") + .default_value("") + .index(2) + .help("Parameters (JSON string) to pass to method call"), + ) +} + +pub async fn json_rpc_call( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> Result<(), Status> { + let method = matches.value_of("method").unwrap().to_owned(); + let params = matches.value_of("params").unwrap().to_owned(); + + let reply = ctx + .json + .json_rpc_call(rpc::JsonRpcRequest { + method, + params, + }) + .await?; + + 
println!("{}", reply.get_ref().result.to_colored_json_auto().unwrap()); + + Ok(()) +} diff --git a/mayastor/src/bin/main.rs b/mayastor/src/bin/main.rs index 656442b73..9556629cc 100644 --- a/mayastor/src/bin/main.rs +++ b/mayastor/src/bin/main.rs @@ -51,6 +51,7 @@ fn main() -> Result<(), Box> { info!("free_pages: {} nr_pages: {}", free_pages, nr_pages); let grpc_endpoint = grpc::endpoint(args.grpc_endpoint.clone()); + let rpc_address = args.rpc_address.clone(); let ms = rt.enter(|| MayastorEnvironment::new(args).init()); @@ -60,7 +61,9 @@ fn main() -> Result<(), Box> { futures.push(master.boxed_local()); futures.push(subsys::Registration::run().boxed_local()); - futures.push(grpc::MayastorGrpcServer::run(grpc_endpoint).boxed_local()); + futures.push( + grpc::MayastorGrpcServer::run(grpc_endpoint, rpc_address).boxed_local(), + ); rt.block_on(futures::future::try_join_all(futures)) .expect_err("reactor exit in abnormal state"); diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 94b610cb6..ae52689b2 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -760,6 +760,7 @@ impl MayastorEnvironment { { type FutureResult = Result<(), ()>; let grpc_endpoint = self.grpc_endpoint; + let rpc_addr = self.rpc_addr.clone(); let ms = self.init(); let mut rt = Builder::new() @@ -777,6 +778,7 @@ impl MayastorEnvironment { if let Some(grpc_endpoint) = grpc_endpoint { futures.push(Box::pin(grpc::MayastorGrpcServer::run( grpc_endpoint, + rpc_addr, ))); } futures.push(Box::pin(subsys::Registration::run())); diff --git a/mayastor/src/grpc/bdev_grpc.rs b/mayastor/src/grpc/bdev_grpc.rs index 97b00fc81..4e95b3e4d 100644 --- a/mayastor/src/grpc/bdev_grpc.rs +++ b/mayastor/src/grpc/bdev_grpc.rs @@ -56,7 +56,7 @@ impl From for RpcBdev { } #[derive(Debug)] -pub struct BdevSvc {} +pub struct BdevSvc; #[tonic::async_trait] impl BdevRpc for BdevSvc { diff --git a/mayastor/src/grpc/json_grpc.rs b/mayastor/src/grpc/json_grpc.rs new file mode 100644 index 
000000000..0314a76f9 --- /dev/null +++ b/mayastor/src/grpc/json_grpc.rs @@ -0,0 +1,56 @@ +//! +//! gRPC method to proxy calls to (local) SPDK json-rpc service + +use crate::grpc::GrpcResult; +use ::rpc::mayastor::{json_rpc_server::JsonRpc, JsonRpcReply, JsonRpcRequest}; +use jsonrpc::error::Error; +use tonic::{Request, Response}; + +#[derive(Debug)] +pub struct JsonRpcSvc { + pub rpc_addr: String, +} + +#[tonic::async_trait] +impl JsonRpc for JsonRpcSvc { + /// Invoke a json-rpc method and return the result + #[instrument(level = "debug", err)] + async fn json_rpc_call( + &self, + request: Request, + ) -> GrpcResult { + let args = request.into_inner(); + + let result = self + .spdk_jsonrpc_call(&args.method, empty_as_none(&args.params)) + .await?; + + Ok(Response::new(JsonRpcReply { + result, + })) + } +} + +fn empty_as_none(value: &str) -> Option<&str> { + if value.is_empty() { + None + } else { + Some(value) + } +} + +impl JsonRpcSvc { + async fn spdk_jsonrpc_call( + &self, + method: &str, + arg: Option<&str>, + ) -> Result { + let params: Option = + arg.map(serde_json::from_str).transpose()?; + + let result: serde_json::Value = + jsonrpc::call(&self.rpc_addr, method, params).await?; + + serde_json::to_string_pretty(&result).map_err(Error::ParseError) + } +} diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 41bc13cd2..3e96940eb 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -37,7 +37,7 @@ use crate::{ struct UnixStream(tokio::net::UnixStream); #[derive(Debug)] -pub struct MayastorSvc {} +pub struct MayastorSvc; #[tonic::async_trait] impl mayastor_server::Mayastor for MayastorSvc { diff --git a/mayastor/src/grpc/mod.rs b/mayastor/src/grpc/mod.rs index cfd0d0e3d..79094350c 100644 --- a/mayastor/src/grpc/mod.rs +++ b/mayastor/src/grpc/mod.rs @@ -43,6 +43,7 @@ macro_rules! 
locally { } mod bdev_grpc; +mod json_grpc; mod mayastor_grpc; mod nexus_grpc; mod server; diff --git a/mayastor/src/grpc/server.rs b/mayastor/src/grpc/server.rs index c793c4e84..1832a35f3 100644 --- a/mayastor/src/grpc/server.rs +++ b/mayastor/src/grpc/server.rs @@ -1,19 +1,30 @@ use tonic::transport::Server; -use crate::grpc::{bdev_grpc::BdevSvc, mayastor_grpc::MayastorSvc}; +use crate::grpc::{ + bdev_grpc::BdevSvc, + json_grpc::JsonRpcSvc, + mayastor_grpc::MayastorSvc, +}; use rpc::mayastor::{ bdev_rpc_server::BdevRpcServer, + json_rpc_server::JsonRpcServer, mayastor_server::MayastorServer as MayastorRpcServer, }; -pub struct MayastorGrpcServer {} +pub struct MayastorGrpcServer; impl MayastorGrpcServer { - pub async fn run(endpoint: std::net::SocketAddr) -> Result<(), ()> { + pub async fn run( + endpoint: std::net::SocketAddr, + rpc_addr: String, + ) -> Result<(), ()> { info!("gRPC server configured at address {}", endpoint); let svc = Server::builder() - .add_service(MayastorRpcServer::new(MayastorSvc {})) - .add_service(BdevRpcServer::new(BdevSvc {})) + .add_service(MayastorRpcServer::new(MayastorSvc)) + .add_service(BdevRpcServer::new(BdevSvc)) + .add_service(JsonRpcServer::new(JsonRpcSvc { + rpc_addr, + })) .serve(endpoint); match svc.await { diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto index 614dc50ef..0dced23be 100644 --- a/rpc/proto/mayastor.proto +++ b/rpc/proto/mayastor.proto @@ -326,6 +326,42 @@ message CreateSnapshotReply { string name = 1; // name of snapshot created } +message BlockDevice { + message Partition { + string parent = 1; // devname of parent device to which this partition belongs + uint32 number = 2; // partition number + string name = 3; // partition name + string scheme = 4; // partition scheme: gpt, dos, ... + string typeid = 5; // partition type identifier + string uuid = 6; // UUID identifying partition + } + message Filesystem { + string fstype = 1; // filesystem type: ext3, ntfs, ... 
+ string label = 2; // volume label + string uuid = 3; // UUID identifying the volume (filesystem) + string mountpoint = 4; // path where filesystem is currently mounted + } + string devname = 1; // entry in /dev associated with device + string devtype = 2; // currently "disk" or "partition" + uint32 devmajor = 3; // major device number + uint32 devminor = 4; // minor device number + string model = 5; // device model - useful for identifying mayastor devices + string devpath = 6; // official device path + repeated string devlinks = 7; // list of udev generated symlinks by which device may be identified + uint64 size = 8; // size of device in (512 byte) blocks + Partition partition = 9; // partition information in case where device represents a partition + Filesystem filesystem = 10; // filesystem information in case where a filesystem is present + bool available = 11; // identifies if device is available for use (ie. is not "currently" in use) +} + +message ListBlockDevicesRequest { + bool all = 1; // list "all" block devices found (not just "available" ones) +} + +message ListBlockDevicesReply { + repeated BlockDevice devices = 1; +} + // Anything what follows here are private interfaces used for interacting with // mayastor outside the scope of CSI. @@ -372,38 +408,18 @@ message CreateReply { string name = 1; } -message BlockDevice { - message Partition { - string parent = 1; // devname of parent device to which this partition belongs - uint32 number = 2; // partition number - string name = 3; // partition name - string scheme = 4; // partition scheme: gpt, dos, ... - string typeid = 5; // partition type identifier - string uuid = 6; // UUID identifying partition - } - message Filesystem { - string fstype = 1; // filesystem type: ext3, ntfs, ... 
- string label = 2; // volume label - string uuid = 3; // UUID identifying the volume (filesystem) - string mountpoint = 4; // path where filesystem is currently mounted - } - string devname = 1; // entry in /dev associated with device - string devtype = 2; // currently "disk" or "partition" - uint32 devmajor = 3; // major device number - uint32 devminor = 4; // minor device number - string model = 5; // device model - useful for identifying mayastor devices - string devpath = 6; // official device path - repeated string devlinks = 7; // list of udev generated symlinks by which device may be identified - uint64 size = 8; // size of device in (512 byte) blocks - Partition partition = 9; // partition information in case where device represents a partition - Filesystem filesystem = 10; // filesystem information in case where a filesystem is present - bool available = 11; // identifies if device is available for use (ie. is not "currently" in use) +// SPDK json-rpc proxy service + +service JsonRpc { + // Call a (SPDK) json-rpc method + rpc JsonRpcCall (JsonRpcRequest) returns (JsonRpcReply) {} } -message ListBlockDevicesRequest { - bool all = 1; // list "all" block devices found (not just "available" ones) +message JsonRpcRequest { + string method = 1; + string params = 2; } -message ListBlockDevicesReply { - repeated BlockDevice devices = 1; +message JsonRpcReply { + string result = 1; } From d950c450ff751f42af6722496260bb31dfd21ce3 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 16 Oct 2020 16:19:27 +0200 Subject: [PATCH 12/92] tests: use new method for testing Some (not all) test have been updated to use the new MayastorTest struct for testing. 
--- mayastor/src/core/env.rs | 4 +- mayastor/tests/add_child.rs | 178 ++++---- mayastor/tests/common/compose.rs | 7 +- mayastor/tests/core.rs | 279 ++++++------ mayastor/tests/error_count.rs | 129 +++--- mayastor/tests/error_count_retry.rs | 56 ++- mayastor/tests/error_fault_child.rs | 55 ++- mayastor/tests/fault_child.rs | 38 +- mayastor/tests/io.rs | 23 +- mayastor/tests/iscsi_tgt.rs | 71 ++- mayastor/tests/lvs_pool.rs | 670 +++++++++++++--------------- mayastor/tests/lvs_pool_rpc.rs | 265 ++++++----- mayastor/tests/malloc_bdev.rs | 91 ++-- mayastor/tests/nexus_share.rs | 135 +++--- mayastor/tests/reactor.rs | 1 - mayastor/tests/reactor_block_on.rs | 24 +- 16 files changed, 978 insertions(+), 1048 deletions(-) diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 94b610cb6..a68dade86 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -90,7 +90,7 @@ pub struct MayastorCliArgs { #[structopt(short = "L")] /// Enable logging for sub components pub log_components: Vec, - #[structopt(short = "m", default_value = "0x1")] + #[structopt(short = "m", default_value = "0x3")] /// The reactor mask to be used for starting up the instance pub reactor_mask: String, #[structopt(short = "N")] @@ -135,7 +135,7 @@ impl Default for MayastorCliArgs { mbus_endpoint: None, node_name: None, env_context: None, - reactor_mask: "0x1".into(), + reactor_mask: "0x3".into(), mem_size: 0, rpc_address: "/var/tmp/mayastor.sock".to_string(), no_pci: true, diff --git a/mayastor/tests/add_child.rs b/mayastor/tests/add_child.rs index 61ed18bcb..066bc5528 100644 --- a/mayastor/tests/add_child.rs +++ b/mayastor/tests/add_child.rs @@ -3,7 +3,7 @@ extern crate assert_matches; use mayastor::{ bdev::{nexus_create, nexus_lookup, ChildState, Reason}, - core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor}, + core::MayastorCliArgs, }; static NEXUS_NAME: &str = "nexus"; @@ -17,9 +17,9 @@ static DISKNAME2: &str = "/tmp/disk2.img"; static BDEVNAME2: &str = 
"aio:///tmp/disk2.img?blk_size=512"; pub mod common; +use common::MayastorTest; fn test_start() { - common::mayastor_test_init(); common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]); common::truncate_file(DISKNAME1, FILE_SIZE); common::truncate_file(DISKNAME2, FILE_SIZE); @@ -30,92 +30,94 @@ fn test_finish() { common::delete_file(&disks); } -#[test] -fn add_child() { +#[tokio::test] +async fn add_child() { test_start(); - let rc = MayastorEnvironment::new(MayastorCliArgs::default()) - .start(|| { - // Create a nexus with a single child - Reactor::block_on(async { - let children = vec![BDEVNAME1.to_string()]; - nexus_create(NEXUS_NAME, 512 * 131_072, None, &children) - .await - .expect("Failed to create nexus"); - }); - - // Test adding a child to an unshared nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .add_child(BDEVNAME2, false) - .await - .expect("Failed to add child"); - assert_eq!(nexus.children.len(), 2); - - // Expect the added child to be in the out-of-sync state - assert_matches!( - nexus.children[1].state(), - ChildState::Faulted(Reason::OutOfSync) - ); - }); - - // Test removing a child from an unshared nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .remove_child(BDEVNAME2) - .await - .expect("Failed to remove child"); - assert_eq!(nexus.children.len(), 1); - }); - - // Share nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .share(rpc::mayastor::ShareProtocolNexus::NexusIscsi, None) - .await - .expect("Failed to share nexus"); - }); - - // Test adding a child to a shared nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .add_child(BDEVNAME2, false) - .await - .expect("Failed to add child"); - assert_eq!(nexus.children.len(), 2); - - // Expect the added child to be in the out-of-sync state - assert_matches!( - nexus.children[1].state(), - 
ChildState::Faulted(Reason::OutOfSync) - ); - }); - - // Test removing a child from a shared nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .remove_child(BDEVNAME2) - .await - .expect("Failed to remove child"); - assert_eq!(nexus.children.len(), 1); - }); - - // Unshare nexus - Reactor::block_on(async { - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - nexus - .unshare_nexus() - .await - .expect("Failed to unshare nexus"); - }); - - mayastor_env_stop(0); - }) - .unwrap(); - assert_eq!(rc, 0); + let ms = MayastorTest::new(MayastorCliArgs::default()); + // Create a nexus with a single child + ms.spawn(async { + let children = vec![BDEVNAME1.to_string()]; + nexus_create(NEXUS_NAME, 512 * 131_072, None, &children) + .await + .expect("Failed to create nexus"); + }) + .await; + + // Test adding a child to an unshared nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .add_child(BDEVNAME2, false) + .await + .expect("Failed to add child"); + assert_eq!(nexus.children.len(), 2); + + // Expect the added child to be in the out-of-sync state + assert_matches!( + nexus.children[1].state(), + ChildState::Faulted(Reason::OutOfSync) + ); + }) + .await; + + // Test removing a child from an unshared nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .remove_child(BDEVNAME2) + .await + .expect("Failed to remove child"); + assert_eq!(nexus.children.len(), 1); + }) + .await; + + // Share nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .share(rpc::mayastor::ShareProtocolNexus::NexusIscsi, None) + .await + .expect("Failed to share nexus"); + }) + .await; + + // Test adding a child to a shared nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .add_child(BDEVNAME2, false) + .await + .expect("Failed to add child"); + assert_eq!(nexus.children.len(), 2); + + // Expect the added child to be in the out-of-sync 
state + assert_matches!( + nexus.children[1].state(), + ChildState::Faulted(Reason::OutOfSync) + ); + }) + .await; + + // Test removing a child from a shared nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .remove_child(BDEVNAME2) + .await + .expect("Failed to remove child"); + assert_eq!(nexus.children.len(), 1); + }) + .await; + + // Unshare nexus + ms.spawn(async { + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + nexus + .unshare_nexus() + .await + .expect("Failed to unshare nexus"); + }) + .await; + test_finish(); } diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index d9dad4812..72bc4bf53 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -37,7 +37,7 @@ use ipnetwork::Ipv4Network; use tokio::sync::oneshot::channel; use tonic::transport::Channel; -use crate::common; +use crate::{common, common::mayastor_test_init}; use ::rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, mayastor_client::MayastorClient, @@ -55,7 +55,7 @@ use mayastor::core::{ pub struct RpcHandle { pub name: String, pub endpoint: SocketAddr, - mayastor: MayastorClient, + pub mayastor: MayastorClient, pub bdev: BdevRpcClient, } @@ -620,9 +620,8 @@ impl<'a> MayastorTest<'a> { } pub fn new(args: MayastorCliArgs) -> MayastorTest<'static> { - common::mayastor_test_init(); let (tx, rx) = bounded(1); - + mayastor_test_init(); let thdl = std::thread::Builder::new() .name("mayastor_master".into()) .spawn(move || { diff --git a/mayastor/tests/core.rs b/mayastor/tests/core.rs index 29f87952d..1cb8775c3 100644 --- a/mayastor/tests/core.rs +++ b/mayastor/tests/core.rs @@ -1,17 +1,12 @@ use std::sync::Once; +use once_cell::sync::OnceCell; use uuid::Uuid; +use common::MayastorTest; use mayastor::{ bdev::{nexus_create, nexus_lookup, util::uring}, - core::{ - mayastor_env_stop, - Bdev, - BdevHandle, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + core::{Bdev, BdevHandle, MayastorCliArgs, 
Reactor}, nexus_uri::{bdev_create, bdev_destroy}, }; use rpc::mayastor::ShareProtocolNexus; @@ -54,16 +49,24 @@ async fn create_nexus() { .unwrap(); } -#[test] -fn core() { - test_init!(); +static MS: OnceCell = OnceCell::new(); + +fn mayastor() -> &'static MayastorTest<'static> { + let ms = MS.get_or_init(|| MayastorTest::new(MayastorCliArgs::default())); + &ms +} + +#[tokio::test] +async fn core() { common::truncate_file(DISKNAME1, 64 * 1024); common::truncate_file(DISKNAME2, 64 * 1024); common::truncate_file(DISKNAME3, 64 * 1024); - Reactor::block_on(async { - works().await; - }); + mayastor() + .spawn(async { + works().await; + }) + .await; } async fn works() { @@ -80,54 +83,53 @@ async fn works() { n.destroy().await.unwrap(); } -#[test] -fn core_2() { - test_init!(); - Reactor::block_on(async { - create_nexus().await; - - let n = nexus_lookup("core_nexus").expect("failed to lookup nexus"); - - let d1 = Bdev::open_by_name("core_nexus", true) - .expect("failed to open first desc to nexus"); - let d2 = Bdev::open_by_name("core_nexus", true) - .expect("failed to open second desc to nexus"); - - let ch1 = d1.get_channel().expect("failed to get channel!"); - let ch2 = d2.get_channel().expect("failed to get channel!"); - drop(ch1); - drop(ch2); - - // we must drop the descriptors before we destroy the nexus - drop(dbg!(d1)); - drop(dbg!(d2)); - n.destroy().await.unwrap(); - }); +#[tokio::test] +async fn core_2() { + mayastor() + .spawn(async { + create_nexus().await; + + let n = nexus_lookup("core_nexus").expect("failed to lookup nexus"); + + let d1 = Bdev::open_by_name("core_nexus", true) + .expect("failed to open first desc to nexus"); + let d2 = Bdev::open_by_name("core_nexus", true) + .expect("failed to open second desc to nexus"); + + let ch1 = d1.get_channel().expect("failed to get channel!"); + let ch2 = d2.get_channel().expect("failed to get channel!"); + drop(ch1); + drop(ch2); + + // we must drop the descriptors before we destroy the nexus + drop(dbg!(d1)); 
+ drop(dbg!(d2)); + n.destroy().await.unwrap(); + }) + .await; } -#[test] -fn core_3() { - test_init!(); - Reactor::block_on(async { - bdev_create(BDEVNAME1).await.expect("failed to create bdev"); - let hdl2 = BdevHandle::open(BDEVNAME1, true, true) - .expect("failed to create the handle!"); - let hdl3 = BdevHandle::open(BDEVNAME1, true, true); - assert_eq!(hdl3.is_err(), true); - - // we must drop the descriptors before we destroy the nexus - drop(hdl2); - drop(hdl3); - - bdev_destroy(BDEVNAME1).await.unwrap(); - }); +#[tokio::test] +async fn core_3() { + mayastor() + .spawn(async { + bdev_create(BDEVNAME1).await.expect("failed to create bdev"); + let hdl2 = BdevHandle::open(BDEVNAME1, true, true) + .expect("failed to create the handle!"); + let hdl3 = BdevHandle::open(BDEVNAME1, true, true); + assert_eq!(hdl3.is_err(), true); + + // we must drop the descriptors before we destroy the nexus + drop(hdl2); + drop(hdl3); + + bdev_destroy(BDEVNAME1).await.unwrap(); + }) + .await; } -#[test] -// Test nexus with children with different sizes -fn core_4() { - test_init!(); - +#[tokio::test] +async fn core_4() { common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[DISKNAME2.to_string()]); @@ -151,60 +153,59 @@ fn core_4() { let nexus_ok = test_case.1; let child_ok = test_case.3; - Reactor::block_on(async move { - let create = nexus_create( - nexus_name, - nexus_size, - None, - &[BDEVNAME1.to_string()], - ) - .await; - if nexus_ok { - create.unwrap_or_else(|_| { - panic!( - "Case {} - Nexus should have have been created", - test_case_index - ) - }); - let nexus = nexus_lookup(nexus_name).unwrap(); - - if child_ok { - nexus.add_child(&BDEVNAME2, true).await.unwrap_or_else( - |_| { - panic!( + mayastor() + .spawn(async move { + let create = nexus_create( + nexus_name, + nexus_size, + None, + &[BDEVNAME1.to_string()], + ) + .await; + if nexus_ok { + create.unwrap_or_else(|_| { + panic!( + "Case {} - Nexus should have have been created", + test_case_index + ) + 
}); + let nexus = nexus_lookup(nexus_name).unwrap(); + + if child_ok { + nexus.add_child(&BDEVNAME2, true).await.unwrap_or_else( + |_| { + panic!( + "Case {} - Child should have been added", + test_case_index + ) + }, + ); + } else { + nexus.add_child(&BDEVNAME2, true).await.expect_err( + &format!( "Case {} - Child should have been added", test_case_index - ) - }, - ); + ), + ); + } + + nexus.destroy().await.unwrap(); } else { - nexus.add_child(&BDEVNAME2, true).await.expect_err( - &format!( - "Case {} - Child should have been added", - test_case_index - ), - ); + create.expect_err(&format!( + "Case {} - Nexus should not have been created", + test_case_index + )); } - - nexus.destroy().await.unwrap(); - } else { - create.expect_err(&format!( - "Case {} - Nexus should not have been created", - test_case_index - )); - } - }); + }) + .await; common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[DISKNAME2.to_string()]); } } -#[test] -// Test nexus bdev size when created with children of the same size and larger -fn core_5() { - test_init!(); - +#[tokio::test] +async fn core_5() { common::delete_file(&[DISKNAME1.to_string()]); let nexus_size: u64 = 100 * 1024 * 1024; // 100MiB let nexus_name: &str = "nexus_size"; @@ -218,41 +219,43 @@ fn core_5() { common::truncate_file(DISKNAME1, child_size / 1024); - Reactor::block_on(async move { - nexus_create( - nexus_name, - nexus_size, - None, - &[BDEVNAME1.to_string()], - ) - .await - .unwrap(); - let nexus = nexus_lookup(nexus_name).unwrap(); - let device = common::device_path_from_uri( - nexus - .share(ShareProtocolNexus::NexusNbd, None) - .await - .unwrap(), - ); - - let size = common::get_device_size(&device); - // size of the shared device: - // if the child is sufficiently large it should match the requested - // nexus_size or a little less (smallest child size - // minus partition metadata) - assert!(size <= nexus_size); - - nexus.destroy().await.unwrap(); - }); + mayastor() + .spawn(async move { + 
nexus_create( + nexus_name, + nexus_size, + None, + &[BDEVNAME1.to_string()], + ) + .await + .unwrap(); + let nexus = nexus_lookup(nexus_name).unwrap(); + let device = common::device_path_from_uri( + nexus + .share(ShareProtocolNexus::NexusNbd, None) + .await + .unwrap(), + ); + + let size = common::get_device_size(&device); + // size of the shared device: + // if the child is sufficiently large it should match the + // requested nexus_size or a little less + // (smallest child size minus partition + // metadata) + assert!(size <= nexus_size); + + nexus.destroy().await.unwrap(); + }) + .await; common::delete_file(&[DISKNAME1.to_string()]); } } -#[test] +#[tokio::test] // Test nexus with inaccessible bdev for 2nd child -fn core_6() { - test_init!(); +async fn core_6() { common::truncate_file(DISKNAME1, 64 * 1024); let file_uuid = Uuid::new_v4(); @@ -260,11 +263,11 @@ fn core_6() { BDEVNAME1.to_string(), "aio:///tmp/disk2".to_string() + &file_uuid.to_simple().to_string(), ]; - Reactor::block_on(async move { - nexus_create("nexus_child_2_missing", 64 * 1024 * 1024, None, &ch) - .await - .expect_err("Nexus should not be created"); - }); - - mayastor_env_stop(1); + mayastor() + .spawn(async move { + nexus_create("nexus_child_2_missing", 64 * 1024 * 1024, None, &ch) + .await + .expect_err("Nexus should not be created"); + }) + .await; } diff --git a/mayastor/tests/error_count.rs b/mayastor/tests/error_count.rs index 8651a1346..332d66d06 100644 --- a/mayastor/tests/error_count.rs +++ b/mayastor/tests/error_count.rs @@ -5,16 +5,10 @@ pub use common::error_bdev::{ SPDK_BDEV_IO_TYPE_WRITE, VBDEV_IO_FAILURE, }; - +use common::MayastorTest; use mayastor::{ bdev::{nexus_create, nexus_lookup, ActionType, NexusErrStore, QueryType}, - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + core::{Bdev, MayastorCliArgs}, subsys::Config, }; @@ -28,13 +22,14 @@ static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; static DISKNAME2: 
&str = "/tmp/disk2.img"; static ERROR_DEVICE: &str = "error_device"; -static EE_ERROR_DEVICE: &str = "EE_error_device"; // The prefix is added by the vbdev_error module +static EE_ERROR_DEVICE: &str = "EE_error_device"; +// The prefix is added by the vbdev_error module static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_count_test_nexus.yaml"; -#[test] -fn nexus_error_count_test() { +#[tokio::test] +async fn nexus_error_count_test() { common::truncate_file(DISKNAME1, 64 * 1024); common::truncate_file(DISKNAME2, 64 * 1024); @@ -43,38 +38,45 @@ fn nexus_error_count_test() { config.err_store_opts.action = ActionType::Ignore; config.err_store_opts.err_store_size = 256; config.write(YAML_CONFIG_FILE).unwrap(); - test_init!(YAML_CONFIG_FILE); + let ms = MayastorTest::new(MayastorCliArgs { + mayastor_config: Some(YAML_CONFIG_FILE.to_string()), + reactor_mask: "0x3".to_string(), + ..Default::default() + }); - Reactor::block_on(async { + ms.spawn(async { create_error_bdev(ERROR_DEVICE, DISKNAME2); create_nexus().await; err_write_nexus(true).await; err_read_nexus_both(true).await; - }); - - common::reactor_run_millis(10); // give time for any errors to be added to the error store + }) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::READ_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 0, Some(1_000_000_000), - ); - nexus_err_query_and_test( + )) + .await; + + ms.spawn(nexus_err_query_and_test( BDEVNAME1, NexusErrStore::READ_FLAG | NexusErrStore::WRITE_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; - Reactor::block_on(async { + ms.spawn(async { inject_error( EE_ERROR_DEVICE, SPDK_BDEV_IO_TYPE_WRITE, @@ -83,31 +85,34 @@ fn nexus_error_count_test() { ); err_write_nexus(false).await; err_read_nexus_both(true).await; - }); + }) + .await; - 
common::reactor_run_millis(10); // give time for any errors to be added to the error store - - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::READ_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 1, Some(1_000_000_000), - ); - nexus_err_query_and_test( + )) + .await; + + ms.spawn(nexus_err_query_and_test( BDEVNAME1, NexusErrStore::READ_FLAG | NexusErrStore::WRITE_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; - Reactor::block_on(async { + ms.spawn(async { inject_error( EE_ERROR_DEVICE, SPDK_BDEV_IO_TYPE_READ, @@ -116,33 +121,36 @@ fn nexus_error_count_test() { ); err_read_nexus_both(false).await; err_write_nexus(true).await; - }); - - common::reactor_run_millis(10); // give time for any errors to be added to the error store + }) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::READ_FLAG, 1, Some(1_000_000_000), - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 1, Some(1_000_000_000), - ); - nexus_err_query_and_test( + )) + .await; + + ms.spawn(nexus_err_query_and_test( BDEVNAME1, NexusErrStore::READ_FLAG | NexusErrStore::WRITE_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; // overflow the error store with errored reads and writes, assumes default // buffer size of 256 records - Reactor::block_on(async { + ms.spawn(async { inject_error( EE_ERROR_DEVICE, SPDK_BDEV_IO_TYPE_READ, @@ -161,45 +169,48 @@ fn nexus_error_count_test() { for _ in 0 .. 
100 { err_write_nexus(false).await; } - }); + }) + .await; - common::reactor_run_millis(100); // give time for any errors to be added to the error store - - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::READ_FLAG, 156, Some(10_000_000_000), - ); - nexus_err_query_and_test( + )) + .await; + + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 100, Some(10_000_000_000), - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 0, Some(0), // too recent, so nothing there - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 100, Some(1_000_000_000_000_000_000), // underflow, so assumes any age - ); + )) + .await; - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 100, None, // no time specified - ); - - mayastor_env_stop(0); + )) + .await; common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[DISKNAME2.to_string()]); @@ -214,7 +225,7 @@ async fn create_nexus() { .unwrap(); } -fn nexus_err_query_and_test( +async fn nexus_err_query_and_test( child_bdev: &str, io_type_flags: u32, expected_count: u32, diff --git a/mayastor/tests/error_count_retry.rs b/mayastor/tests/error_count_retry.rs index 2575174ff..cb8e9ec2a 100644 --- a/mayastor/tests/error_count_retry.rs +++ b/mayastor/tests/error_count_retry.rs @@ -9,13 +9,7 @@ pub use common::error_bdev::{ }; use mayastor::{ bdev::{nexus_create, nexus_lookup, ActionType, NexusErrStore, QueryType}, - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + core::{Bdev, MayastorCliArgs}, subsys::Config, }; @@ -31,8 +25,8 @@ static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_retry_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_count_retry_nexus.yaml"; -#[test] -fn 
nexus_error_count_retry_test() { +#[tokio::test] +async fn nexus_error_count_retry_test() { common::truncate_file(DISKNAME1, 64 * 1024); let mut config = Config::default(); @@ -42,28 +36,32 @@ fn nexus_error_count_retry_test() { config.err_store_opts.max_io_attempts = 2; config.write(YAML_CONFIG_FILE).unwrap(); - test_init!(YAML_CONFIG_FILE); + let ms = common::MayastorTest::new(MayastorCliArgs { + mayastor_config: Some(YAML_CONFIG_FILE.to_string()), + reactor_mask: "0x3".to_string(), + ..Default::default() + }); // baseline test with no errors injected - Reactor::block_on(async { + ms.spawn(async { create_error_bdev(ERROR_DEVICE, DISKNAME1); create_nexus().await; err_write_nexus(true).await; err_read_nexus(true).await; - }); + }) + .await; - common::reactor_run_millis(10); // give time for any errors to be added to the error store - - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::READ_FLAG | NexusErrStore::WRITE_FLAG, 0, Some(1_000_000_000), - ); + )) + .await; // 1 write error injected, 2 attempts allowed, 1 write error should be // logged and the IO should succeed - Reactor::block_on(async { + ms.spawn(async { inject_error( EE_ERROR_DEVICE, SPDK_BDEV_IO_TYPE_WRITE, @@ -71,20 +69,20 @@ fn nexus_error_count_retry_test() { 1, ); err_write_nexus(true).await; - }); + }) + .await; - common::reactor_run_millis(10); // give time for any errors to be added to the error store - - nexus_err_query_and_test( + ms.spawn(nexus_err_query_and_test( BDEV_EE_ERROR_DEVICE, NexusErrStore::WRITE_FLAG, 1, Some(1_000_000_000), - ); + )) + .await; // 2 errors injected, 2 attempts allowed, 1 read attempt, 2 read errors // should be logged and the IO should fail - Reactor::block_on(async { + ms.spawn(async { inject_error( EE_ERROR_DEVICE, SPDK_BDEV_IO_TYPE_READ, @@ -92,14 +90,14 @@ fn nexus_error_count_retry_test() { 2, ); err_read_nexus(false).await; - }); + }) + .await; // IO should now succeed - Reactor::block_on(async { + 
ms.spawn(async { err_read_nexus(true).await; - }); - - mayastor_env_stop(0); + }) + .await; common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[YAML_CONFIG_FILE.to_string()]); @@ -113,7 +111,7 @@ async fn create_nexus() { .unwrap(); } -fn nexus_err_query_and_test( +async fn nexus_err_query_and_test( child_bdev: &str, io_type_flags: u32, expected_count: u32, diff --git a/mayastor/tests/error_fault_child.rs b/mayastor/tests/error_fault_child.rs index b73043663..9382ed3cf 100644 --- a/mayastor/tests/error_fault_child.rs +++ b/mayastor/tests/error_fault_child.rs @@ -1,5 +1,3 @@ -pub mod common; - pub use common::error_bdev::{ create_error_bdev, inject_error, @@ -7,19 +5,14 @@ pub use common::error_bdev::{ SPDK_BDEV_IO_TYPE_WRITE, VBDEV_IO_FAILURE, }; - use mayastor::{ - bdev::{nexus_create, nexus_lookup, ActionType, NexusStatus}, - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + bdev::{ActionType, nexus_create, nexus_lookup, NexusStatus}, + core::{Bdev, MayastorCliArgs}, subsys::Config, }; +pub mod common; + static ERROR_COUNT_TEST_NEXUS: &str = "error_fault_child_test_nexus"; static DISKNAME1: &str = "/tmp/disk1.img"; @@ -28,13 +21,14 @@ static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; static DISKNAME2: &str = "/tmp/disk2.img"; static ERROR_DEVICE: &str = "error_device"; -static EE_ERROR_DEVICE: &str = "EE_error_device"; // The prefix is added by the vbdev_error module +static EE_ERROR_DEVICE: &str = "EE_error_device"; +// The prefix is added by the vbdev_error module static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_fault_child_test_nexus.yaml"; -#[test] -fn nexus_fault_child_test() { +#[tokio::test] +async fn nexus_fault_child_test() { common::truncate_file(DISKNAME1, 64 * 1024); common::truncate_file(DISKNAME2, 64 * 1024); @@ -46,10 +40,13 @@ fn nexus_fault_child_test() { config.err_store_opts.max_errors = 4; 
config.write(YAML_CONFIG_FILE).unwrap(); + let ms = common::MayastorTest::new(MayastorCliArgs { + mayastor_config: Some(YAML_CONFIG_FILE.to_string()), + reactor_mask: "0x3".to_string(), + ..Default::default() + }); - test_init!(YAML_CONFIG_FILE); - - Reactor::block_on(async { + ms.spawn(async { create_error_bdev(ERROR_DEVICE, DISKNAME2); create_nexus().await; @@ -68,34 +65,36 @@ fn nexus_fault_child_test() { 10, ); - for _ in 0 .. 3 { + for _ in 0..3 { err_read_nexus_both(false).await; common::reactor_run_millis(1); } - for _ in 0 .. 2 { + for _ in 0..2 { // the second iteration causes the error count to exceed the max no // of retry errors (4) for the read and causes the child to be // removed err_read_nexus_both(false).await; common::reactor_run_millis(1); } - }); + }) + .await; // error child should be removed from the IO path here - check_nexus_state_is(NexusStatus::Degraded); + ms.spawn(async { check_nexus_state_is(NexusStatus::Degraded) }) + .await; - Reactor::block_on(async { + ms.spawn(async { err_read_nexus_both(true).await; // should succeed because both IOs go to the remaining child err_write_nexus(true).await; // should succeed because the IO goes to - // the remaining child - }); + // the remaining child + }) + .await; - Reactor::block_on(async { + ms.spawn(async { delete_nexus().await; - }); - - mayastor_env_stop(0); + }) + .await; common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[DISKNAME2.to_string()]); diff --git a/mayastor/tests/fault_child.rs b/mayastor/tests/fault_child.rs index 3e127590e..9d793cd2c 100644 --- a/mayastor/tests/fault_child.rs +++ b/mayastor/tests/fault_child.rs @@ -1,6 +1,6 @@ use mayastor::{ bdev::{nexus_create, nexus_lookup, Reason}, - core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor}, + core::MayastorCliArgs, }; pub mod common; @@ -10,26 +10,20 @@ static NEXUS_SIZE: u64 = 10 * 1024 * 1024; static CHILD_1: &str = "malloc:///malloc0?blk_size=512&size_mb=10"; static CHILD_2: &str = 
"malloc:///malloc1?blk_size=512&size_mb=10"; -#[test] -fn fault_child() { - common::mayastor_test_init(); - let ms = MayastorEnvironment::new(MayastorCliArgs::default()); - ms.start(|| { - Reactor::block_on(async { - nexus_create(NEXUS_NAME, NEXUS_SIZE, None, &[CHILD_1.to_string()]) - .await - .unwrap(); - let nexus = nexus_lookup(NEXUS_NAME).unwrap(); - // child will stay in a degraded state because we are not rebuilding - nexus.add_child(CHILD_2, true).await.unwrap(); - - // it should not be possible to fault the only healthy child - assert!(nexus.fault_child(CHILD_1, Reason::Unknown).await.is_err()); - // it should be possible to fault an unhealthy child - assert!(nexus.fault_child(CHILD_2, Reason::Unknown).await.is_ok()); - - mayastor_env_stop(0); - }); +#[tokio::test] +async fn fault_child() { + let ms = common::MayastorTest::new(MayastorCliArgs::default()); + ms.spawn(async { + nexus_create(NEXUS_NAME, NEXUS_SIZE, None, &[CHILD_1.to_string()]) + .await + .unwrap(); + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + // child will stay in a degraded state because we are not rebuilding + nexus.add_child(CHILD_2, true).await.unwrap(); + // it should not be possible to fault the only healthy child + assert!(nexus.fault_child(CHILD_1, Reason::Unknown).await.is_err()); + // it should be possible to fault an unhealthy child + assert!(nexus.fault_child(CHILD_2, Reason::Unknown).await.is_ok()); }) - .unwrap(); + .await; } diff --git a/mayastor/tests/io.rs b/mayastor/tests/io.rs index 6719d39b4..f3362b50f 100644 --- a/mayastor/tests/io.rs +++ b/mayastor/tests/io.rs @@ -1,34 +1,24 @@ use std::process::Command; use common::bdev_io; -use mayastor::{ - core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor}, - nexus_uri::bdev_create, -}; +use mayastor::{core::MayastorCliArgs, nexus_uri::bdev_create}; static DISKNAME: &str = "/tmp/disk.img"; static BDEVNAME: &str = "aio:///tmp/disk.img?blk_size=512"; pub mod common; -#[test] -fn io_test() { - 
common::mayastor_test_init(); +#[tokio::test] +async fn io_test() { + let ms = common::MayastorTest::new(MayastorCliArgs::default()); + let output = Command::new("truncate") .args(&["-s", "64m", DISKNAME]) .output() .expect("failed exec truncate"); assert_eq!(output.status.success(), true); + ms.spawn(async { start().await }).await; - let rc = MayastorEnvironment::new(MayastorCliArgs::default()) - .start(|| { - Reactor::block_on(async { - start().await; - }); - }) - .unwrap(); - - assert_eq!(rc, 0); let output = Command::new("rm") .args(&["-rf", DISKNAME]) .output() @@ -43,5 +33,4 @@ async fn start() { bdev_create(BDEVNAME).await.expect("failed to create bdev"); bdev_io::write_some(BDEVNAME, 0, 0xff).await.unwrap(); bdev_io::read_some(BDEVNAME, 0, 0xff).await.unwrap(); - mayastor_env_stop(0); } diff --git a/mayastor/tests/iscsi_tgt.rs b/mayastor/tests/iscsi_tgt.rs index d4fb21ed3..231ef2e6b 100644 --- a/mayastor/tests/iscsi_tgt.rs +++ b/mayastor/tests/iscsi_tgt.rs @@ -1,11 +1,5 @@ use mayastor::{ - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + core::{Bdev, MayastorCliArgs}, nexus_uri::bdev_create, target::{iscsi, Side}, }; @@ -13,48 +7,35 @@ use mayastor::{ pub mod common; static BDEV: &str = "malloc:///malloc0?size_mb=64"; -#[test] -fn iscsi_target() { - common::mayastor_test_init(); +#[tokio::test] +async fn iscsi_target() { let mut args = MayastorCliArgs::default(); args.reactor_mask = "0x3".into(); - MayastorEnvironment::new(args) - .start(|| { - // test we can create a nvmf subsystem - Reactor::block_on(async { - let b = bdev_create(BDEV).await.unwrap(); - let bdev = Bdev::lookup_by_name(&b).unwrap(); - iscsi::share(&b, &bdev, Side::Nexus).unwrap(); - }); + let ms = common::MayastorTest::new(args); + ms.spawn(async { + // test we can create a nvmf subsystem + let b = bdev_create(BDEV).await.unwrap(); + let bdev = Bdev::lookup_by_name(&b).unwrap(); + iscsi::share(&b, &bdev, Side::Nexus).unwrap(); - // test we 
can not create the same one again - Reactor::block_on(async { - let bdev = Bdev::lookup_by_name("malloc0").unwrap(); - let should_err = iscsi::share("malloc0", &bdev, Side::Nexus); - assert_eq!(should_err.is_err(), true); - }); + // test we can not create the same one again + let bdev = Bdev::lookup_by_name("malloc0").unwrap(); + let should_err = iscsi::share("malloc0", &bdev, Side::Nexus); + assert_eq!(should_err.is_err(), true); - // verify the bdev is claimed by our target - Reactor::block_on(async { - let bdev = Bdev::bdev_first().unwrap(); - assert_eq!(bdev.is_claimed(), true); - assert_eq!(bdev.claimed_by().unwrap(), "iSCSI Target"); - }); + // verify the bdev is claimed by our target + let bdev = Bdev::bdev_first().unwrap(); + assert_eq!(bdev.is_claimed(), true); + assert_eq!(bdev.claimed_by().unwrap(), "iSCSI Target"); - // unshare the iSCSI target - Reactor::block_on(async { - let bdev = Bdev::lookup_by_name("malloc0").unwrap(); - let should_err = iscsi::unshare(&bdev.name()).await; - assert_eq!(should_err.is_err(), false); - }); + // unshare the iSCSI target + let bdev = Bdev::lookup_by_name("malloc0").unwrap(); + let should_err = iscsi::unshare(&bdev.name()).await; + assert_eq!(should_err.is_err(), false); - // verify the bdev is not claimed by our target anymore - Reactor::block_on(async { - let bdev = Bdev::bdev_first().unwrap(); - assert_eq!(bdev.is_claimed(), false); - }); - - mayastor_env_stop(0); - }) - .unwrap(); + // verify the bdev is not claimed by our target anymore + let bdev = Bdev::bdev_first().unwrap(); + assert_eq!(bdev.is_claimed(), false); + }) + .await } diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index 8f65c3969..7d6a6d95c 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -17,361 +17,335 @@ use mayastor::{ use rpc::mayastor::CreatePoolRequest; pub mod common; - +use common::MayastorTest; static DISKNAME1: &str = "/tmp/disk1.img"; -#[test] -fn lvs_pool_test() { +#[tokio::test] +async 
fn lvs_pool_test() { common::delete_file(&[DISKNAME1.into()]); common::truncate_file(DISKNAME1, 64 * 1024); - common::mayastor_test_init(); let mut args = MayastorCliArgs::default(); args.reactor_mask = "0x3".into(); - - let result = catch_unwind(|| { - MayastorEnvironment::new(args) - .start(|| { - // should fail to import a pool that does not exist on disk - Reactor::block_on(async { - assert_eq!( - Lvs::import("tpool", "aio:///tmp/disk1.img") - .await - .is_err(), - true - ) - }); - - // should succeed to create a pool we can not import - Reactor::block_on(async { - Lvs::create_or_import(CreatePoolRequest { - name: "tpool".into(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .unwrap(); - }); - - // returns OK when the pool is already there and we create - // it again - Reactor::block_on(async { - assert_eq!( - Lvs::create_or_import(CreatePoolRequest { - name: "tpool".into(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .is_ok(), - true - ) - }); - - // should fail to create the pool again, notice that we use - // create directly here to ensure that if we - // have an idempotent snafu, we dont crash and - // burn - Reactor::block_on(async { - assert_eq!( - Lvs::create("tpool", "aio:///tmp/disk1.img") - .await - .is_err(), - true - ) - }); - - // should fail to import the pool that is already imported - // similar to above, we use the import directly - Reactor::block_on(async { - assert_eq!( - Lvs::import("tpool", "aio:///tmp/disk1.img") - .await - .is_err(), - true - ) - }); - - // should be able to find our new LVS - Reactor::block_on(async { - assert_eq!(Lvs::iter().count(), 1); - let pool = Lvs::lookup("tpool").unwrap(); - assert_eq!(pool.name(), "tpool"); - assert_eq!(pool.used(), 0); - dbg!(pool.uuid()); - assert_eq!(pool.base_bdev().name(), "/tmp/disk1.img"); - }); - - // export the pool keeping the bdev alive and then - // import the pool and validate the uuid - - Reactor::block_on(async { - let pool = 
Lvs::lookup("tpool").unwrap(); - let uuid = pool.uuid(); - pool.export().await.unwrap(); - - // import and export implicitly destroy the base_bdev, for - // testing import and create we - // sometimes create the base_bdev manually - bdev_create("aio:///tmp/disk1.img").await.unwrap(); - - assert_eq!( - Lvs::import("tpool", "aio:///tmp/disk1.img") - .await - .is_ok(), - true - ); - - let pool = Lvs::lookup("tpool").unwrap(); - assert_eq!(pool.uuid(), uuid); - }); - - // destroy the pool, a import should now fail, creating a new - // pool should not having a matching UUID of the - // old pool - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - let uuid = pool.uuid(); - pool.destroy().await.unwrap(); - - bdev_create("aio:///tmp/disk1.img").await.unwrap(); - assert_eq!( - Lvs::import("tpool", "aio:///tmp/disk1.img") - .await - .is_err(), - true - ); - - assert_eq!(Lvs::iter().count(), 0); - assert_eq!( - Lvs::create("tpool", "aio:///tmp/disk1.img") - .await - .is_ok(), - true - ); - - let pool = Lvs::lookup("tpool").unwrap(); - assert_ne!(uuid, pool.uuid()); - assert_eq!(Lvs::iter().count(), 1); - }); - - // create 10 lvol on this pool - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - for i in 0 .. 10 { - pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) - .await - .unwrap(); - } - - assert_eq!(pool.lvols().unwrap().count(), 10); - }); - - // create a second pool and ensure it filters correctly - Reactor::block_on(async { - let pool2 = Lvs::create_or_import(CreatePoolRequest { - name: "tpool2".to_string(), - disks: vec!["malloc:///malloc0?size_mb=64".to_string()], - }) - .await - .unwrap(); - - for i in 0 .. 
5 { - pool2 - .create_lvol( - &format!("pool2-vol-{}", i), - 4 * 1024, - false, - ) - .await - .unwrap(); - } - - assert_eq!(pool2.lvols().unwrap().count(), 5); - - let pool = Lvs::lookup("tpool").unwrap(); - assert_eq!(pool.lvols().unwrap().count(), 10); - }); - - // export the first pool and import it again, all replica's - // should be present, destroy all of them by name to - // ensure they are all there - - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - pool.export().await.unwrap(); - let pool = Lvs::create_or_import(CreatePoolRequest { - name: "tpool".to_string(), - disks: vec!["aio:///tmp/disk1.img".to_string()], - }) - .await - .unwrap(); - - assert_eq!(pool.lvols().unwrap().count(), 10); - - let df = pool - .lvols() - .unwrap() - .map(|r| r.destroy()) - .collect::>(); - assert_eq!(df.len(), 10); - futures::future::join_all(df).await; - }); - - // share all the replica's on the pool tpool2 - Reactor::block_on(async { - let pool2 = Lvs::lookup("tpool2").unwrap(); - for l in pool2.lvols().unwrap() { - l.share_nvmf().await.unwrap(); - } - }); - - // destroy the pool and verify that all nvmf shares are removed - Reactor::block_on(async { - let p = Lvs::lookup("tpool2").unwrap(); - p.destroy().await.unwrap(); - assert_eq!( - NvmfSubsystem::first().unwrap().into_iter().count(), - 1 // only the discovery system remains - ) - }); - - // test setting the share property that is stored on disk - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - let lvol = pool - .create_lvol("vol-1", 1024 * 4, false) - .await - .unwrap(); - - lvol.set(PropValue::Shared(true)).await.unwrap(); - assert_eq!( - lvol.get(PropName::Shared).await.unwrap(), - PropValue::Shared(true) - ); - - lvol.set(PropValue::Shared(false)).await.unwrap(); - assert_eq!( - lvol.get(PropName::Shared).await.unwrap(), - PropValue::Shared(false) - ); - - // sharing should set the property on disk - - lvol.share_nvmf().await.unwrap(); - - assert_eq!( - 
lvol.get(PropName::Shared).await.unwrap(), - PropValue::Shared(true) - ); - - lvol.unshare().await.unwrap(); - - assert_eq!( - lvol.get(PropName::Shared).await.unwrap(), - PropValue::Shared(false) - ); - - lvol.destroy().await.unwrap(); - }); - - // create 10 shares, 1 unshared lvol and export the pool - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - - for i in 0 .. 10 { - pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) - .await - .unwrap(); - } - - for l in pool.lvols().unwrap() { - l.share_nvmf().await.unwrap(); - } - - pool.create_lvol("notshared", 4 * 1024, true) - .await - .unwrap(); - - pool.export().await.unwrap(); - }); - - // import the pool all shares should be there, but also validate - // the share that not shared to be -- not shared - Reactor::block_on(async { - bdev_create("aio:///tmp/disk1.img").await.unwrap(); - let pool = Lvs::import("tpool", "aio:///tmp/disk1.img") - .await - .unwrap(); - - for l in pool.lvols().unwrap() { - if l.name() == "notshared" { - assert_eq!(l.shared().unwrap(), Protocol::Off); - } else { - assert_eq!(l.shared().unwrap(), Protocol::Nvmf); - } - } - - assert_eq!( - NvmfSubsystem::first().unwrap().into_iter().count(), - 1 + 10 - ); - }); - - // lastly destroy the pool, import/create it again, no shares - // should be present - Reactor::block_on(async { - let pool = Lvs::lookup("tpool").unwrap(); - pool.destroy().await.unwrap(); - assert_eq!( - NvmfSubsystem::first().unwrap().into_iter().count(), - 1 - ); - - let pool = Lvs::create_or_import(CreatePoolRequest { - name: "tpool".into(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .unwrap(); - - assert_eq!( - NvmfSubsystem::first().unwrap().into_iter().count(), - 1 - ); - - assert_eq!(pool.lvols().unwrap().count(), 0); - pool.export().await.unwrap(); - }); - - // validate the expected state of mayastor - Reactor::block_on(async { - // no shares left except for the discovery controller - - assert_eq!( - 
NvmfSubsystem::first().unwrap().into_iter().count(), - 1 - ); - - // all pools destroyed - assert_eq!(Lvs::iter().count(), 0); - - // no bdevs left - - assert_eq!(Bdev::bdev_first().into_iter().count(), 0); - - // importing a pool with the wrong name should fail - Lvs::create_or_import(CreatePoolRequest { - name: "jpool".into(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .err() - .unwrap(); - }); - - mayastor_env_stop(0); + let ms = MayastorTest::new(args); + + // should fail to import a pool that does not exist on disk + ms.spawn(async { + assert_eq!( + Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err(), + true + ) + }) + .await; + + // should succeed to create a pool we can not import + ms.spawn(async { + Lvs::create_or_import(CreatePoolRequest { + name: "tpool".into(), + disks: vec!["aio:///tmp/disk1.img".into()], + }) + .await + .unwrap(); + }) + .await; + + // returns OK when the pool is already there and we create + // it again + ms.spawn(async { + assert_eq!( + Lvs::create_or_import(CreatePoolRequest { + name: "tpool".into(), + disks: vec!["aio:///tmp/disk1.img".into()], }) - .unwrap(); - }); + .await + .is_ok(), + true + ) + }) + .await; + + // should fail to create the pool again, notice that we use + // create directly here to ensure that if we + // have an idempotent snafu, we dont crash and + // burn + ms.spawn(async { + assert_eq!( + Lvs::create("tpool", "aio:///tmp/disk1.img").await.is_err(), + true + ) + }) + .await; + + // should fail to import the pool that is already imported + // similar to above, we use the import directly + ms.spawn(async { + assert_eq!( + Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err(), + true + ) + }) + .await; + + // should be able to find our new LVS + ms.spawn(async { + assert_eq!(Lvs::iter().count(), 1); + let pool = Lvs::lookup("tpool").unwrap(); + assert_eq!(pool.name(), "tpool"); + assert_eq!(pool.used(), 0); + dbg!(pool.uuid()); + assert_eq!(pool.base_bdev().name(), 
"/tmp/disk1.img"); + }) + .await; + + // export the pool keeping the bdev alive and then + // import the pool and validate the uuid + + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + let uuid = pool.uuid(); + pool.export().await.unwrap(); + + // import and export implicitly destroy the base_bdev, for + // testing import and create we + // sometimes create the base_bdev manually + bdev_create("aio:///tmp/disk1.img").await.unwrap(); + + assert_eq!( + Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_ok(), + true + ); + + let pool = Lvs::lookup("tpool").unwrap(); + assert_eq!(pool.uuid(), uuid); + }) + .await; + + // destroy the pool, a import should now fail, creating a new + // pool should not having a matching UUID of the + // old pool + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + let uuid = pool.uuid(); + pool.destroy().await.unwrap(); + + bdev_create("aio:///tmp/disk1.img").await.unwrap(); + assert_eq!( + Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err(), + true + ); + + assert_eq!(Lvs::iter().count(), 0); + assert_eq!( + Lvs::create("tpool", "aio:///tmp/disk1.img").await.is_ok(), + true + ); + + let pool = Lvs::lookup("tpool").unwrap(); + assert_ne!(uuid, pool.uuid()); + assert_eq!(Lvs::iter().count(), 1); + }) + .await; + + // create 10 lvol on this pool + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + for i in 0 .. 10 { + pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) + .await + .unwrap(); + } + + assert_eq!(pool.lvols().unwrap().count(), 10); + }) + .await; + + // create a second pool and ensure it filters correctly + ms.spawn(async { + let pool2 = Lvs::create_or_import(CreatePoolRequest { + name: "tpool2".to_string(), + disks: vec!["malloc:///malloc0?size_mb=64".to_string()], + }) + .await + .unwrap(); + + for i in 0 .. 
5 { + pool2 + .create_lvol(&format!("pool2-vol-{}", i), 4 * 1024, false) + .await + .unwrap(); + } + + assert_eq!(pool2.lvols().unwrap().count(), 5); + + let pool = Lvs::lookup("tpool").unwrap(); + assert_eq!(pool.lvols().unwrap().count(), 10); + }) + .await; + + // export the first pool and import it again, all replica's + // should be present, destroy all of them by name to + // ensure they are all there + + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + pool.export().await.unwrap(); + let pool = Lvs::create_or_import(CreatePoolRequest { + name: "tpool".to_string(), + disks: vec!["aio:///tmp/disk1.img".to_string()], + }) + .await + .unwrap(); + + assert_eq!(pool.lvols().unwrap().count(), 10); + + let df = pool + .lvols() + .unwrap() + .map(|r| r.destroy()) + .collect::>(); + assert_eq!(df.len(), 10); + futures::future::join_all(df).await; + }) + .await; + + // share all the replica's on the pool tpool2 + ms.spawn(async { + let pool2 = Lvs::lookup("tpool2").unwrap(); + for l in pool2.lvols().unwrap() { + l.share_nvmf().await.unwrap(); + } + }) + .await; + + // destroy the pool and verify that all nvmf shares are removed + ms.spawn(async { + let p = Lvs::lookup("tpool2").unwrap(); + p.destroy().await.unwrap(); + assert_eq!( + NvmfSubsystem::first().unwrap().into_iter().count(), + 1 // only the discovery system remains + ) + }) + .await; + + // test setting the share property that is stored on disk + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + let lvol = pool.create_lvol("vol-1", 1024 * 4, false).await.unwrap(); + + lvol.set(PropValue::Shared(true)).await.unwrap(); + assert_eq!( + lvol.get(PropName::Shared).await.unwrap(), + PropValue::Shared(true) + ); + + lvol.set(PropValue::Shared(false)).await.unwrap(); + assert_eq!( + lvol.get(PropName::Shared).await.unwrap(), + PropValue::Shared(false) + ); + + // sharing should set the property on disk + + lvol.share_nvmf().await.unwrap(); + + assert_eq!( + 
lvol.get(PropName::Shared).await.unwrap(), + PropValue::Shared(true) + ); + + lvol.unshare().await.unwrap(); + + assert_eq!( + lvol.get(PropName::Shared).await.unwrap(), + PropValue::Shared(false) + ); + + lvol.destroy().await.unwrap(); + }) + .await; + + // create 10 shares, 1 unshared lvol and export the pool + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + + for i in 0 .. 10 { + pool.create_lvol(&format!("vol-{}", i), 4 * 1024, true) + .await + .unwrap(); + } + + for l in pool.lvols().unwrap() { + l.share_nvmf().await.unwrap(); + } + + pool.create_lvol("notshared", 4 * 1024, true).await.unwrap(); + + pool.export().await.unwrap(); + }) + .await; + + // import the pool all shares should be there, but also validate + // the share that not shared to be -- not shared + ms.spawn(async { + bdev_create("aio:///tmp/disk1.img").await.unwrap(); + let pool = Lvs::import("tpool", "aio:///tmp/disk1.img").await.unwrap(); + + for l in pool.lvols().unwrap() { + if l.name() == "notshared" { + assert_eq!(l.shared().unwrap(), Protocol::Off); + } else { + assert_eq!(l.shared().unwrap(), Protocol::Nvmf); + } + } + + assert_eq!(NvmfSubsystem::first().unwrap().into_iter().count(), 1 + 10); + }) + .await; + + // lastly destroy the pool, import/create it again, no shares + // should be present + ms.spawn(async { + let pool = Lvs::lookup("tpool").unwrap(); + pool.destroy().await.unwrap(); + assert_eq!(NvmfSubsystem::first().unwrap().into_iter().count(), 1); + + let pool = Lvs::create_or_import(CreatePoolRequest { + name: "tpool".into(), + disks: vec!["aio:///tmp/disk1.img".into()], + }) + .await + .unwrap(); + + assert_eq!(NvmfSubsystem::first().unwrap().into_iter().count(), 1); + + assert_eq!(pool.lvols().unwrap().count(), 0); + pool.export().await.unwrap(); + }) + .await; + + // validate the expected state of mayastor + ms.spawn(async { + // no shares left except for the discovery controller + + assert_eq!(NvmfSubsystem::first().unwrap().into_iter().count(), 1); + + // 
all pools destroyed + assert_eq!(Lvs::iter().count(), 0); + + // no bdevs left + + assert_eq!(Bdev::bdev_first().into_iter().count(), 0); + + // importing a pool with the wrong name should fail + Lvs::create_or_import(CreatePoolRequest { + name: "jpool".into(), + disks: vec!["aio:///tmp/disk1.img".into()], + }) + .await + .err() + .unwrap(); + }) + .await; common::delete_file(&[DISKNAME1.into()]); - result.unwrap(); } diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs index 71840e002..49b0c5b04 100644 --- a/mayastor/tests/lvs_pool_rpc.rs +++ b/mayastor/tests/lvs_pool_rpc.rs @@ -1,148 +1,145 @@ -use std::panic::catch_unwind; - -use mayastor::{ - core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor}, - grpc::pool_grpc, -}; use rpc::mayastor::{ CreatePoolRequest, CreateReplicaRequest, DestroyPoolRequest, DestroyReplicaRequest, + Null, ShareReplicaRequest, }; pub mod common; - +use common::Builder; static DISKNAME1: &str = "/tmp/disk1.img"; -#[test] -fn lvs_pool_rpc() { - // testing basic rpc methods - - common::delete_file(&[DISKNAME1.into()]); - common::truncate_file(DISKNAME1, 64 * 1024); - common::mayastor_test_init(); - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x3".into(); - let _r = catch_unwind(|| { - MayastorEnvironment::new(args) - .start(|| { - Reactor::block_on(async { - // create a pool - pool_grpc::create(CreatePoolRequest { - name: "tpool".to_string(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .unwrap(); - - // should succeed - pool_grpc::create(CreatePoolRequest { - name: "tpool".to_string(), - disks: vec!["aio:///tmp/disk1.img".into()], - }) - .await - .unwrap(); - - //list the pool - let list = pool_grpc::list().unwrap(); - assert_eq!(list.into_inner().pools.len(), 1); - - // create replica not shared - pool_grpc::create_replica(CreateReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - pool: "tpool".to_string(), - size: 4 * 1024, - 
thin: false, - share: 0, - }) - .await - .unwrap(); - - // should succeed - pool_grpc::create_replica(CreateReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - pool: "tpool".to_string(), - size: 4 * 1024, - thin: false, - share: 0, - }) - .await - .unwrap(); - - // share replica - pool_grpc::share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - share: 1, - }) - .await - .unwrap(); - - // share again, should succeed - pool_grpc::share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - share: 1, - }) - .await - .unwrap(); - - // assert we are shared - assert_eq!( - pool_grpc::list_replicas() - .unwrap() - .into_inner() - .replicas[0] - .uri - .contains("nvmf://"), - true - ); - - // unshare it - pool_grpc::share_replica(ShareReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - share: 0, - }) - .await - .unwrap(); - - // assert we are not shared - assert_eq!( - pool_grpc::list_replicas() - .unwrap() - .into_inner() - .replicas[0] - .uri - .contains("bdev://"), - true - ); - - // destroy the replica - pool_grpc::destroy_replica(DestroyReplicaRequest { - uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47" - .to_string(), - }) - .await - .unwrap(); - - // destroy the pool - pool_grpc::destroy(DestroyPoolRequest { - name: "tpool".to_string(), - }) - .await - .unwrap(); - }) - .unwrap(); - mayastor_env_stop(0); - }) - .unwrap(); - }); +#[tokio::test] +async fn lvs_pool_rpc() { + let test = Builder::new() + .name("lvs-pool-grpc") + .with_clean(true) + .network("10.1.0.0/16") + .add_container("ms1") + .build() + .await + .unwrap(); + // testing basic rpc methods + let mut handles = test.grpc_handles().await.unwrap(); + let gdl = handles.get_mut(0).unwrap(); + + // create a pool + gdl.mayastor + .create_pool(CreatePoolRequest { + name: "tpool".to_string(), + disks: vec!["malloc:///disk0?size_mb=64".into()], + }) + .await + .unwrap(); + 
+ gdl.mayastor + .create_pool(CreatePoolRequest { + name: "tpool".to_string(), + disks: vec!["malloc:///disk0?size_mb=64".into()], + }) + .await + .unwrap(); + //list the pool + let list = gdl.mayastor.list_pools(Null {}).await.unwrap(); + + assert_eq!(list.into_inner().pools.len(), 1); + + // create replica not shared + gdl.mayastor + .create_replica(CreateReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + pool: "tpool".to_string(), + size: 4 * 1024, + thin: false, + share: 0, + }) + .await + .unwrap(); + + // should succeed + gdl.mayastor + .create_replica(CreateReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + pool: "tpool".to_string(), + size: 4 * 1024, + thin: false, + share: 0, + }) + .await + .unwrap(); + + // share replica + gdl.mayastor + .share_replica(ShareReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + share: 1, + }) + .await + .unwrap(); + + // share again, should succeed + gdl.mayastor + .share_replica(ShareReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + share: 1, + }) + .await + .unwrap(); + + // assert we are shared + assert_eq!( + gdl.mayastor + .list_replicas(Null {}) + .await + .unwrap() + .into_inner() + .replicas[0] + .uri + .contains("nvmf://"), + true + ); + + // unshare it + gdl.mayastor + .share_replica(ShareReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + share: 0, + }) + .await + .unwrap(); + + // assert we are not shared + assert_eq!( + gdl.mayastor + .list_replicas(Null {}) + .await + .unwrap() + .into_inner() + .replicas[0] + .uri + .contains("bdev://"), + true + ); + + // destroy the replica + gdl.mayastor + .destroy_replica(DestroyReplicaRequest { + uuid: "cdc2a7db-3ac3-403a-af80-7fadc1581c47".to_string(), + }) + .await + .unwrap(); + + // destroy the pool + gdl.mayastor + .destroy_pool(DestroyPoolRequest { + name: "tpool".to_string(), + }) + .await + .unwrap(); + + 
test.logs("ms1").await.unwrap(); common::delete_file(&[DISKNAME1.into()]); - _r.unwrap(); } diff --git a/mayastor/tests/malloc_bdev.rs b/mayastor/tests/malloc_bdev.rs index 00c78d819..ec3ce7ff2 100644 --- a/mayastor/tests/malloc_bdev.rs +++ b/mayastor/tests/malloc_bdev.rs @@ -1,35 +1,29 @@ +use common::MayastorTest; use mayastor::{ - core::{ - mayastor_env_stop, - Bdev, - DmaBuf, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, + core::{Bdev, DmaBuf, MayastorCliArgs}, nexus_uri::{bdev_create, bdev_destroy}, }; pub mod common; -#[test] -fn malloc_bdev() { - common::mayastor_test_init(); - let ms = MayastorEnvironment::new(MayastorCliArgs::default()); - ms.start(|| { - Reactor::block_on(async { - bdev_create("malloc:///malloc0?blk_size=512&size_mb=100") - .await - .unwrap(); - - bdev_create(&format!( - "malloc:///malloc1?blk_size=512&num_blocks={}", - (100 << 20) / 512 - )) +#[tokio::test] +async fn malloc_bdev() { + let ms = MayastorTest::new(MayastorCliArgs::default()); + ms.spawn(async { + bdev_create("malloc:///malloc0?blk_size=512&size_mb=100") .await .unwrap(); - }); + bdev_create(&format!( + "malloc:///malloc1?blk_size=512&num_blocks={}", + (100 << 20) / 512 + )) + .await + .unwrap(); + }) + .await; + + ms.spawn(async { let m0 = Bdev::open_by_name("malloc0", true).unwrap(); let m1 = Bdev::open_by_name("malloc1", true).unwrap(); @@ -49,38 +43,35 @@ fn malloc_bdev() { let mut buf = DmaBuf::new(4096, 9).unwrap(); buf.fill(3); - Reactor::block_on(async move { - h0.write_at(0, &buf).await.unwrap(); - h1.write_at(0, &buf).await.unwrap(); + h0.write_at(0, &buf).await.unwrap(); + h1.write_at(0, &buf).await.unwrap(); - let mut b0 = h0.dma_malloc(4096).unwrap(); - let mut b1 = h1.dma_malloc(4096).unwrap(); + let mut b0 = h0.dma_malloc(4096).unwrap(); + let mut b1 = h1.dma_malloc(4096).unwrap(); - b0.fill(1); - b0.fill(2); + b0.fill(1); + b0.fill(2); - h0.read_at(0, &mut b0).await.unwrap(); - h1.read_at(0, &mut b1).await.unwrap(); + h0.read_at(0, &mut 
b0).await.unwrap(); + h1.read_at(0, &mut b1).await.unwrap(); - let s0 = b0.as_slice(); - let s1 = b1.as_slice(); + let s0 = b0.as_slice(); + let s1 = b1.as_slice(); - for i in 0 .. s0.len() { - assert_eq!(s0[i], 3); - assert_eq!(s0[i], s1[i]) - } - }); - - Reactor::block_on(async { - bdev_destroy("malloc:///malloc0?blk_size=512&size_mb=100") - .await - .unwrap(); - bdev_destroy("malloc:///malloc1?blk_size=512&size_mb=100") - .await - .unwrap(); - }); + for i in 0 .. s0.len() { + assert_eq!(s0[i], 3); + assert_eq!(s0[i], s1[i]) + } + }) + .await; - mayastor_env_stop(0); + ms.spawn(async { + bdev_destroy("malloc:///malloc0?blk_size=512&size_mb=100") + .await + .unwrap(); + bdev_destroy("malloc:///malloc1?blk_size=512&size_mb=100") + .await + .unwrap(); }) - .unwrap(); + .await; } diff --git a/mayastor/tests/nexus_share.rs b/mayastor/tests/nexus_share.rs index 81fa7318c..1443f0518 100644 --- a/mayastor/tests/nexus_share.rs +++ b/mayastor/tests/nexus_share.rs @@ -14,87 +14,84 @@ use mayastor::{ }; pub mod common; +use common::MayastorTest; -#[test] -fn nexus_test() { - common::mayastor_test_init(); +#[tokio::test] +async fn nexus_test() { let mut args = MayastorCliArgs::default(); args.reactor_mask = "0x2".into(); - catch_unwind(|| { - MayastorEnvironment::new(args) - .start(|| { - // create a nexus and share it via iSCSI - Reactor::block_on(async { - nexus_create( - "nexus0", - 48 * 1024 * 1024, - None, - &[ - "malloc:///malloc0?size_mb=64".into(), - "malloc:///malloc1?size_mb=64".into(), - ], - ) - .await - .unwrap(); + MayastorTest::new(args) + .spawn(async { + // create a nexus and share it via iSCSI + Reactor::block_on(async { + nexus_create( + "nexus0", + 48 * 1024 * 1024, + None, + &[ + "malloc:///malloc0?size_mb=64".into(), + "malloc:///malloc1?size_mb=64".into(), + ], + ) + .await + .unwrap(); - let nexus = nexus_lookup("nexus0").unwrap(); + let nexus = nexus_lookup("nexus0").unwrap(); - // this should be idempotent so validate that sharing the - // same thing 
over the same protocol - // works - let share = nexus.share_iscsi().await.unwrap(); - let share2 = nexus.share_iscsi().await.unwrap(); - assert_eq!(share, share2); - assert_eq!(nexus.shared(), Some(Protocol::Iscsi)); - }); + // this should be idempotent so validate that sharing the + // same thing over the same protocol + // works + let share = nexus.share_iscsi().await.unwrap(); + let share2 = nexus.share_iscsi().await.unwrap(); + assert_eq!(share, share2); + assert_eq!(nexus.shared(), Some(Protocol::Iscsi)); + }); - // sharing the nexus over nvmf should fail - Reactor::block_on(async { - let nexus = nexus_lookup("nexus0").unwrap(); - assert_eq!(nexus.share_nvmf().await.is_err(), true); - assert_eq!(nexus.shared(), Some(Protocol::Iscsi)); - }); + // sharing the nexus over nvmf should fail + Reactor::block_on(async { + let nexus = nexus_lookup("nexus0").unwrap(); + assert_eq!(nexus.share_nvmf().await.is_err(), true); + assert_eq!(nexus.shared(), Some(Protocol::Iscsi)); + }); - // unshare the nexus and then share over nvmf - Reactor::block_on(async { - let nexus = nexus_lookup("nexus0").unwrap(); - nexus.unshare().await.unwrap(); - let shared = nexus.shared(); - assert_eq!(shared, Some(Protocol::Off)); + // unshare the nexus and then share over nvmf + Reactor::block_on(async { + let nexus = nexus_lookup("nexus0").unwrap(); + nexus.unshare().await.unwrap(); + let shared = nexus.shared(); + assert_eq!(shared, Some(Protocol::Off)); - let shared = nexus.share_nvmf().await.unwrap(); - let shared2 = nexus.share_nvmf().await.unwrap(); + let shared = nexus.share_nvmf().await.unwrap(); + let shared2 = nexus.share_nvmf().await.unwrap(); - assert_eq!(shared, shared2); - assert_eq!(nexus.shared(), Some(Protocol::Nvmf)); - }); + assert_eq!(shared, shared2); + assert_eq!(nexus.shared(), Some(Protocol::Nvmf)); + }); - // sharing the bdev directly, over iSCSI or nvmf should result - // in an error - Reactor::block_on(async { - let bdev = Bdev::lookup_by_name("nexus0").unwrap(); - 
assert_eq!(bdev.share_iscsi().await.is_err(), true); - assert_eq!(bdev.share_nvmf().await.is_err(), true); - }); + // sharing the bdev directly, over iSCSI or nvmf should result + // in an error + Reactor::block_on(async { + let bdev = Bdev::lookup_by_name("nexus0").unwrap(); + assert_eq!(bdev.share_iscsi().await.is_err(), true); + assert_eq!(bdev.share_nvmf().await.is_err(), true); + }); - // unshare the nexus - Reactor::block_on(async { - let nexus = nexus_lookup("nexus0").unwrap(); - nexus.unshare().await.unwrap(); - }); + // unshare the nexus + Reactor::block_on(async { + let nexus = nexus_lookup("nexus0").unwrap(); + nexus.unshare().await.unwrap(); + }); - Reactor::block_on(async { - let nexus = nexus_lookup("nexus0").unwrap(); - assert_eq!(nexus.shared(), Some(Protocol::Off)); - let bdev = Bdev::lookup_by_name("nexus0").unwrap(); - assert_eq!(bdev.shared(), Some(Protocol::Off)); - nexus.destroy().await.unwrap(); - }); + Reactor::block_on(async { + let nexus = nexus_lookup("nexus0").unwrap(); + assert_eq!(nexus.shared(), Some(Protocol::Off)); + let bdev = Bdev::lookup_by_name("nexus0").unwrap(); + assert_eq!(bdev.shared(), Some(Protocol::Off)); + nexus.destroy().await.unwrap(); + }); - mayastor_env_stop(0); - }) - .unwrap(); - }) - .unwrap(); + mayastor_env_stop(0); + }) + .await; } diff --git a/mayastor/tests/reactor.rs b/mayastor/tests/reactor.rs index 818db8187..cfcdec00e 100644 --- a/mayastor/tests/reactor.rs +++ b/mayastor/tests/reactor.rs @@ -17,7 +17,6 @@ pub mod common; // This test requires the system to have at least 2 cpus #[test] fn reactor_start_stop() { - common::mayastor_test_init(); let mut args = MayastorCliArgs::default(); args.reactor_mask = "0x1".to_string(); let ms = MayastorEnvironment::new(args); diff --git a/mayastor/tests/reactor_block_on.rs b/mayastor/tests/reactor_block_on.rs index 1d56ba517..58f80278b 100644 --- a/mayastor/tests/reactor_block_on.rs +++ b/mayastor/tests/reactor_block_on.rs @@ -16,21 +16,17 @@ static COUNT: Lazy> = 
Lazy::new(|| AtomicCell::new(0)); #[test] fn reactor_block_on() { common::mayastor_test_init(); - let ms = MayastorEnvironment::new(MayastorCliArgs::default()); - ms.start(|| { - Reactor::block_on(async move { - assert_eq!(COUNT.load(), 0); - COUNT.store(1); - Reactors::master() - .send_future(async { assert_eq!(COUNT.load(), 2) }); - Reactor::block_on(async { - assert_eq!(COUNT.load(), 1); - COUNT.store(2); - }); + MayastorEnvironment::new(MayastorCliArgs::default()).init(); + Reactor::block_on(async move { + assert_eq!(COUNT.load(), 0); + COUNT.store(1); + Reactors::master().send_future(async { assert_eq!(COUNT.load(), 2) }); + Reactor::block_on(async { + assert_eq!(COUNT.load(), 1); + COUNT.store(2); }); - mayastor_env_stop(0); - }) - .unwrap(); + }); + mayastor_env_stop(0); assert_eq!(COUNT.load(), 2); } From dfccad1179ef747aa261c3c64b9a440b1d21bbba Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Mon, 19 Oct 2020 12:18:12 +0200 Subject: [PATCH 13/92] nix: bump pkg versions --- mayastor/src/core/env.rs | 4 ++-- mayastor/tests/common/compose.rs | 2 +- mayastor/tests/core.rs | 2 +- mayastor/tests/error_fault_child.rs | 14 ++++++------ mayastor/tests/lvs_pool.rs | 15 +++---------- mayastor/tests/nexus_share.rs | 3 --- nix/mayastor-overlay.nix | 1 - nix/pkgs/liburing/default.nix | 34 ----------------------------- nix/sources.json | 18 +++++++-------- services/common/src/lib.rs | 4 ++-- 10 files changed, 25 insertions(+), 72 deletions(-) delete mode 100644 nix/pkgs/liburing/default.nix diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index a68dade86..94b610cb6 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -90,7 +90,7 @@ pub struct MayastorCliArgs { #[structopt(short = "L")] /// Enable logging for sub components pub log_components: Vec, - #[structopt(short = "m", default_value = "0x3")] + #[structopt(short = "m", default_value = "0x1")] /// The reactor mask to be used for starting up the instance pub reactor_mask: String, 
#[structopt(short = "N")] @@ -135,7 +135,7 @@ impl Default for MayastorCliArgs { mbus_endpoint: None, node_name: None, env_context: None, - reactor_mask: "0x3".into(), + reactor_mask: "0x1".into(), mem_size: 0, rpc_address: "/var/tmp/mayastor.sock".to_string(), no_pci: true, diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 72bc4bf53..df3da4a8b 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -37,7 +37,7 @@ use ipnetwork::Ipv4Network; use tokio::sync::oneshot::channel; use tonic::transport::Channel; -use crate::{common, common::mayastor_test_init}; +use crate::common::mayastor_test_init; use ::rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, mayastor_client::MayastorClient, diff --git a/mayastor/tests/core.rs b/mayastor/tests/core.rs index 1cb8775c3..f50d398cc 100644 --- a/mayastor/tests/core.rs +++ b/mayastor/tests/core.rs @@ -6,7 +6,7 @@ use uuid::Uuid; use common::MayastorTest; use mayastor::{ bdev::{nexus_create, nexus_lookup, util::uring}, - core::{Bdev, BdevHandle, MayastorCliArgs, Reactor}, + core::{Bdev, BdevHandle, MayastorCliArgs}, nexus_uri::{bdev_create, bdev_destroy}, }; use rpc::mayastor::ShareProtocolNexus; diff --git a/mayastor/tests/error_fault_child.rs b/mayastor/tests/error_fault_child.rs index 9382ed3cf..73325f1a0 100644 --- a/mayastor/tests/error_fault_child.rs +++ b/mayastor/tests/error_fault_child.rs @@ -6,7 +6,7 @@ pub use common::error_bdev::{ VBDEV_IO_FAILURE, }; use mayastor::{ - bdev::{ActionType, nexus_create, nexus_lookup, NexusStatus}, + bdev::{nexus_create, nexus_lookup, ActionType, NexusStatus}, core::{Bdev, MayastorCliArgs}, subsys::Config, }; @@ -65,11 +65,11 @@ async fn nexus_fault_child_test() { 10, ); - for _ in 0..3 { + for _ in 0 .. 3 { err_read_nexus_both(false).await; common::reactor_run_millis(1); } - for _ in 0..2 { + for _ in 0 .. 
2 { // the second iteration causes the error count to exceed the max no // of retry errors (4) for the read and causes the child to be // removed @@ -77,7 +77,7 @@ async fn nexus_fault_child_test() { common::reactor_run_millis(1); } }) - .await; + .await; // error child should be removed from the IO path here @@ -87,14 +87,14 @@ async fn nexus_fault_child_test() { ms.spawn(async { err_read_nexus_both(true).await; // should succeed because both IOs go to the remaining child err_write_nexus(true).await; // should succeed because the IO goes to - // the remaining child + // the remaining child }) - .await; + .await; ms.spawn(async { delete_nexus().await; }) - .await; + .await; common::delete_file(&[DISKNAME1.to_string()]); common::delete_file(&[DISKNAME2.to_string()]); diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index 7d6a6d95c..e6b510d74 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -1,15 +1,6 @@ -use std::panic::catch_unwind; - +use common::MayastorTest; use mayastor::{ - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Protocol, - Reactor, - Share, - }, + core::{Bdev, MayastorCliArgs, Protocol, Share}, lvs::{Lvs, PropName, PropValue}, nexus_uri::bdev_create, subsys::NvmfSubsystem, @@ -17,7 +8,7 @@ use mayastor::{ use rpc::mayastor::CreatePoolRequest; pub mod common; -use common::MayastorTest; + static DISKNAME1: &str = "/tmp/disk1.img"; #[tokio::test] diff --git a/mayastor/tests/nexus_share.rs b/mayastor/tests/nexus_share.rs index 1443f0518..2a5cf07ed 100644 --- a/mayastor/tests/nexus_share.rs +++ b/mayastor/tests/nexus_share.rs @@ -1,12 +1,9 @@ -use std::panic::catch_unwind; - use mayastor::{ bdev::{nexus_create, nexus_lookup}, core::{ mayastor_env_stop, Bdev, MayastorCliArgs, - MayastorEnvironment, Protocol, Reactor, Share, diff --git a/nix/mayastor-overlay.nix b/nix/mayastor-overlay.nix index b5051491d..d49a99804 100644 --- a/nix/mayastor-overlay.nix +++ 
b/nix/mayastor-overlay.nix @@ -1,6 +1,5 @@ self: super: { libiscsi = super.callPackage ./pkgs/libiscsi { }; - liburing = super.callPackage ./pkgs/liburing { }; nvmet-cli = super.callPackage ./pkgs/nvmet-cli { }; libspdk = (super.callPackage ./pkgs/libspdk { }).release; libspdk-dev = (super.callPackage ./pkgs/libspdk { }).debug; diff --git a/nix/pkgs/liburing/default.nix b/nix/pkgs/liburing/default.nix deleted file mode 100644 index 593a03b04..000000000 --- a/nix/pkgs/liburing/default.nix +++ /dev/null @@ -1,34 +0,0 @@ -{ stdenv -, fetchgit -, fetchpatch -, lib -, sources -}: - -stdenv.mkDerivation rec { - pname = "liburing"; - version = lib.removePrefix "liburing-" sources.liburing.branch; - src = sources.liburing; - - separateDebugInfo = true; - enableParallelBuilding = true; - - outputs = [ "out" "lib" "dev" "man" ]; - - configurePhase = '' - ./configure \ - --prefix=$out \ - --includedir=$dev/include \ - --libdir=$lib/lib \ - --libdevdir=$lib/lib \ - --mandir=$man/share/man \ - ''; - - meta = with stdenv.lib; { - description = "Userspace library for the Linux io_uring API"; - homepage = https://git.kernel.dk/cgit/liburing/; - license = licenses.lgpl21; - platforms = platforms.linux; - maintainers = with maintainers; [ thoughtpolice ]; - }; -} diff --git a/nix/sources.json b/nix/sources.json index 70a2e26d7..e3fbb8e8c 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -29,10 +29,10 @@ "homepage": "https://github.com/nmattia/niv", "owner": "nmattia", "repo": "niv", - "rev": "f73bf8d584148677b01859677a63191c31911eae", - "sha256": "0jlmrx633jvqrqlyhlzpvdrnim128gc81q5psz2lpp2af8p8q9qs", + "rev": "9d35b9e4837ab88517210b1701127612c260eccf", + "sha256": "0q50xhnm8g2yfyakrh0nly4swyygxpi0a8cb9gp65wcakcgvzvdh", "type": "tarball", - "url": "https://github.com/nmattia/niv/archive/f73bf8d584148677b01859677a63191c31911eae.tar.gz", + "url": "https://github.com/nmattia/niv/archive/9d35b9e4837ab88517210b1701127612c260eccf.tar.gz", "url_template": 
"https://github.com///archive/.tar.gz" }, "nixpkgs": { @@ -41,10 +41,10 @@ "homepage": "https://github.com/NixOS/nixpkgs", "owner": "NixOS", "repo": "nixpkgs", - "rev": "16e15a2821cfb6a2c1ae3953decb98fc735f0bed", - "sha256": "17dz5gw7p3s9g4qf8x98wl79w5kpiw20ljnym67kh404yzr5j07v", + "rev": "7ef527cff856ea7938dba20769a6d59ebc9575e6", + "sha256": "0zhvly0b99846x1y3jyva79amf0kyi9c6lwg8l3ghig669kxlwa7", "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/16e15a2821cfb6a2c1ae3953decb98fc735f0bed.tar.gz", + "url": "https://github.com/NixOS/nixpkgs/archive/7ef527cff856ea7938dba20769a6d59ebc9575e6.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "nixpkgs-mozilla": { @@ -53,10 +53,10 @@ "homepage": "https://github.com/mozilla/nixpkgs-mozilla", "owner": "mozilla", "repo": "nixpkgs-mozilla", - "rev": "efda5b357451dbb0431f983cca679ae3cd9b9829", - "sha256": "11wqrg86g3qva67vnk81ynvqyfj0zxk83cbrf0p9hsvxiwxs8469", + "rev": "57c8084c7ef41366993909c20491e359bbb90f54", + "sha256": "0lchhjys1jj8fdiisd2718sqd63ys7jrj6hq6iq9l1gxj3mz2w81", "type": "tarball", - "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/efda5b357451dbb0431f983cca679ae3cd9b9829.tar.gz", + "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/57c8084c7ef41366993909c20491e359bbb90f54.tar.gz", "url_template": "https://github.com///archive/.tar.gz" } } diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index 04c414130..d6cbac12f 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -171,8 +171,8 @@ impl Service { } } - async fn process_message<'a>( - arguments: Arguments<'a>, + async fn process_message( + arguments: Arguments<'_>, subscriptions: &[Box], ) -> Result<(), ServiceError> { let channel = arguments.request.channel(); From fa93bf806ab69613fe53b64c7707fef1ef402ffd Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Tue, 20 Oct 2020 00:57:11 +0100 Subject: [PATCH 14/92] CAS-420 create gRPC method to obtain statistics from 
getrusage(2) system call --- mayastor/src/bin/cli/cli.rs | 3 ++ mayastor/src/bin/cli/perf_cli.rs | 68 ++++++++++++++++++++++++++++++ mayastor/src/grpc/mayastor_grpc.rs | 15 ++++++- mayastor/src/host/mod.rs | 1 + mayastor/src/host/resource.rs | 42 ++++++++++++++++++ rpc/proto/mayastor.proto | 20 +++++++++ 6 files changed, 148 insertions(+), 1 deletion(-) create mode 100644 mayastor/src/bin/cli/perf_cli.rs create mode 100644 mayastor/src/host/resource.rs diff --git a/mayastor/src/bin/cli/cli.rs b/mayastor/src/bin/cli/cli.rs index 1ab573b20..a4846a946 100644 --- a/mayastor/src/bin/cli/cli.rs +++ b/mayastor/src/bin/cli/cli.rs @@ -19,6 +19,7 @@ mod device_cli; mod jsonrpc_cli; mod nexus_child_cli; mod nexus_cli; +mod perf_cli; mod pool_cli; mod rebuild_cli; mod replica_cli; @@ -82,6 +83,7 @@ async fn main() -> Result<(), Status> { .subcommand(replica_cli::subcommands()) .subcommand(bdev_cli::subcommands()) .subcommand(device_cli::subcommands()) + .subcommand(perf_cli::subcommands()) .subcommand(rebuild_cli::subcommands()) .subcommand(snapshot_cli::subcommands()) .subcommand(jsonrpc_cli::subcommands()) @@ -93,6 +95,7 @@ async fn main() -> Result<(), Status> { ("bdev", Some(args)) => bdev_cli::handler(ctx, args).await?, ("device", Some(args)) => device_cli::handler(ctx, args).await?, ("nexus", Some(args)) => nexus_cli::handler(ctx, args).await?, + ("perf", Some(args)) => perf_cli::handler(ctx, args).await?, ("pool", Some(args)) => pool_cli::handler(ctx, args).await?, ("replica", Some(args)) => replica_cli::handler(ctx, args).await?, ("rebuild", Some(args)) => rebuild_cli::handler(ctx, args).await?, diff --git a/mayastor/src/bin/cli/perf_cli.rs b/mayastor/src/bin/cli/perf_cli.rs new file mode 100644 index 000000000..a408de059 --- /dev/null +++ b/mayastor/src/bin/cli/perf_cli.rs @@ -0,0 +1,68 @@ +//! +//! Methods related to the gathering of performance statistics. +//! +//! At present we only have get_resource_usage() which is +//! 
essentially the result of a getrusage(2) system call. + +use super::context::Context; +use ::rpc::mayastor as rpc; +use clap::{App, AppSettings, ArgMatches, SubCommand}; +use tonic::Status; + +pub fn subcommands<'a, 'b>() -> App<'a, 'b> { + let resource = + SubCommand::with_name("resource").about("Resource usage statistics"); + + SubCommand::with_name("perf") + .settings(&[ + AppSettings::SubcommandRequiredElseHelp, + AppSettings::ColoredHelp, + AppSettings::ColorAlways, + ]) + .about("Performance statistics") + .subcommand(resource) +} + +pub async fn handler( + ctx: Context, + matches: &ArgMatches<'_>, +) -> Result<(), Status> { + match matches.subcommand() { + ("resource", Some(args)) => get_resource_usage(ctx, args).await, + (cmd, _) => { + Err(Status::not_found(format!("command {} does not exist", cmd))) + } + } +} + +async fn get_resource_usage( + mut ctx: Context, + _matches: &ArgMatches<'_>, +) -> Result<(), Status> { + ctx.v2("Requesting resource usage statistics"); + + let mut table: Vec> = Vec::new(); + + let reply = ctx.client.get_resource_usage(rpc::Null {}).await?; + + if let Some(usage) = &reply.get_ref().usage { + table.push(vec![ + usage.soft_faults.to_string(), + usage.hard_faults.to_string(), + usage.vol_csw.to_string(), + usage.invol_csw.to_string(), + ]); + } + + ctx.print_list( + vec![ + ">SOFT_FAULTS", + ">HARD_FAULTS", + ">VOLUNTARY_CSW", + ">INVOLUNTARY_CSW", + ], + table, + ); + + Ok(()) +} diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 3e96940eb..aa038ab86 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -30,7 +30,7 @@ use crate::{ sync_config, GrpcResult, }, - host::blk_device, + host::{blk_device, resource}, }; #[derive(Debug)] @@ -428,4 +428,17 @@ impl mayastor_server::Mayastor for MayastorSvc { trace!("{:?}", reply); Ok(Response::new(reply)) } + + #[instrument(level = "debug", err)] + async fn get_resource_usage( + &self, + _request: Request, + ) -> 
GrpcResult { + let usage = resource::get_resource_usage().await?; + let reply = GetResourceUsageReply { + usage: Some(usage), + }; + trace!("{:?}", reply); + Ok(Response::new(reply)) + } } diff --git a/mayastor/src/host/mod.rs b/mayastor/src/host/mod.rs index 13d238869..4394fe982 100644 --- a/mayastor/src/host/mod.rs +++ b/mayastor/src/host/mod.rs @@ -1 +1,2 @@ pub mod blk_device; +pub mod resource; diff --git a/mayastor/src/host/resource.rs b/mayastor/src/host/resource.rs new file mode 100644 index 000000000..b36f09c5f --- /dev/null +++ b/mayastor/src/host/resource.rs @@ -0,0 +1,42 @@ +//! +//! This module implements the get_resource_usage() gRPC method, +//! which retrieves information via the getrusage(2) system call. + +use ::rpc::mayastor::ResourceUsage; +use std::{io::Error, mem::MaybeUninit, os::raw::c_int}; + +fn getrusage(who: c_int) -> Result { + let mut data: MaybeUninit = MaybeUninit::uninit(); + + if unsafe { libc::getrusage(who, data.as_mut_ptr()) } < 0 { + return Err(Error::last_os_error()); + } + + Ok(unsafe { data.assume_init() }) +} + +struct Usage<'a>(&'a libc::rusage); + +impl From> for ResourceUsage { + fn from(usage: Usage) -> ResourceUsage { + let rusage = usage.0; + ResourceUsage { + soft_faults: rusage.ru_minflt, + hard_faults: rusage.ru_majflt, + swaps: rusage.ru_nswap, + in_block_ops: rusage.ru_inblock, + out_block_ops: rusage.ru_oublock, + ipc_msg_send: rusage.ru_msgsnd, + ipc_msg_rcv: rusage.ru_msgrcv, + signals: rusage.ru_nsignals, + vol_csw: rusage.ru_nvcsw, + invol_csw: rusage.ru_nivcsw, + } + } +} + +/// Obtain resource usage statistics for the current process. 
+pub async fn get_resource_usage() -> Result { + let rusage = getrusage(libc::RUSAGE_SELF)?; + Ok(Usage(&rusage).into()) +} diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto index 0dced23be..e3a3fdcbf 100644 --- a/rpc/proto/mayastor.proto +++ b/rpc/proto/mayastor.proto @@ -67,6 +67,9 @@ service Mayastor { // Enumerate block devices on current host rpc ListBlockDevices (ListBlockDevicesRequest) returns (ListBlockDevicesReply) {} + + // Obtain resource usage statistics for the current process + rpc GetResourceUsage (Null) returns (GetResourceUsageReply) {} } // Means no arguments or no return value. @@ -362,6 +365,23 @@ message ListBlockDevicesReply { repeated BlockDevice devices = 1; } +message ResourceUsage { + int64 soft_faults = 1; // page reclaims (soft page faults) + int64 hard_faults = 2; // hard page faults + int64 swaps = 3; // swaps + int64 in_block_ops = 4; // input block operations + int64 out_block_ops = 5; // output block operations + int64 ipc_msg_send = 6; // IPC messages sent + int64 ipc_msg_rcv = 7; // IPC messages received + int64 signals = 8; // signals received + int64 vol_csw = 9; // voluntary context switches + int64 invol_csw = 10; // involuntary context switches +} + +message GetResourceUsageReply { + ResourceUsage usage = 1; +} + // Anything what follows here are private interfaces used for interacting with // mayastor outside the scope of CSI. From ec74ce231fc79b37c6aa3d6152ac35b186cddc7e Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 22 Oct 2020 11:08:37 +0100 Subject: [PATCH 15/92] CAS-488: message bus timeout and retry Message bus requests can go unanswered. In fact, if no receiver was listening in when the request was sent, it will not get it altogether. This adds a default timeout of 5s with a default max number of 5 retries. To avoid complicating things for the moment, each retry is spaced out by 1s * nr of retries and capped at 10s - further down the line we might want to customize this a bit. 
By using request_ext one can specify timeout and max_retries explicitly. --- mbus-api/examples/client/main.rs | 8 ++++- mbus-api/src/lib.rs | 58 +++++++++++++++++++++++++++++++- mbus-api/src/mbus_nats.rs | 56 +++++++++++++++++++++++++++--- mbus-api/src/send.rs | 35 ++++++++++++++++--- 4 files changed, 146 insertions(+), 11 deletions(-) diff --git a/mbus-api/examples/client/main.rs b/mbus-api/examples/client/main.rs index c17ce0082..f3f74c6fc 100644 --- a/mbus-api/examples/client/main.rs +++ b/mbus-api/examples/client/main.rs @@ -1,6 +1,7 @@ use log::info; use mbus_api::{Message, *}; use serde::{Deserialize, Serialize}; +use std::time::Duration; use structopt::StructOpt; use tokio::stream::StreamExt; @@ -72,7 +73,12 @@ async fn main() { start_server_side().await; } - let reply = DummyRequest {}.request().await.unwrap(); + let options = TimeoutOptions::new() + .with_timeout(Duration::from_secs(1)) + .with_max_retries(Some(3)); + + // request() will use the bus default timeout and retries + let reply = DummyRequest {}.request_ext(options).await.unwrap(); info!("Received reply: {:?}", reply); // We can also use the following api to specify a different channel and bus diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs index 2c748d2c0..bc4409002 100644 --- a/mbus-api/src/lib.rs +++ b/mbus-api/src/lib.rs @@ -17,7 +17,7 @@ pub use send::*; use serde::{Deserialize, Serialize}; use smol::io; use snafu::Snafu; -use std::{fmt::Debug, marker::PhantomData, str::FromStr}; +use std::{fmt::Debug, marker::PhantomData, str::FromStr, time::Duration}; /// Available Message Bus channels #[derive(Clone, Debug)] @@ -169,6 +169,12 @@ pub trait Message { async fn publish(&self) -> io::Result<()>; /// publish a message with a request for a `Self::Reply` reply async fn request(&self) -> io::Result; + /// publish a message with a request for a `Self::Reply` reply + /// and non default timeout options + async fn request_ext( + &self, + options: TimeoutOptions, + ) -> io::Result; } /// The 
preamble is used to peek into messages so allowing for them to be routed @@ -214,6 +220,55 @@ pub type BusOptions = nats::Options; /// Save on typing pub type DynBus = Box; +/// Timeout for receiving a reply to a request message +/// Max number of retries until it gives up +#[derive(Clone)] +pub struct TimeoutOptions { + /// initial request message timeout + pub(crate) timeout: std::time::Duration, + /// max number of retries following the initial attempt's timeout + pub(crate) max_retries: Option, +} + +impl TimeoutOptions { + pub(crate) fn default_timeout() -> Duration { + Duration::from_secs(6) + } + pub(crate) fn default_max_retries() -> u32 { + 6 + } +} + +impl Default for TimeoutOptions { + fn default() -> Self { + Self { + timeout: Self::default_timeout(), + max_retries: Some(Self::default_max_retries()), + } + } +} + +impl TimeoutOptions { + /// New options with default values + pub fn new() -> Self { + Default::default() + } + + /// Timeout after which we'll either fail the request or start retrying + /// if max_retries is greater than 0 or None + pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Specify a max number of retries before giving up + /// None for unlimited retries + pub fn with_max_retries(mut self, max_retries: Option) -> Self { + self.max_retries = max_retries; + self + } +} + /// Messaging Bus trait with "generic" publish and request/reply semantics #[async_trait] #[clonable] @@ -232,6 +287,7 @@ pub trait Bus: Clone + Send + Sync { &self, channel: Channel, message: &[u8], + options: Option, ) -> io::Result; /// Flush queued messages to the server async fn flush(&self) -> io::Result<()>; diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs index 833a7c088..f9fca7066 100644 --- a/mbus-api/src/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -11,13 +11,20 @@ pub fn message_bus_init_tokio(server: String) { NATS_MSG_BUS.get_or_init(|| { // Waits for the message bus to become 
ready tokio::runtime::Handle::current().block_on(async { - NatsMessageBus::new(&server, BusOptions::new()).await + NatsMessageBus::new( + &server, + BusOptions::new(), + TimeoutOptions::new(), + ) + .await }) }); } /// Initialise the Nats Message Bus pub async fn message_bus_init(server: String) { - let nc = NatsMessageBus::new(&server, BusOptions::new()).await; + let nc = + NatsMessageBus::new(&server, BusOptions::new(), TimeoutOptions::new()) + .await; NATS_MSG_BUS .set(nc) .ok() @@ -37,6 +44,7 @@ pub fn bus() -> DynBus { // Would we want to have both sync and async clients? #[derive(Clone)] struct NatsMessageBus { + timeout_options: TimeoutOptions, connection: Connection, } impl NatsMessageBus { @@ -74,8 +82,13 @@ impl NatsMessageBus { } } - async fn new(server: &str, _options: BusOptions) -> Self { + async fn new( + server: &str, + _bus_options: BusOptions, + timeout_options: TimeoutOptions, + ) -> Self { Self { + timeout_options, connection: Self::connect(server).await, } } @@ -99,8 +112,43 @@ impl Bus for NatsMessageBus { &self, channel: Channel, message: &[u8], + options: Option, ) -> io::Result { - self.connection.request(&channel.to_string(), message).await + let channel = &channel.to_string(); + + let options = options.unwrap_or_else(|| self.timeout_options.clone()); + let mut timeout = options.timeout; + let mut retries = 0; + + loop { + let request = self.connection.request(channel, message); + + let result = tokio::time::timeout(timeout, request).await; + if let Ok(r) = result { + return r; + } + if Some(retries) == options.max_retries { + log::error!("Timed out on {}", channel); + return Err(io::ErrorKind::TimedOut.into()); + } + + log::debug!( + "Timeout after {:?} on {} - {} retries left", + timeout, + channel, + if let Some(max) = options.max_retries { + (max - retries).to_string() + } else { + "unlimited".to_string() + } + ); + + retries += 1; + timeout = std::cmp::min( + Duration::from_secs(1) * retries, + Duration::from_secs(10), + ); + } } 
async fn flush(&self) -> io::Result<()> { diff --git a/mbus-api/src/send.rs b/mbus-api/src/send.rs index d82a56ef9..af01c8168 100644 --- a/mbus-api/src/send.rs +++ b/mbus-api/src/send.rs @@ -151,6 +151,12 @@ macro_rules! bus_impl_message { async fn request(&self) -> smol::io::Result<$R> { $T::Request(self, self.channel(), bus()).await } + async fn request_ext( + &self, + options: TimeoutOptions, + ) -> smol::io::Result<$R> { + $T::Request_Ext(self, self.channel(), bus(), options).await + } } }; } @@ -174,7 +180,21 @@ where bus: DynBus, ) -> io::Result { let msg = SendMessage::::new(payload, channel, bus); - msg.request().await + msg.request(None).await + } + + /// Sends the message and requests a reply + /// May fail if the bus fails to publish the message. + /// With additional timeout parameters + #[allow(non_snake_case)] + async fn Request_Ext( + payload: &'a S, + channel: Channel, + bus: DynBus, + options: TimeoutOptions, + ) -> io::Result { + let msg = SendMessage::::new(payload, channel, bus); + msg.request(Some(options)).await + } } @@ -253,11 +273,16 @@ where } /// Sends the message and requests a reply. - /// todo: add timeout with retry logic? - pub(crate) async fn request(&self) -> io::Result { + pub(crate) async fn request( + &self, + options: Option, + ) -> io::Result { let payload = serde_json::to_vec(&self.payload)?; - let reply = - self.bus.request(self.channel.clone(), &payload).await?.data; + let reply = self + .bus + .request(self.channel.clone(), &payload, options) + .await? + .data; let reply: ReplyPayload = serde_json::from_slice(&reply)?; reply.0.map_err(|error| { io::Error::new(io::ErrorKind::Other, format!("{:?}", error)) From 70e5119c743cc607237a8970d56652588ef9c9b1 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 22 Oct 2020 12:59:18 +0100 Subject: [PATCH 16/92] Disable Nix tests from CI The NixOS KVM in the lab keeps losing connectivity and we now have the docker compose tests which should be used instead. 
The existing tests are still kept, we're just not running them on CI. --- Jenkinsfile | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6e1e378f9..2f2c9dff6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -117,15 +117,6 @@ pipeline { } } } - stage('nix tests') { - agent { label 'nixos-mayastor-kvm' } - steps { - sh 'nix-build ./nix/test -A rebuild' - sh 'nix-build ./nix/test -A fio_nvme_basic' - sh 'nix-build ./nix/test -A nvmf_distributed' - sh 'nix-build ./nix/test -A nvmf_ports' - } - } stage('moac unit tests') { agent { label 'nixos-mayastor' } steps { From ecc8a047759a18d36c1a40c0864ef46b2b875800 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Thu, 22 Oct 2020 10:38:48 +0100 Subject: [PATCH 17/92] Add a function to check if a nexus child is local A nexus child is identified as being local if its bdev driver is neither nvme nor iscsi. Therefore, nexus children with bdev drivers of type "lvol", "aio" or "malloc" will be identified as local. --- mayastor/src/bdev/nexus/nexus_child.rs | 12 ++++ mayastor/tests/common/compose.rs | 2 +- mayastor/tests/nexus_child_location.rs | 94 ++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 mayastor/tests/nexus_child_location.rs diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 2f6938b19..ad160554b 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -439,4 +439,16 @@ impl NexusChild { .map(|j| j.stats().progress as i32) .unwrap_or_else(|| -1) } + + /// Determines if a child is local to the nexus (i.e. 
on the same node) + pub fn is_local(&self) -> Option { + match &self.bdev { + Some(bdev) => { + // A local child is not exported over nvme or iscsi + let local = bdev.driver() != "nvme" && bdev.driver() != "iscsi"; + Some(local) + } + None => None, + } + } } diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index df3da4a8b..5e5b7fc50 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -492,7 +492,7 @@ impl ComposeTest { Ok(()) } - /// ge the logs from the container. It would be nice to make it implicit + /// get the logs from the container. It would be nice to make it implicit /// that is, when you make a rpc call, whatever logs where created due to /// that are returned pub async fn logs(&self, name: &str) -> Result<(), Error> { diff --git a/mayastor/tests/nexus_child_location.rs b/mayastor/tests/nexus_child_location.rs new file mode 100644 index 000000000..908b548f4 --- /dev/null +++ b/mayastor/tests/nexus_child_location.rs @@ -0,0 +1,94 @@ +use mayastor::{ + bdev::{nexus_create, nexus_lookup}, + core::MayastorCliArgs, +}; +use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; + +pub mod common; +use common::{Builder, MayastorTest}; + +static NEXUS_NAME: &str = "child_location_nexus"; + +#[tokio::test] +async fn child_location() { + // create a new composeTest + let test = Builder::new() + .name("child_location_test") + .network("10.1.0.0/16") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + // Use GRPC handles to invoke methods on containers + let mut hdls = test.grpc_handles().await.unwrap(); + + // Create and share a bdev over nvmf + hdls[0].bdev.list(Null {}).await.unwrap(); + hdls[0] + .bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + hdls[0] + .bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap(); + + // Create and share a bdev over iscsi + hdls[0] + .bdev + 
.create(BdevUri { + uri: "malloc:///disk1?size_mb=100".into(), + }) + .await + .unwrap(); + hdls[0] + .bdev + .share(BdevShareRequest { + name: "disk1".into(), + proto: "iscsi".into(), + }) + .await + .unwrap(); + + let mayastor = MayastorTest::new(MayastorCliArgs::default()); + mayastor + .spawn(async move { + // Create a nexus with a local child, and two remote children (one + // exported over nvmf and the other over iscsi). + nexus_create( + NEXUS_NAME, + 1024 * 1024 * 50, + None, + &[ + "malloc:///malloc0?blk_size=512&size_mb=100".into(), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "iscsi://{}:3260/iqn.2019-05.io.openebs:disk1/0", + hdls[0].endpoint.ip() + ), + ], + ) + .await + .unwrap(); + + let nexus = nexus_lookup(NEXUS_NAME).expect("Failed to find nexus"); + let children = &nexus.children; + assert_eq!(children.len(), 3); + assert_eq!(children[0].is_local().unwrap(), true); + assert_eq!(children[1].is_local().unwrap(), false); + assert_eq!(children[2].is_local().unwrap(), false); + }) + .await; +} From 183d4f7160b8388b037566a2732006314189c19c Mon Sep 17 00:00:00 2001 From: Devdutt Shenoi Date: Sat, 24 Oct 2020 23:02:38 +0530 Subject: [PATCH 18/92] doc: update faulty links --- doc/build.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/build.md b/doc/build.md index 8817c4a24..170454f48 100644 --- a/doc/build.md +++ b/doc/build.md @@ -5,9 +5,9 @@ use the nightly compiler. 
Nightly is required in all of the provided build possi Build options ================== -- [Building with Nix (recommended)](Building-the-sources-with-nixpkg) -- [Build inside docker](Build-inside-docker) -- [Building the hard way](Build-it-the-hard-way) +- [Building with Nix (recommended)](#Building-the-sources-with-nixpkg) +- [Build inside docker](#Build-inside-docker) +- [Building the hard way](#Build-it-the-hard-way) ## Building the sources with nixpkg From 131978f1dd81523143ce24761b15d2f1ab53c309 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Tue, 20 Oct 2020 07:44:30 +0100 Subject: [PATCH 19/92] CAS-491-Remove-stored-handle --- mayastor/src/bdev/nexus/nexus_child.rs | 68 +++-------------------- mayastor/src/bdev/nexus/nexus_label.rs | 59 +++++++++++++------- mayastor/src/bdev/nexus/nexus_metadata.rs | 52 ++++++++--------- mayastor/tests/nexus_label.rs | 11 ++-- 4 files changed, 77 insertions(+), 113 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index ad160554b..aa00f6890 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -14,7 +14,7 @@ use crate::{ }, NexusErrStore, }, - core::{Bdev, BdevHandle, CoreError, Descriptor, DmaBuf}, + core::{Bdev, BdevHandle, CoreError, Descriptor}, nexus_uri::{bdev_destroy, NexusBdevError}, rebuild::{ClientOperations, RebuildJob}, subsys::Config, @@ -44,20 +44,10 @@ pub enum ChildError { ChildInvalid {}, #[snafu(display("Opening child bdev without bdev pointer"))] OpenWithoutBdev {}, - #[snafu(display("Failed to create a BdevHandle for child"))] + #[snafu(display("Failed to create a BdevHandle for child: {}", source))] HandleCreate { source: CoreError }, } -#[derive(Debug, Snafu)] -pub enum ChildIoError { - #[snafu(display("Error writing to {}: {}", name, source))] - WriteError { source: CoreError, name: String }, - #[snafu(display("Error reading from {}: {}", name, source))] - ReadError { source: CoreError, name: 
String }, - #[snafu(display("Invalid descriptor for child bdev {}", name))] - InvalidDescriptor { name: String }, -} - #[derive(Debug, Serialize, PartialEq, Deserialize, Copy, Clone)] pub enum Reason { /// no particular reason for the child to be in this state @@ -136,9 +126,6 @@ pub struct NexusChild { /// current state of the child #[serde(skip_serializing)] state: ChildState, - /// descriptor obtained after opening a device - #[serde(skip_serializing)] - pub(crate) bdev_handle: Option, /// record of most-recent IO errors #[serde(skip_serializing)] pub(crate) err_store: Option, @@ -231,7 +218,6 @@ impl NexusChild { }, )?); - self.bdev_handle = Some(BdevHandle::try_from(desc.clone()).unwrap()); self.desc = Some(desc); let cfg = Config::get(); @@ -323,9 +309,7 @@ impl NexusChild { } } // just to be explicit - let hdl = self.bdev_handle.take(); let desc = self.desc.take(); - drop(hdl); drop(desc); } @@ -345,7 +329,6 @@ impl NexusChild { desc: None, ch: std::ptr::null_mut(), state: ChildState::Init, - bdev_handle: None, err_store: None, } } @@ -373,59 +356,24 @@ impl NexusChild { || self.state() == ChildState::Faulted(Reason::OutOfSync) } - /// return references to child's bdev and descriptor + /// return reference to child's bdev and a new BdevHandle /// both must be present - otherwise it is considered an error - pub fn get_dev(&self) -> Result<(&Bdev, &BdevHandle), ChildError> { + pub fn get_dev(&self) -> Result<(&Bdev, BdevHandle), ChildError> { if !self.is_accessible() { info!("{}: Child is inaccessible: {}", self.parent, self.name); return Err(ChildError::ChildInaccessible {}); } if let Some(bdev) = &self.bdev { - if let Some(desc) = &self.bdev_handle { - return Ok((bdev, desc)); + if let Ok(desc) = self.get_descriptor() { + let hndl = + BdevHandle::try_from(desc).context(HandleCreate {})?; + return Ok((bdev, hndl)); } } - Err(ChildError::ChildInvalid {}) } - /// write the contents of the buffer to this child - pub async fn write_at( - &self, - offset: u64, - 
buf: &DmaBuf, - ) -> Result { - match self.bdev_handle.as_ref() { - Some(desc) => { - Ok(desc.write_at(offset, buf).await.context(WriteError { - name: self.name.clone(), - })?) - } - None => Err(ChildIoError::InvalidDescriptor { - name: self.name.clone(), - }), - } - } - - /// read from this child device into the given buffer - pub async fn read_at( - &self, - offset: u64, - buf: &mut DmaBuf, - ) -> Result { - match self.bdev_handle.as_ref() { - Some(desc) => { - Ok(desc.read_at(offset, buf).await.context(ReadError { - name: self.name.clone(), - })?) - } - None => Err(ChildIoError::InvalidDescriptor { - name: self.name.clone(), - }), - } - } - /// Return the rebuild job which is rebuilding this child, if rebuilding fn get_rebuild_job(&self) -> Option<&mut RebuildJob> { let job = RebuildJob::lookup(&self.name).ok()?; diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index 2333d6e2e..ddb688385 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -52,12 +52,6 @@ //! The nbd0 zero device does not show the partitions when mounting //! it without the nexus in the data path, there would be two paritions //! 
``` -use std::{ - fmt::{self, Display}, - io::{Cursor, Seek, SeekFrom}, - str::FromStr, -}; - use bincode::{deserialize_from, serialize, serialize_into, Error}; use crc::{crc32, Hasher32}; use futures::future::join_all; @@ -66,14 +60,19 @@ use serde::{ ser::{Serialize, SerializeTuple, Serializer}, }; use snafu::{ResultExt, Snafu}; +use std::{ + fmt::{self, Display}, + io::{Cursor, Seek, SeekFrom}, + str::FromStr, +}; use uuid::{self, parser, Uuid}; use crate::{ bdev::nexus::{ nexus_bdev::Nexus, - nexus_child::{ChildError, ChildIoError, NexusChild}, + nexus_child::{ChildError, NexusChild}, }, - core::{DmaBuf, DmaError}, + core::{CoreError, DmaBuf, DmaError}, }; #[derive(Debug, Snafu)] @@ -81,9 +80,9 @@ pub enum LabelError { #[snafu(display("{}", source))] NexusChildError { source: ChildError }, #[snafu(display("Error reading {}: {}", name, source))] - ReadError { name: String, source: ChildIoError }, + ReadError { name: String, source: CoreError }, #[snafu(display("Write error: {}", source))] - WriteError { source: ChildIoError }, + WriteError { name: String, source: CoreError }, #[snafu(display( "Failed to allocate buffer for reading {}: {}", name, @@ -131,6 +130,12 @@ pub enum LabelError { BackupLocation {}, #[snafu(display("GPT partition table location is incorrect"))] PartitionTableLocation {}, + #[snafu(display( + "Could not get handle for child bdev {}: {}", + name, + source + ))] + HandleCreate { name: String, source: ChildError }, } struct LabelData { @@ -345,7 +350,7 @@ impl Nexus { for result in join_all(futures).await { if let Err(error) = result { // return the first error - return Err(error).context(WriteError {}); + return Err(error); } } @@ -384,7 +389,7 @@ impl Nexus { for result in join_all(futures).await { if let Err(error) = result { // return the first error - return Err(error).context(WriteError {}); + return Err(error); } } @@ -914,15 +919,15 @@ impl NexusLabel { impl NexusChild { /// read and validate this child's label pub async fn 
probe_label(&self) -> Result { - let (bdev, desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; // // Protective MBR - let mut buf = desc.dma_malloc(block_size).context(ReadAlloc { + let mut buf = hndl.dma_malloc(block_size).context(ReadAlloc { name: String::from("header"), })?; - self.read_at(0, &mut buf).await.context(ReadError { + hndl.read_at(0, &mut buf).await.context(ReadError { name: String::from("MBR"), })?; let mbr = NexusLabel::read_mbr(&buf)?; @@ -936,7 +941,7 @@ impl NexusChild { // GPT header(s) // Get primary. - self.read_at(block_size, &mut buf) + hndl.read_at(block_size, &mut buf) .await .context(ReadError { name: String::from("primary GPT header"), @@ -947,7 +952,7 @@ impl NexusChild { active = &primary; // Get secondary. let offset = (bdev.num_blocks() - 1) * block_size; - self.read_at(offset, &mut buf).await.context(ReadError { + hndl.read_at(offset, &mut buf).await.context(ReadError { name: String::from("secondary GPT header"), })?; match NexusLabel::read_secondary_header(&buf) { @@ -986,7 +991,7 @@ impl NexusChild { ); // Get secondary and see if we are able to proceed. 
let offset = (bdev.num_blocks() - 1) * block_size; - self.read_at(offset, &mut buf).await.context(ReadError { + hndl.read_at(offset, &mut buf).await.context(ReadError { name: String::from("secondary GPT header"), })?; match NexusLabel::read_secondary_header(&buf) { @@ -1023,11 +1028,11 @@ impl NexusChild { block_size, ); let mut buf = - desc.dma_malloc(blocks * block_size).context(ReadAlloc { + hndl.dma_malloc(blocks * block_size).context(ReadAlloc { name: String::from("partition table"), })?; let offset = active.lba_table * block_size; - self.read_at(offset, &mut buf).await.context(ReadError { + hndl.read_at(offset, &mut buf).await.context(ReadError { name: String::from("partition table"), })?; let mut partitions = NexusLabel::read_partitions(&buf, active)?; @@ -1064,6 +1069,20 @@ impl NexusChild { label, } } + + /// write the contents of the buffer to this child + async fn write_at( + &self, + offset: u64, + buf: &DmaBuf, + ) -> Result { + let (_bdev, hndl) = self.get_dev().context(HandleCreate { + name: self.name.clone(), + })?; + Ok(hndl.write_at(offset, buf).await.context(WriteError { + name: self.name.clone(), + })?) 
+ } } pub struct NexusChildLabel<'a> { diff --git a/mayastor/src/bdev/nexus/nexus_metadata.rs b/mayastor/src/bdev/nexus/nexus_metadata.rs index 75fc8baf7..172271b61 100644 --- a/mayastor/src/bdev/nexus/nexus_metadata.rs +++ b/mayastor/src/bdev/nexus/nexus_metadata.rs @@ -48,11 +48,11 @@ use snafu::{ResultExt, Snafu}; use crate::{ bdev::nexus::{ nexus_bdev::Nexus, - nexus_child::{ChildError, ChildIoError, NexusChild}, + nexus_child::{ChildError, NexusChild}, nexus_label::{Aligned, GptEntry, GptGuid, LabelError}, nexus_metadata_content::NexusConfig, }, - core::{DmaBuf, DmaError}, + core::{CoreError, DmaBuf, DmaError}, }; #[derive(Debug, Snafu)] @@ -62,9 +62,9 @@ pub enum MetaDataError { #[snafu(display("Error probing disk label: {}", source))] ProbeLabelError { source: LabelError }, #[snafu(display("Error reading {}: {}", name, source))] - ReadError { name: String, source: ChildIoError }, + ReadError { name: String, source: CoreError }, #[snafu(display("Error writing {}: {}", name, source))] - WriteError { name: String, source: ChildIoError }, + WriteError { name: String, source: CoreError }, #[snafu(display( "Failed to allocate buffer for reading {}: {}", name, @@ -319,7 +319,7 @@ impl NexusChild { &self, partition_lba: u64, ) -> Result { - let (bdev, desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; // @@ -329,10 +329,10 @@ impl NexusChild { block_size, ); let mut buf = - desc.dma_malloc(blocks * block_size).context(ReadAlloc { + hndl.dma_malloc(blocks * block_size).context(ReadAlloc { name: String::from("header"), })?; - self.read_at((partition_lba + 1) * block_size, &mut buf) + hndl.read_at((partition_lba + 1) * block_size, &mut buf) .await .context(ReadError { name: String::from("header"), @@ -347,10 +347,10 @@ impl NexusChild { block_size, ); let mut buf = - desc.dma_malloc(blocks * block_size).context(ReadAlloc { + hndl.dma_malloc(blocks * 
block_size).context(ReadAlloc { name: String::from("index"), })?; - self.read_at( + hndl.read_at( (header.self_lba + header.index_start) * block_size, &mut buf, ) @@ -384,15 +384,15 @@ impl NexusChild { let entry = &metadata.index[selected as usize]; - let (bdev, desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let blocks = entry.data_end - entry.data_start + 1; let mut buf = - desc.dma_malloc(blocks * block_size).context(ReadAlloc { + hndl.dma_malloc(blocks * block_size).context(ReadAlloc { name: String::from("object"), })?; - self.read_at( + hndl.read_at( (metadata.header.self_lba + entry.data_start) * block_size, &mut buf, ) @@ -409,7 +409,7 @@ impl NexusChild { &self, metadata: &NexusMetaData, ) -> Result, MetaDataError> { - let (bdev, desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let mut list: Vec = Vec::new(); @@ -417,10 +417,10 @@ impl NexusChild { for entry in &metadata.index { let blocks = entry.data_end - entry.data_start + 1; let mut buf = - desc.dma_malloc(blocks * block_size).context(ReadAlloc { + hndl.dma_malloc(blocks * block_size).context(ReadAlloc { name: String::from("object"), })?; - self.read_at( + hndl.read_at( (metadata.header.self_lba + entry.data_start) * block_size, &mut buf, ) @@ -439,7 +439,7 @@ impl NexusChild { &self, metadata: &NexusMetaData, ) -> Result<(), MetaDataError> { - let (bdev, _desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let blocks = metadata.header.index_start @@ -466,7 +466,7 @@ impl NexusChild { serialize_into(&mut writer, entry).context(SerializeError {})?; } - self.write_at(metadata.header.self_lba * block_size, &buf) + hndl.write_at(metadata.header.self_lba * block_size, &buf) 
.await .context(WriteError { name: String::from("index"), @@ -478,7 +478,7 @@ impl NexusChild { /// Write a config object to disk. /// Also add a corresponding entry to the index and update the header. /// Will fail if there is insufficent space remaining on the disk. - pub async fn write_config_object( + async fn write_config_object( &self, metadata: &mut NexusMetaData, config: &NexusConfig, @@ -491,7 +491,7 @@ impl NexusChild { }); } - let (bdev, _desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let timestamp = now @@ -529,7 +529,7 @@ impl NexusChild { serialize_into(&mut writer, config).context(SerializeError {})?; let checksum = crc32::checksum_ieee(buf.as_slice()); - self.write_at((metadata.header.self_lba + start) * block_size, &buf) + hndl.write_at((metadata.header.self_lba + start) * block_size, &buf) .await .context(WriteError { name: String::from("object"), @@ -568,7 +568,7 @@ impl NexusChild { let mut index: Vec = Vec::new(); - let (bdev, _desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let timestamp = now @@ -599,7 +599,7 @@ impl NexusChild { serialize_into(&mut writer, config).context(SerializeError {})?; let checksum = crc32::checksum_ieee(buf.as_slice()); - self.write_at((header.self_lba + start) * block_size, &buf) + hndl.write_at((header.self_lba + start) * block_size, &buf) .await .context(WriteError { name: String::from("object"), @@ -644,7 +644,7 @@ impl NexusChild { &mut self, metadata: &mut NexusMetaData, ) -> Result<(), MetaDataError> { - let (bdev, _desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; let block_size = bdev.block_len() as u64; let alignment = bdev.alignment(); @@ -658,7 +658,7 @@ impl NexusChild { .context(ReadAlloc { name: 
String::from("object"), })?; - self.read_at( + hndl.read_at( (self_lba + entry.data_start) * block_size, &mut buf, ) @@ -666,7 +666,7 @@ impl NexusChild { .context(ReadError { name: String::from("object"), })?; - self.write_at((self_lba + start) * block_size, &buf) + hndl.write_at((self_lba + start) * block_size, &buf) .await .context(WriteError { name: String::from("object"), @@ -695,7 +695,7 @@ impl NexusChild { pub async fn create_metadata( &mut self, ) -> Result { - let (bdev, _desc) = self.get_dev().context(NexusChildError {})?; + let (bdev, _hndl) = self.get_dev().context(NexusChildError {})?; if let Some(partition) = self .probe_label() diff --git a/mayastor/tests/nexus_label.rs b/mayastor/tests/nexus_label.rs index 8e9014e53..bed66b53c 100644 --- a/mayastor/tests/nexus_label.rs +++ b/mayastor/tests/nexus_label.rs @@ -133,10 +133,10 @@ async fn label_child() { let mut buffer = hdl.dma_malloc(34 * 512).unwrap(); file.read_exact(&mut buffer.as_mut_slice()).unwrap(); // write out the MBR + primary GPT header + GPT partition table - child.write_at(0, &buffer).await.unwrap(); + hdl.write_at(0, &buffer).await.unwrap(); let mut read_buffer = hdl.dma_malloc(34 * 512).unwrap(); - child.read_at(0, &mut read_buffer).await.unwrap(); + hdl.read_at(0, &mut read_buffer).await.unwrap(); for (i, o) in buffer.as_slice().iter().zip(read_buffer.as_slice().iter()) { assert_eq!(i, o) } @@ -146,13 +146,10 @@ async fn label_child() { let mut buffer = hdl.dma_malloc(33 * 512).unwrap(); file.read_exact(&mut buffer.as_mut_slice()).unwrap(); // write out the secondary GPT partition table + GPT header - child.write_at(131_039 * 512, &buffer).await.unwrap(); + hdl.write_at(131_039 * 512, &buffer).await.unwrap(); let mut read_buffer = hdl.dma_malloc(33 * 512).unwrap(); - child - .read_at(131_039 * 512, &mut read_buffer) - .await - .unwrap(); + hdl.read_at(131_039 * 512, &mut read_buffer).await.unwrap(); for (i, o) in buffer.as_slice().iter().zip(read_buffer.as_slice().iter()) { 
assert_eq!(i, o) } From 591d1c1a5dc7d4383bf32317cfe659eb8069b3d8 Mon Sep 17 00:00:00 2001 From: Devdutt Shenoi Date: Mon, 26 Oct 2020 18:22:35 +0530 Subject: [PATCH 20/92] doc: correct build with nix-shell step --- doc/build.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build.md b/doc/build.md index 170454f48..c9b512c86 100644 --- a/doc/build.md +++ b/doc/build.md @@ -45,7 +45,7 @@ Installation of a [nix package manager](https://nixos.org/nix/download.html) on other distros: ```bash -curl https://nixos.org/nix/install | sh +curl -L https://nixos.org/nix/install | sh ``` We have provided a `shell.nix` file that can be used to build and compile From 92328c61b9c2e7d12f2eb091c4e55863a2979fe3 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Tue, 27 Oct 2020 07:19:40 +0000 Subject: [PATCH 21/92] Remove redundant source field in error message --- mayastor/src/bdev/nexus/nexus_child.rs | 2 +- mayastor/src/bdev/nexus/nexus_label.rs | 10 +++------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index aa00f6890..5ba17763b 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -44,7 +44,7 @@ pub enum ChildError { ChildInvalid {}, #[snafu(display("Opening child bdev without bdev pointer"))] OpenWithoutBdev {}, - #[snafu(display("Failed to create a BdevHandle for child: {}", source))] + #[snafu(display("Failed to create a BdevHandle for child"))] HandleCreate { source: CoreError }, } diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index ddb688385..106eb450b 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -79,9 +79,9 @@ use crate::{ pub enum LabelError { #[snafu(display("{}", source))] NexusChildError { source: ChildError }, - #[snafu(display("Error reading {}: {}", name, source))] + #[snafu(display("Error reading {}", 
name))] ReadError { name: String, source: CoreError }, - #[snafu(display("Write error: {}", source))] + #[snafu(display("Write error"))] WriteError { name: String, source: CoreError }, #[snafu(display( "Failed to allocate buffer for reading {}: {}", @@ -130,11 +130,7 @@ pub enum LabelError { BackupLocation {}, #[snafu(display("GPT partition table location is incorrect"))] PartitionTableLocation {}, - #[snafu(display( - "Could not get handle for child bdev {}: {}", - name, - source - ))] + #[snafu(display("Could not get handle for child bdev {}", name,))] HandleCreate { name: String, source: ChildError }, } From 463af18226e2b2a79478a8f275d5eaa97cce47ed Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 26 Oct 2020 12:45:36 +0000 Subject: [PATCH 22/92] CAS-493 Separate Nexus read and write channels. To make the logic for controlling write-only child bdevs more straightforward, the write-only count in the NexusChannel is removed and now we use a two separate vectors of BdevHandles, one for read operations and one for write (or other modifying) operations. --- mayastor/src/bdev/nexus/nexus_bdev.rs | 17 ++++---- mayastor/src/bdev/nexus/nexus_channel.rs | 50 ++++++++++++++--------- mayastor/src/bdev/nexus/nexus_fn_table.rs | 4 +- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 98d589f2b..1e810d3a7 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -708,7 +708,7 @@ impl Nexus { } let ch = NexusChannel::inner_from_channel(ch); - let (desc, ch) = ch.ch[ch.previous].io_tuple(); + let (desc, ch) = ch.read_ch[ch.previous].io_tuple(); let ret = Self::readv_impl(io, desc, ch); if ret != 0 { let bio = Bio::from(io); @@ -719,8 +719,7 @@ impl Nexus { /// read vectored io from the underlying children. 
pub(crate) fn readv(&self, io: &Bio, channels: &mut NexusChannelInner) { - // we use RR to read from the children also, set that we only need - // to read from one child before we complete the IO to the callee. + // we use RR to read from the children. let child = channels.child_select(); // if there is no buffer space for us allocated within the request @@ -736,7 +735,7 @@ impl Nexus { return; } - let (desc, ch) = channels.ch[child].io_tuple(); + let (desc, ch) = channels.read_ch[child].io_tuple(); let ret = Self::readv_impl(io.as_ptr(), desc, ch); @@ -778,7 +777,7 @@ impl Nexus { pub(crate) fn reset(&self, io: &Bio, channels: &NexusChannelInner) { // in case of resets, we want to reset all underlying children let results = channels - .ch + .write_ch .iter() .map(|c| unsafe { let (bdev, chan) = c.io_tuple(); @@ -806,7 +805,7 @@ impl Nexus { pub(crate) fn writev(&self, io: &Bio, channels: &NexusChannelInner) { // in case of writes, we want to write to all underlying children let results = channels - .ch + .write_ch .iter() .map(|c| unsafe { let (desc, chan) = c.io_tuple(); @@ -835,7 +834,7 @@ impl Nexus { pub(crate) fn unmap(&self, io: &Bio, channels: &NexusChannelInner) { let results = channels - .ch + .write_ch .iter() .map(|c| unsafe { let (desc, chan) = c.io_tuple(); @@ -861,7 +860,7 @@ impl Nexus { pub(crate) fn write_zeroes(&self, io: &Bio, channels: &NexusChannelInner) { let results = channels - .ch + .write_ch .iter() .map(|c| unsafe { let (b, c) = c.io_tuple(); @@ -893,7 +892,7 @@ impl Nexus { // for replicas, passthru only works with our vendor commands as the // underlying bdev is not nvmf let results = channels - .ch + .write_ch .iter() .map(|c| unsafe { debug!("nvme_admin on {}", c.get_bdev().driver()); diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index 7b3400edc..ba5c1a3a8 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -26,8 +26,8 @@ pub(crate) 
struct NexusChannel { #[repr(C)] #[derive(Debug)] pub(crate) struct NexusChannelInner { - pub(crate) ch: Vec, - pub(crate) write_only: usize, + pub(crate) write_ch: Vec, + pub(crate) read_ch: Vec, pub(crate) previous: usize, device: *mut c_void, } @@ -52,7 +52,8 @@ pub enum DREvent { impl NexusChannelInner { /// very simplistic routine to rotate between children for read operations pub(crate) fn child_select(&mut self) -> usize { - if self.previous != self.ch.len() - self.write_only - 1 { + debug_assert!(self.read_ch.len() > 0); + if self.previous < self.read_ch.len() - 1 { self.previous += 1; } else { self.previous = 0; @@ -74,37 +75,41 @@ impl NexusChannelInner { ); trace!( - "{}: Current number of IO channels {}", + "{}: Current number of IO channels write: {} read: {}", nexus.name, - self.ch.len(), + self.write_ch.len(), + self.read_ch.len(), ); // clear the vector of channels and reset other internal values, // clearing the values will drop any existing handles in the // channel - self.ch.clear(); + self.write_ch.clear(); + self.read_ch.clear(); self.previous = 0; - self.write_only = 0; - // iterate to over all our children which are in the open state + // iterate over all our children which are in the open state nexus .children .iter_mut() .filter(|c| c.state() == ChildState::Open) .for_each(|c| { - self.ch.push( + self.write_ch.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ) + ); + self.read_ch.push( + BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), + ); }); - if !self.ch.is_empty() { + // then add write-only children + if !self.read_ch.is_empty() { nexus .children .iter_mut() .filter(|c| c.rebuilding()) .map(|c| { - self.write_only += 1; - self.ch.push( + self.write_ch.push( BdevHandle::try_from(c.get_descriptor().unwrap()) .unwrap(), ) @@ -113,9 +118,10 @@ impl NexusChannelInner { } trace!( - "{}: New number of IO channels {} out of {} children", + "{}: New number of IO channels write:{} read:{} out of {} children", 
nexus.name, - self.ch.len(), + self.write_ch.len(), + self.read_ch.len(), nexus.children.len() ); @@ -134,9 +140,9 @@ impl NexusChannel { let ch = NexusChannel::from_raw(ctx); let mut channels = Box::new(NexusChannelInner { - ch: Vec::new(), + write_ch: Vec::new(), + read_ch: Vec::new(), previous: 0, - write_only: 0, device, }); @@ -145,9 +151,12 @@ impl NexusChannel { .iter_mut() .filter(|c| c.state() == ChildState::Open) .map(|c| { - channels.ch.push( + channels.write_ch.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ) + ); + channels.read_ch.push( + BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), + ); }) .for_each(drop); ch.inner = Box::into_raw(channels); @@ -159,7 +168,8 @@ impl NexusChannel { let nexus = unsafe { Nexus::from_raw(device) }; debug!("{} Destroying IO channels", nexus.bdev.name()); let inner = NexusChannel::from_raw(ctx).inner_mut(); - inner.ch.clear(); + inner.write_ch.clear(); + inner.read_ch.clear(); } /// function called when we receive a Dynamic Reconfigure event (DR) diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index 6c6affc93..14916185c 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -110,9 +110,11 @@ impl NexusFnTable { // set the fields that need to be (re)set per-attempt if nio.io_type() == io_type::READ { + // set that we only need to read from one child + // before we complete the IO to the callee. nio.reset(1); } else { - nio.reset(ch.ch.len()) + nio.reset(ch.write_ch.len()) } let nexus = nio.nexus_as_ref(); From 0edc2fd99485220dbdfb7c92f0f13136602db8bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Oct 2020 20:32:34 +0000 Subject: [PATCH 23/92] build(deps): bump systeminformation in /mayastor-test Bumps [systeminformation](https://github.com/sebhildebrandt/systeminformation) from 4.23.1 to 4.27.11. 
- [Release notes](https://github.com/sebhildebrandt/systeminformation/releases) - [Changelog](https://github.com/sebhildebrandt/systeminformation/blob/master/CHANGELOG.md) - [Commits](https://github.com/sebhildebrandt/systeminformation/commits) Signed-off-by: dependabot[bot] --- mayastor-test/package-lock.json | 6 +++--- mayastor-test/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mayastor-test/package-lock.json b/mayastor-test/package-lock.json index 104d4111e..009cc60ec 100644 --- a/mayastor-test/package-lock.json +++ b/mayastor-test/package-lock.json @@ -2763,9 +2763,9 @@ } }, "systeminformation": { - "version": "4.23.1", - "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.23.1.tgz", - "integrity": "sha512-gtqfvz5jUIMqWn0kkdkV4G8uiLmJckQ+z6aKy1uyE0OPU/6tStubahtZDiF0ajSRVJht+Vd4pX5DDwQLhAapww==" + "version": "4.27.11", + "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.27.11.tgz", + "integrity": "sha512-U7bigXbOnsB8k1vNHS0Y13RCsRz5/UohiUmND+3mMUL6vfzrpbe/h4ZqewowB+B+tJNnmGFDj08Z8xGfYo45dQ==" }, "table": { "version": "5.4.6", diff --git a/mayastor-test/package.json b/mayastor-test/package.json index 6447ffc8d..25192e8d5 100644 --- a/mayastor-test/package.json +++ b/mayastor-test/package.json @@ -24,7 +24,7 @@ "read": "^1.0.7", "semistandard": "^14.2.0", "sleep-promise": "^8.0.1", - "systeminformation": "^4.23.1", + "systeminformation": "^4.27.11", "wtfnode": "^0.8.1" }, "author": "Jan Kryl ", From d4d0824c676f3f42a0d02dbeafc5adae3e1ab843 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Wed, 28 Oct 2020 07:57:22 +0000 Subject: [PATCH 24/92] clippy fix and code review comments --- mayastor/src/bdev/nexus/nexus_bdev.rs | 14 ++++---- mayastor/src/bdev/nexus/nexus_channel.rs | 40 +++++++++++------------ mayastor/src/bdev/nexus/nexus_fn_table.rs | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs 
b/mayastor/src/bdev/nexus/nexus_bdev.rs index 1e810d3a7..19f9f8ac5 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -708,7 +708,7 @@ impl Nexus { } let ch = NexusChannel::inner_from_channel(ch); - let (desc, ch) = ch.read_ch[ch.previous].io_tuple(); + let (desc, ch) = ch.readers[ch.previous].io_tuple(); let ret = Self::readv_impl(io, desc, ch); if ret != 0 { let bio = Bio::from(io); @@ -735,7 +735,7 @@ impl Nexus { return; } - let (desc, ch) = channels.read_ch[child].io_tuple(); + let (desc, ch) = channels.readers[child].io_tuple(); let ret = Self::readv_impl(io.as_ptr(), desc, ch); @@ -777,7 +777,7 @@ impl Nexus { pub(crate) fn reset(&self, io: &Bio, channels: &NexusChannelInner) { // in case of resets, we want to reset all underlying children let results = channels - .write_ch + .writers .iter() .map(|c| unsafe { let (bdev, chan) = c.io_tuple(); @@ -805,7 +805,7 @@ impl Nexus { pub(crate) fn writev(&self, io: &Bio, channels: &NexusChannelInner) { // in case of writes, we want to write to all underlying children let results = channels - .write_ch + .writers .iter() .map(|c| unsafe { let (desc, chan) = c.io_tuple(); @@ -834,7 +834,7 @@ impl Nexus { pub(crate) fn unmap(&self, io: &Bio, channels: &NexusChannelInner) { let results = channels - .write_ch + .writers .iter() .map(|c| unsafe { let (desc, chan) = c.io_tuple(); @@ -860,7 +860,7 @@ impl Nexus { pub(crate) fn write_zeroes(&self, io: &Bio, channels: &NexusChannelInner) { let results = channels - .write_ch + .writers .iter() .map(|c| unsafe { let (b, c) = c.io_tuple(); @@ -892,7 +892,7 @@ impl Nexus { // for replicas, passthru only works with our vendor commands as the // underlying bdev is not nvmf let results = channels - .write_ch + .writers .iter() .map(|c| unsafe { debug!("nvme_admin on {}", c.get_bdev().driver()); diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index ba5c1a3a8..55835f2f9 100644 --- 
a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -26,8 +26,8 @@ pub(crate) struct NexusChannel { #[repr(C)] #[derive(Debug)] pub(crate) struct NexusChannelInner { - pub(crate) write_ch: Vec, - pub(crate) read_ch: Vec, + pub(crate) writers: Vec, + pub(crate) readers: Vec, pub(crate) previous: usize, device: *mut c_void, } @@ -52,8 +52,8 @@ pub enum DREvent { impl NexusChannelInner { /// very simplistic routine to rotate between children for read operations pub(crate) fn child_select(&mut self) -> usize { - debug_assert!(self.read_ch.len() > 0); - if self.previous < self.read_ch.len() - 1 { + debug_assert!(!self.readers.is_empty()); + if self.previous < self.readers.len() - 1 { self.previous += 1; } else { self.previous = 0; @@ -77,15 +77,15 @@ impl NexusChannelInner { trace!( "{}: Current number of IO channels write: {} read: {}", nexus.name, - self.write_ch.len(), - self.read_ch.len(), + self.writers.len(), + self.readers.len(), ); // clear the vector of channels and reset other internal values, // clearing the values will drop any existing handles in the // channel - self.write_ch.clear(); - self.read_ch.clear(); + self.writers.clear(); + self.readers.clear(); self.previous = 0; // iterate over all our children which are in the open state @@ -94,22 +94,22 @@ impl NexusChannelInner { .iter_mut() .filter(|c| c.state() == ChildState::Open) .for_each(|c| { - self.write_ch.push( + self.writers.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), ); - self.read_ch.push( + self.readers.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), ); }); // then add write-only children - if !self.read_ch.is_empty() { + if !self.readers.is_empty() { nexus .children .iter_mut() .filter(|c| c.rebuilding()) .map(|c| { - self.write_ch.push( + self.writers.push( BdevHandle::try_from(c.get_descriptor().unwrap()) .unwrap(), ) @@ -120,8 +120,8 @@ impl NexusChannelInner { trace!( "{}: New number of IO channels write:{} 
read:{} out of {} children", nexus.name, - self.write_ch.len(), - self.read_ch.len(), + self.writers.len(), + self.readers.len(), nexus.children.len() ); @@ -140,8 +140,8 @@ impl NexusChannel { let ch = NexusChannel::from_raw(ctx); let mut channels = Box::new(NexusChannelInner { - write_ch: Vec::new(), - read_ch: Vec::new(), + writers: Vec::new(), + readers: Vec::new(), previous: 0, device, }); @@ -151,10 +151,10 @@ impl NexusChannel { .iter_mut() .filter(|c| c.state() == ChildState::Open) .map(|c| { - channels.write_ch.push( + channels.writers.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), ); - channels.read_ch.push( + channels.readers.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), ); }) @@ -168,8 +168,8 @@ impl NexusChannel { let nexus = unsafe { Nexus::from_raw(device) }; debug!("{} Destroying IO channels", nexus.bdev.name()); let inner = NexusChannel::from_raw(ctx).inner_mut(); - inner.write_ch.clear(); - inner.read_ch.clear(); + inner.writers.clear(); + inner.readers.clear(); } /// function called when we receive a Dynamic Reconfigure event (DR) diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index 14916185c..059bf3413 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -114,7 +114,7 @@ impl NexusFnTable { // before we complete the IO to the callee. 
nio.reset(1); } else { - nio.reset(ch.write_ch.len()) + nio.reset(ch.writers.len()) } let nexus = nio.nexus_as_ref(); From ad06574aad75ba8e7ca673eef36cbfa6c2e73a3c Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Wed, 28 Oct 2020 12:29:20 +0000 Subject: [PATCH 25/92] Add spdk null bdev driver Useful for benchmarking the I/O stack with minimal overhead --- mayastor/src/bdev/dev.rs | 2 + mayastor/src/bdev/dev/null.rs | 200 ++++++++++++++++++++++++++++++++++ spdk-sys/wrapper.h | 1 + 3 files changed, 203 insertions(+) create mode 100644 mayastor/src/bdev/dev/null.rs diff --git a/mayastor/src/bdev/dev.rs b/mayastor/src/bdev/dev.rs index 2e1380eeb..ae8314b82 100644 --- a/mayastor/src/bdev/dev.rs +++ b/mayastor/src/bdev/dev.rs @@ -34,6 +34,7 @@ mod aio; mod iscsi; mod loopback; mod malloc; +mod null; mod nvme; mod nvmf; mod uring; @@ -53,6 +54,7 @@ impl Uri { // really should not be used other than for testing "aio" => Ok(Box::new(aio::Aio::try_from(&url)?)), "malloc" => Ok(Box::new(malloc::Malloc::try_from(&url)?)), + "null" => Ok(Box::new(null::Null::try_from(&url)?)), // retain this for the time being for backwards compatibility "bdev" => Ok(Box::new(loopback::Loopback::try_from(&url)?)), diff --git a/mayastor/src/bdev/dev/null.rs b/mayastor/src/bdev/dev/null.rs new file mode 100644 index 000000000..22a76e9f9 --- /dev/null +++ b/mayastor/src/bdev/dev/null.rs @@ -0,0 +1,200 @@ +//! As the name implies, this is a dummy driver that discards all writes and +//! returns undefined data for reads. It's useful for benchmarking the I/O stack +//! with minimal overhead and should *NEVER* be used with *real* data. 
+use crate::{ + bdev::util::uri, + nexus_uri::{ + NexusBdevError, + {self}, + }, +}; +use async_trait::async_trait; +use std::{collections::HashMap, convert::TryFrom}; +use url::Url; +use uuid::Uuid; + +#[derive(Debug)] +pub struct Null { + /// the name of the bdev we created, this is equal to the URI path minus + /// the leading '/' + name: String, + /// alias which can be used to open the bdev + alias: String, + /// the number of blocks the device should have + num_blocks: u64, + /// the size of a single block if no blk_size is given we default to 512 + blk_size: u32, + /// uuid of the spdk bdev + uuid: Option, +} +use crate::{ + bdev::{CreateDestroy, GetName}, + core::Bdev, + ffihelper::{cb_arg, done_errno_cb, ErrnoResult, IntoCString}, +}; +use futures::channel::oneshot; +use nix::errno::Errno; +use snafu::ResultExt; + +impl TryFrom<&Url> for Null { + type Error = NexusBdevError; + + fn try_from(uri: &Url) -> Result { + let segments = uri::segments(uri); + if segments.is_empty() { + return Err(NexusBdevError::UriInvalid { + uri: uri.to_string(), + message: "no path segments".to_string(), + }); + } + + let mut parameters: HashMap = + uri.query_pairs().into_owned().collect(); + + let blk_size: u32 = if let Some(value) = parameters.remove("blk_size") { + value.parse().context(nexus_uri::IntParamParseError { + uri: uri.to_string(), + parameter: String::from("blk_size"), + })? + } else { + 512 + }; + + if blk_size != 512 && blk_size != 4096 { + return Err(NexusBdevError::UriInvalid { + uri: uri.to_string(), + message: + "invalid blk_size specified must be one of 512 or 4096" + .to_string(), + }); + } + + let size: u32 = if let Some(value) = parameters.remove("size_mb") { + value.parse().context(nexus_uri::IntParamParseError { + uri: uri.to_string(), + parameter: String::from("size_mb"), + })? 
+ } else { + 0 + }; + + let num_blocks: u32 = + if let Some(value) = parameters.remove("num_blocks") { + value.parse().context(nexus_uri::IntParamParseError { + uri: uri.to_string(), + parameter: String::from("blk_size"), + })? + } else { + 0 + }; + + if size != 0 && num_blocks != 0 { + return Err(NexusBdevError::UriInvalid { + uri: uri.to_string(), + message: "conflicting parameters num_blocks and size_mb are mutually exclusive" + .to_string(), + }); + } + + let uuid = uri::uuid(parameters.remove("uuid")).context( + nexus_uri::UuidParamParseError { + uri: uri.to_string(), + }, + )?; + + Ok(Self { + name: uri.path()[1 ..].into(), + alias: uri.to_string(), + num_blocks: if num_blocks != 0 { + num_blocks + } else { + (size << 20) / blk_size + } as u64, + blk_size, + uuid: uuid.or_else(|| Some(Uuid::new_v4())), + }) + } +} + +impl GetName for Null { + fn get_name(&self) -> String { + self.name.clone() + } +} + +#[async_trait(?Send)] +impl CreateDestroy for Null { + type Error = NexusBdevError; + + async fn create(&self) -> Result { + if Bdev::lookup_by_name(&self.name).is_some() { + return Err(NexusBdevError::BdevExists { + name: self.name.clone(), + }); + } + + let cname = self.name.clone().into_cstring(); + + let opts = spdk_sys::spdk_null_bdev_opts { + name: cname.as_ptr(), + uuid: std::ptr::null(), + num_blocks: self.num_blocks, + block_size: self.blk_size, + md_size: 0, + md_interleave: false, + dif_type: spdk_sys::SPDK_DIF_DISABLE, + dif_is_head_of_md: false, + }; + + let ret = unsafe { + let mut bdev: *mut spdk_sys::spdk_bdev = std::ptr::null_mut(); + spdk_sys::bdev_null_create(&mut bdev, &opts) + }; + + if ret != 0 { + Err(NexusBdevError::CreateBdev { + source: Errno::from_i32(ret), + name: self.name.clone(), + }) + } else { + self.uuid.map(|u| { + Bdev::lookup_by_name(&self.name).map(|mut b| { + b.set_uuid(Some(u.to_string())); + if !b.add_alias(&self.alias) { + error!( + "Failed to add alias {} to device {}", + self.alias, + self.get_name() + ); + } + }) + 
}); + Ok(self.name.clone()) + } + } + + async fn destroy(self: Box) -> Result<(), Self::Error> { + if let Some(bdev) = Bdev::lookup_by_name(&self.name) { + let (s, r) = oneshot::channel::>(); + unsafe { + spdk_sys::bdev_null_delete( + bdev.as_ptr(), + Some(done_errno_cb), + cb_arg(s), + ) + }; + + r.await + .context(nexus_uri::CancelBdev { + name: self.name.clone(), + })? + .context(nexus_uri::DestroyBdev { + name: self.name, + }) + } else { + Err(NexusBdevError::BdevNotFound { + name: self.name, + }) + } + } +} diff --git a/spdk-sys/wrapper.h b/spdk-sys/wrapper.h index 624ad9a8b..c2d1dffba 100644 --- a/spdk-sys/wrapper.h +++ b/spdk-sys/wrapper.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include From 774cb749e37310c31003eb719cc323ca8b080ffd Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Thu, 29 Oct 2020 12:45:31 +0000 Subject: [PATCH 26/92] env: Remove support for legacy INI configuration The upcoming SPDK 20.10 removes support for the legacy INI configuration so do the same in Mayastor, dropping the -c CLI option. Use of the legacy config had already been removed from the tests. 
--- mayastor/src/core/env.rs | 55 ---------------------------------------- 1 file changed, 55 deletions(-) diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index ae52689b2..1526317a4 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -23,10 +23,6 @@ use tokio::runtime::Builder; use spdk_sys::{ maya_log, spdk_app_shutdown_cb, - spdk_conf_allocate, - spdk_conf_free, - spdk_conf_read, - spdk_conf_set_as_default, spdk_log_level, spdk_log_open, spdk_log_set_level, @@ -81,9 +77,6 @@ fn parse_mb(src: &str) -> Result { setting(structopt::clap::AppSettings::ColoredHelp) )] pub struct MayastorCliArgs { - #[structopt(short = "c")] - /// Path to the configuration file if any - pub config: Option, #[structopt(short = "g", default_value = grpc::default_endpoint_str())] /// IP address and port (optional) for the gRPC server to listen on pub grpc_endpoint: String, @@ -140,7 +133,6 @@ impl Default for MayastorCliArgs { rpc_address: "/var/tmp/mayastor.sock".to_string(), no_pci: true, log_components: vec![], - config: None, mayastor_config: None, child_status_config: None, hugedir: None, @@ -181,8 +173,6 @@ extern "C" { pub enum EnvError { #[snafu(display("Failed to install signal handler"))] SetSigHdl { source: nix::Error }, - #[snafu(display("Failed to read configuration file: {}", reason))] - ParseConfig { reason: String }, #[snafu(display("Failed to initialize logging subsystem"))] InitLog, #[snafu(display("Failed to initialize {} target", target))] @@ -194,7 +184,6 @@ type Result = std::result::Result; /// Mayastor argument #[derive(Debug, Clone)] pub struct MayastorEnvironment { - pub config: Option, pub node_name: String, pub mbus_endpoint: Option, pub grpc_endpoint: Option, @@ -229,7 +218,6 @@ pub struct MayastorEnvironment { impl Default for MayastorEnvironment { fn default() -> Self { Self { - config: None, node_name: "mayastor-node".into(), mbus_endpoint: None, grpc_endpoint: None, @@ -338,7 +326,6 @@ impl MayastorEnvironment { 
grpc_endpoint: Some(grpc::endpoint(args.grpc_endpoint)), mbus_endpoint: subsys::mbus_endpoint(args.mbus_endpoint), node_name: args.node_name.unwrap_or_else(|| "mayastor-node".into()), - config: args.config, mayastor_config: args.mayastor_config, child_status_config: args.child_status_config, log_component: args.log_components, @@ -386,44 +373,6 @@ impl MayastorEnvironment { Ok(()) } - /// read the config file we use this mostly for testing - fn read_config_file(&self) -> Result<()> { - if self.config.is_none() { - return Ok(()); - } - - let path = - CString::new(self.config.as_ref().unwrap().as_str()).unwrap(); - let config = unsafe { spdk_conf_allocate() }; - - assert_ne!(config, std::ptr::null_mut()); - - if unsafe { spdk_conf_read(config, path.as_ptr()) } != 0 { - return Err(EnvError::ParseConfig { - reason: "Failed to read file from disk".into(), - }); - } - - let rc = unsafe { - if spdk_sys::spdk_conf_first_section(config).is_null() { - Err(EnvError::ParseConfig { - reason: "failed to parse config file".into(), - }) - } else { - Ok(()) - } - }; - - if rc.is_ok() { - trace!("Setting default config to {:p}", config); - unsafe { spdk_conf_set_as_default(config) }; - } else { - unsafe { spdk_conf_free(config) } - } - - rc - } - /// construct an array of options to be passed to EAL and start it fn initialize_eal(&self) { let mut args: Vec = Vec::new(); @@ -677,10 +626,6 @@ impl MayastorEnvironment { self.init_logger().unwrap(); self.load_yaml_config(); - // load the .ini format file, still here to allow CI passing. 
There is - // no real harm of loading this ini file as long as there are no - // conflicting bdev definitions - self.read_config_file().unwrap(); self.load_child_status(); From 7f8e7ce9f049f76a4fc374f77c123c4a7765cb4a Mon Sep 17 00:00:00 2001 From: Sumindar <32561380+Sumindar@users.noreply.github.com> Date: Thu, 29 Oct 2020 22:09:28 +0530 Subject: [PATCH 27/92] Update README.md some changes --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index fda43a18b..db728a6f7 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,7 @@ buf.fill(0xff); // same IO. Put differently. A single IO becomes three IOs bd.write_at(0, &mut buf).await.unwrap(); -// fill the buffer with zeroes and read back the data +// fill the buffer with zeros and read back the data buf.fill(0x00); bd.read_at(0, &mut buf).await.unwrap(); @@ -125,11 +125,11 @@ and they want the most simple (but fast) storage device. For a more elaborate ex To communicate with the children, the Nexus uses industry standard protocols. Currently, the Nexus has support for direct access to local storage and remote storage using NVMF or iSCSI. The other advantage is that if you were to remove -the Nexus out of the data path, you would still ba able to access your data as if Mayastor was not there. +the Nexus out of the data path, you would still be able to access your data as if Mayastor was not there. The Nexus itself does not store any data and in its most simplistic form the Nexus is a proxy towards real storage devices where the transport may vary. It can however, as mentioned, "transform" the data, which makes it possible to -store copies of your data within different cloud systems. One of the other ideas we have is to write block device on top +store copies of your data within different cloud systems. 
One of the other ideas we have is to write a block device on top of a S3 bucket such that you can create PVCs from [Minio](https://min.io/), AWS or any other compatible S3 bucket. This simplifies the replication model for the Nexus itself somewhat but creates a bit more on the buffering side of things. What model fits best for you? You get to decide! @@ -169,10 +169,10 @@ vhost-user code can be seen in the link section (still in C). ## Client

-Although that a client for gRPC server is not required for the product, +Although a client for gRPC server is not required for the product, it is important for testing and troubleshooting. The client allows you to manage storage pools and replicas and just use `--help` -option if not sure how to use it. CSI services are not covered by the client. +option if you are not sure how to use it. CSI services are not covered by the client.

In following example of a client session is assumed that mayastor has been From d36111d7fd533904f83797286d3d5a88736cae8a Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 30 Oct 2020 10:13:05 +0000 Subject: [PATCH 28/92] CAS-489: limit config sync to pools The sync_config used by mayastor during configuration changes now saves only pools into the config file. Loading and "manually" writing of base bdev's and nexus is still supported as it facilitates testing. This somewhat improves the current k8s config by allowing replicas to be revived after a crash/restart. --- deploy/mayastor-daemonset.yaml | 15 +++++++ mayastor/src/core/env.rs | 6 +-- mayastor/src/subsys/config/mod.rs | 68 ++++++++++++------------------- 3 files changed, 45 insertions(+), 44 deletions(-) diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 69e3c62a7..9fe85ad95 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -46,10 +46,13 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP + - name: IMPORT_NEXUSES + value: "false" args: - "-N$(MY_NODE_NAME)" - "-g$(MY_POD_IP)" - "-nnats" + - "-y/var/local/mayastor/config.yaml" securityContext: privileged: true volumeMounts: @@ -57,6 +60,10 @@ spec: mountPath: /dev - name: dshm mountPath: /dev/shm + - name: configlocation + mountPath: /var/local/mayastor/ + - name: config + mountPath: /var/local/mayastor/config.yaml resources: limits: cpu: "1" @@ -82,3 +89,11 @@ spec: - name: hugepage emptyDir: medium: HugePages + - name: configlocation + hostPath: + path: /var/local/mayastor/ + type: DirectoryOrCreate + - name: config + hostPath: + path: /var/local/mayastor/config.yaml + type: FileOrCreate diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 1526317a4..bcac62f21 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -146,7 +146,7 @@ pub static GLOBAL_RC: Lazy>> = Lazy::new(|| Arc::new(Mutex::new(-1))); /// keep track if we have received a signal already 
-pub static SIG_RECIEVED: Lazy = +pub static SIG_RECEIVED: Lazy = Lazy::new(|| AtomicBool::new(false)); /// FFI functions that are needed to initialize the environment @@ -299,12 +299,12 @@ unsafe extern "C" fn signal_trampoline(_: *mut c_void) { /// called on SIGINT and SIGTERM extern "C" fn mayastor_signal_handler(signo: i32) { - if SIG_RECIEVED.load(SeqCst) { + if SIG_RECEIVED.load(SeqCst) { return; } warn!("Received SIGNO: {}", signo); - SIG_RECIEVED.store(true, SeqCst); + SIG_RECEIVED.store(true, SeqCst); unsafe { spdk_thread_send_critical_msg( Mthread::get_init().0, diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs index f917cedd4..b2dc68195 100644 --- a/mayastor/src/subsys/config/mod.rs +++ b/mayastor/src/subsys/config/mod.rs @@ -43,7 +43,7 @@ use crate::{ lvs::Lvs, nexus_uri::bdev_create, pool::PoolsIter, - replica::{self, ReplicaIter, ShareType}, + replica::{ReplicaIter, ShareType}, subsys::{ config::opts::{ BdevOpts, @@ -156,6 +156,8 @@ pub struct Config { pub nexus_opts: NexusOpts, /// error store opts pub err_store_opts: ErrStoreOpts, + /// list of pools to create on load + pub pools: Option>, /// /// The next options are intended for usage during testing /// @@ -163,9 +165,7 @@ pub struct Config { pub base_bdevs: Option>, /// list of nexus bdevs that will create the base bdevs implicitly pub nexus_bdevs: Option>, - /// list of pools to create on load, the base_bdevs should be created first - pub pools: Option>, - /// any base bdevs created implicitly share them over nvmf + /// any base bdevs created implicitly are shared over nvmf pub implicit_share_base: bool, /// flag to enable or disable config sync pub sync_disable: bool, @@ -313,6 +313,25 @@ impl Config { Ok(current) } + /// write the current pool configuration to disk + pub fn write_pools

(&self, file: P) -> Result<(), std::io::Error> + where + P: AsRef, + { + let pools = serde_json::json!({ + "pools": self.pools.clone() + }); + + if let Ok(s) = serde_yaml::to_string(&pools) { + let mut file = File::create(file)?; + return file.write_all(s.as_bytes()); + } + Err(std::io::Error::new( + std::io::ErrorKind::Other, + "failed to serialize the pool config", + )) + } + /// write the current configuration to disk pub fn write

(&self, file: P) -> Result<(), std::io::Error> where @@ -483,38 +502,6 @@ impl Config { failures } - /// Share any pool replicas defined in the config file. - async fn share_replicas(&self) { - if let Some(pools) = self.pools.as_ref() { - let replicas = pools - .iter() - .map(|p| { - p.replicas - .iter() - .filter(|r| r.share.is_some()) - .filter_map(|replica| { - ReplicaIter::new() - .find(|dev| dev.get_uuid() == replica.name) - .map(|dev| (dev, replica.share.unwrap())) - }) - .collect::>() - }) - .flatten() - .collect::>(); - - for (dev, share) in replicas { - if let Err(error) = dev.share(share).await { - error!( - "Failed to share {} over {:?}, error={}", - dev.get_uuid(), - share, - error - ); - } - } - } - } - pub fn import_nexuses() -> bool { match std::env::var_os("IMPORT_NEXUSES") { Some(val) => val.into_string().unwrap().parse::().unwrap(), @@ -535,7 +522,6 @@ impl Config { // the nexus create will fail let mut errors = self.create_pools().await; - self.share_replicas().await; if Self::import_nexuses() { errors += self.create_nexus_bdevs().await; @@ -553,7 +539,7 @@ impl Config { pub(crate) fn export_config() -> Result<(), std::io::Error> { let cfg = Config::get().refresh().unwrap(); match cfg.source.as_ref() { - Some(target) => cfg.write(&target), + Some(target) => cfg.write_pools(&target), // no config file to export to None => Ok(()), } @@ -585,7 +571,7 @@ pub struct BaseBdev { pub uri: String, } -#[derive(Debug, Default, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Serialize, Deserialize, Clone)] /// Pools that we create. Future work will include the ability to create RAID0 /// or RAID5. 
pub struct Pool { @@ -593,7 +579,7 @@ pub struct Pool { pub name: String, /// bdevs to create outside of the nexus control pub disks: Vec, - /// list of replicas to share on load + /// list of replicas (not required, informational only) pub replicas: Vec, } @@ -607,7 +593,7 @@ impl From<&Pool> for rpc::mayastor::CreatePoolRequest { } } -#[derive(Debug, Default, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Serialize, Deserialize, Clone)] /// Pool replicas that we share via `ShareType` pub struct Replica { /// name of the replica From 1e76bb9746cda1b5dbadfbf4029c863fcb51491f Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Thu, 22 Oct 2020 12:05:22 +0100 Subject: [PATCH 29/92] spdk-sys: Remove --without-vhost The combination of --with-crypto and --without-vhost leads to build failure: /usr/bin/ld.bfd: /home/user/src/spdk/dpdk/build/lib/librte_pmd_qat.a(common_qat_qat_qp.c.o): in function `qat_crc_verify': /home/user/src/spdk/dpdk/build-tmp/../drivers/crypto/qat/qat_sym.h:154: undefined reference to `rte_net_crc_calc' /usr/bin/ld.bfd: /home/user/src/spdk/dpdk/build/lib/librte_pmd_qat.a(common_qat_qat_qp.c.o): in function `qat_crc_generate': /home/user/src/spdk/dpdk/build-tmp/../drivers/crypto/qat/qat_sym.h:178: undefined reference to `rte_net_crc_calc' collect2: error: ld returned 1 exit status make[2]: *** [/home/user/src/spdk/mk/spdk.app.mk:65: /home/jteh/src/spdk/build/bin/spdk_trace_record] Error 1 make[1]: *** [/home/user/src/spdk/mk/spdk.subdirs.mk:44: trace_record] Error 2 due to dpdk/012affe1 introducing a dependence on rte_net when building with crypto, drop the latter. The --without-vhost may have been introduced due to conflicting vhost implementations in SPDK and DPDK, which appears to have been resolved upstream. 
--- nix/pkgs/libspdk/default.nix | 2 -- spdk-sys/build.sh | 2 -- 2 files changed, 4 deletions(-) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index f8bde2f56..b35ef6e2e 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -52,7 +52,6 @@ let configureFlags = [ "--target-arch=nehalem" "--without-isal" - "--without-vhost" "--with-iscsi-initiator" "--with-crypto" "--with-uring" @@ -76,7 +75,6 @@ let make -j`nproc` find . -type f -name 'libspdk_event_nvmf.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete - #find . -type f -name 'librte_vhost.a' -delete $CC -shared -o libspdk.so \ -lc -laio -liscsi -lnuma -ldl -lrt -luuid -lpthread -lcrypto \ diff --git a/spdk-sys/build.sh b/spdk-sys/build.sh index d3fa6eceb..b5e3d2520 100755 --- a/spdk-sys/build.sh +++ b/spdk-sys/build.sh @@ -13,7 +13,6 @@ pushd spdk || { echo "Can not find spdk directory"; exit; } --target-arch=nehalem \ --disable-tests \ --without-isal \ - --without-vhost \ --with-iscsi-initiator \ --with-crypto \ --with-uring \ @@ -24,7 +23,6 @@ make -j $(nproc) # delete things we for sure do not want link find . -type f -name 'libspdk_event_nvmf.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete -#find . -type f -name 'librte_vhost.a' -delete # the event libraries are the libraries that parse configuration files # we do our own config file parsing, and we setup our own targets. From 682e802e7db2b9517bda67df540e9e7d7428856c Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Thu, 22 Oct 2020 18:24:53 +0100 Subject: [PATCH 30/92] mayastor: Build against SPDK 20.10 Sync with upstream changes to spdk_nvmf_subsystem, spdk_nvmf_transport_opts and spdk_iscsi_opts. Specify public portal group in iscsi_portal_grp_create. Sync with renamed nvmf_tgt_accept. Remove libspdk_sock_uring.a before building libspdk.so to remove the use of io_uring for sockets as this is not supported on kernel 5.4. 
Update libspdk to build against the v20.10.x-mayastor branch and update the spdk-sys readme. --- mayastor/src/lvs/lvs_pool.rs | 3 ++- mayastor/src/subsys/config/opts.rs | 15 ++++++++++----- mayastor/src/subsys/nvmf/subsystem.rs | 5 ++++- mayastor/src/subsys/nvmf/target.rs | 5 ++--- mayastor/src/target/iscsi.rs | 2 +- mayastor/src/target/nvmf.rs | 4 ++-- nix/pkgs/libspdk/default.nix | 7 ++++--- spdk-sys/README.md | 13 +++++-------- spdk-sys/build.rs | 1 + spdk-sys/build.sh | 1 + 10 files changed, 32 insertions(+), 24 deletions(-) diff --git a/mayastor/src/lvs/lvs_pool.rs b/mayastor/src/lvs/lvs_pool.rs index 05d658a84..26d30f80d 100644 --- a/mayastor/src/lvs/lvs_pool.rs +++ b/mayastor/src/lvs/lvs_pool.rs @@ -243,11 +243,12 @@ impl Lvs { /// Create a pool on base bdev pub async fn create(name: &str, bdev: &str) -> Result { let pool_name = name.into_cstring(); + let bdev_name = bdev.into_cstring(); let (sender, receiver) = pair::>(); unsafe { vbdev_lvs_create( - Bdev::lookup_by_name(bdev).unwrap().as_ptr(), + bdev_name.as_ptr(), pool_name.as_ptr(), 0, // We used to clear a pool with UNMAP but that takes awfully diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index bb206425f..ec96757eb 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -186,13 +186,10 @@ impl From for spdk_nvmf_transport_opts { max_aq_depth: o.max_aq_depth, num_shared_buffers: o.num_shared_buf, buf_cache_size: o.buf_cache_size, - max_srq_depth: o.max_srq_depth, - no_srq: o.no_srq, - c2h_success: o.ch2_success, dif_insert_or_strip: o.dif_insert_or_strip, - sock_priority: o.sock_priority, - acceptor_backlog: 0, abort_timeout_sec: 0, + association_timeout: 120000, + transport_specific: std::ptr::null(), } } } @@ -392,6 +389,10 @@ pub struct IscsiTgtOpts { error_recovery_level: u32, /// todo allow_duplicate_isid: bool, + /// todo + max_large_data_in_per_connection: u32, + /// todo + max_r2t_per_connection: u32, } impl Default for 
IscsiTgtOpts { @@ -415,6 +416,8 @@ impl Default for IscsiTgtOpts { immediate_data: true, error_recovery_level: 0, allow_duplicate_isid: false, + max_large_data_in_per_connection: 64, + max_r2t_per_connection: 64, } } } @@ -444,6 +447,8 @@ impl From<&IscsiTgtOpts> for spdk_iscsi_opts { ImmediateData: o.immediate_data, ErrorRecoveryLevel: o.error_recovery_level, AllowDuplicateIsid: o.allow_duplicate_isid, + MaxLargeDataInPerConnection: o.max_large_data_in_per_connection, + MaxR2TPerConnection: o.max_r2t_per_connection, } } } diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 437f2f55e..8242fdece 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -101,7 +101,10 @@ impl Debug for NvmfSubsystem { .field("subnqn", &self.0.as_ref().subnqn.as_str().to_string()) .field("sn", &self.0.as_ref().sn.as_str().to_string()) .field("mn", &self.0.as_ref().mn.as_str().to_string()) - .field("allow_any_host", &self.0.as_ref().allow_any_host) + .field( + "allow_any_host", + &self.0.as_ref().flags.allow_any_host(), + ) .field("listeners", &self.listeners_to_vec()) .finish() } diff --git a/mayastor/src/subsys/nvmf/target.rs b/mayastor/src/subsys/nvmf/target.rs index 2cc575842..8541eeed7 100644 --- a/mayastor/src/subsys/nvmf/target.rs +++ b/mayastor/src/subsys/nvmf/target.rs @@ -7,13 +7,13 @@ use std::{ use nix::errno::Errno; use spdk_sys::{ + nvmf_tgt_accept, spdk_env_get_core_count, spdk_nvmf_poll_group_destroy, spdk_nvmf_subsystem_create, spdk_nvmf_subsystem_set_mn, spdk_nvmf_target_opts, spdk_nvmf_tgt, - spdk_nvmf_tgt_accept, spdk_nvmf_tgt_create, spdk_nvmf_tgt_destroy, spdk_nvmf_tgt_listen, @@ -246,8 +246,7 @@ impl Target { } /// poll function that the acceptor runs extern "C" fn acceptor_poll(tgt: *mut c_void) -> i32 { - let tgt = tgt as *mut spdk_nvmf_tgt; - unsafe { spdk_nvmf_tgt_accept(tgt) }; + unsafe { nvmf_tgt_accept(tgt) }; 0 } diff --git a/mayastor/src/target/iscsi.rs 
b/mayastor/src/target/iscsi.rs index 5e7b3ede1..9f1a703c6 100644 --- a/mayastor/src/target/iscsi.rs +++ b/mayastor/src/target/iscsi.rs @@ -347,7 +347,7 @@ fn create_portal_group( let portal_port = CString::new(port_no.to_string()).unwrap(); let portal_host = CString::new(address.to_owned()).unwrap(); - let pg = unsafe { iscsi_portal_grp_create(pg_no) }; + let pg = unsafe { iscsi_portal_grp_create(pg_no, false) }; if pg.is_null() { return Err(Error::CreatePortalGroup {}); } diff --git a/mayastor/src/target/nvmf.rs b/mayastor/src/target/nvmf.rs index 4a58f2913..f7edb68bb 100644 --- a/mayastor/src/target/nvmf.rs +++ b/mayastor/src/target/nvmf.rs @@ -19,6 +19,7 @@ use once_cell::sync::Lazy; use snafu::{ResultExt, Snafu}; use spdk_sys::{ + nvmf_tgt_accept, spdk_nvme_transport_id, spdk_nvmf_poll_group, spdk_nvmf_poll_group_add, @@ -41,7 +42,6 @@ use spdk_sys::{ spdk_nvmf_subsystem_stop, spdk_nvmf_target_opts, spdk_nvmf_tgt, - spdk_nvmf_tgt_accept, spdk_nvmf_tgt_add_transport, spdk_nvmf_tgt_create, spdk_nvmf_tgt_destroy, @@ -500,7 +500,7 @@ impl Target { extern "C" fn acceptor_poll(target_ptr: *mut c_void) -> c_int { unsafe { let target = &mut *(target_ptr as *mut Self); - spdk_nvmf_tgt_accept(target.inner); + nvmf_tgt_accept(target.inner.cast()); } -1 } diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index b35ef6e2e..fb29368e6 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -19,13 +19,13 @@ let # Derivation attributes for production version of libspdk drvAttrs = rec { - version = "20.07"; + version = "20.10"; src = fetchFromGitHub { owner = "openebs"; repo = "spdk"; - rev = "b09bed5edaca1d827a6432ca602639d43e3e93a0"; - sha256 = "0y26p4m99gbnf6iz2vbai26msnry7m428g8q3icpg28izmnk00d1"; + rev = "46b25360887c5d19433f575c7ad14259721abc6f"; + sha256 = "0cjnpkqx95cgrk9kbm4drrd5piimprz7wsbiahsllm1j2avdzsfs"; #sha256 = stdenv.lib.fakeSha256; fetchSubmodules = true; }; @@ -74,6 +74,7 @@ let buildPhase = '' make -j`nproc` find 
. -type f -name 'libspdk_event_nvmf.a' -delete + find . -type f -name 'libspdk_sock_uring.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete $CC -shared -o libspdk.so \ diff --git a/spdk-sys/README.md b/spdk-sys/README.md index 017c4bae8..1a02318df 100644 --- a/spdk-sys/README.md +++ b/spdk-sys/README.md @@ -8,7 +8,7 @@ optional argument. nix-shell --arg nospdk true ``` -The above will result in a shell, where there is no SPDK. In order to develop an +The above results in a shell where there is no SPDK. In order to develop in an environment like this, it is assumed that you will have a local checkout of SPDK within the spdk-sys directory. @@ -16,18 +16,15 @@ within the spdk-sys directory. cd ${workspace}/spdk-sys git clone https://github.com/openebs/spdk cd spdk -git checkout mayastor-x.y +git checkout vYY.mm.x-mayastor git submodule update --init --recursive cd .. ./build.sh ``` -The above (when the proper values for x and y are satisfied) will result in a +The above (when the proper values for YY, mm and x are satisfied) results in a libspdk.so within the spdk directory. When building, the build script will pick up the library. -Note that when you want to switch back, you have to ensure, that the spdk dir is -removed (or renamed) to avoid it including or linking by accident. - - - +Note that when you want to switch back, you have to ensure that the spdk dir is +removed (or renamed) to avoid including or linking it by accident. 
diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 6fd2c5135..0a187bab7 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -84,6 +84,7 @@ fn main() { .whitelist_function("^vbdev_.*") .whitelist_function("^nvme_cmd_.*") .whitelist_function("^nvme_status_.*") + .whitelist_function("^nvmf_tgt_accept") .blacklist_type("^longfunc") .whitelist_var("^NVMF.*") .whitelist_var("^SPDK.*") diff --git a/spdk-sys/build.sh b/spdk-sys/build.sh index b5e3d2520..ffb95d0ca 100755 --- a/spdk-sys/build.sh +++ b/spdk-sys/build.sh @@ -22,6 +22,7 @@ make -j $(nproc) # delete things we for sure do not want link find . -type f -name 'libspdk_event_nvmf.a' -delete +find . -type f -name 'libspdk_sock_uring.a' -delete find . -type f -name 'libspdk_ut_mock.a' -delete # the event libraries are the libraries that parse configuration files From 98339db4cad31f610b394efb9bf316709b2bbf3f Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Mon, 2 Nov 2020 00:18:54 +0000 Subject: [PATCH 31/92] CAS-490 clean up nexus "share" code - refactored "impl Nexus" to use Share trait implementation as appropriate - removed unused file: mayastor/src/bdev/nexus/nexus_iscsi.rs - removed unused file: mayastor/src/bdev/nexus/nexus_nvmf.rs - removed dead code from file: mayastor/src/target/nvmf.rs Note that the ability to encrypt the share has been temporarily removed. This needs to be revisited once the Share trait itself can accomodate such behaviour. 
--- mayastor-test/test_nexus.js | 2 +- mayastor/src/bdev/nexus/mod.rs | 2 - mayastor/src/bdev/nexus/nexus_bdev.rs | 30 +- mayastor/src/bdev/nexus/nexus_iscsi.rs | 81 --- mayastor/src/bdev/nexus/nexus_nvmf.rs | 76 --- mayastor/src/bdev/nexus/nexus_share.rs | 266 +++------ mayastor/src/core/bdev.rs | 95 ++-- mayastor/src/core/mod.rs | 8 + mayastor/src/pool.rs | 30 +- mayastor/src/replica.rs | 50 +- mayastor/src/subsys/nvmf/subsystem.rs | 9 +- mayastor/src/target/nvmf.rs | 710 +------------------------ 12 files changed, 166 insertions(+), 1193 deletions(-) delete mode 100644 mayastor/src/bdev/nexus/nexus_iscsi.rs delete mode 100644 mayastor/src/bdev/nexus/nexus_nvmf.rs diff --git a/mayastor-test/test_nexus.js b/mayastor-test/test_nexus.js index d37dc826f..4e004436c 100644 --- a/mayastor-test/test_nexus.js +++ b/mayastor-test/test_nexus.js @@ -147,7 +147,7 @@ function controlPlaneTest (thisProtocol) { }); }); - it('should re-publish the nexus using a crypto-key', (done) => { + it.skip('should re-publish the nexus using a crypto-key', (done) => { client.publishNexus( { uuid: UUID, diff --git a/mayastor/src/bdev/nexus/mod.rs b/mayastor/src/bdev/nexus/mod.rs index 1c898977f..5c35d365a 100644 --- a/mayastor/src/bdev/nexus/mod.rs +++ b/mayastor/src/bdev/nexus/mod.rs @@ -23,13 +23,11 @@ pub mod nexus_child_status_config; mod nexus_config; pub mod nexus_fn_table; pub mod nexus_io; -pub mod nexus_iscsi; pub mod nexus_label; pub mod nexus_metadata; pub mod nexus_metadata_content; pub mod nexus_module; pub mod nexus_nbd; -pub mod nexus_nvmf; pub mod nexus_share; /// public function which simply calls register module diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 19f9f8ac5..edf4ae205 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -6,7 +6,6 @@ use std::{ convert::TryFrom, - fmt, fmt::{Display, Formatter}, os::raw::c_void, }; @@ -43,10 +42,8 @@ use crate::{ nexus_channel::{DREvent, 
NexusChannel, NexusChannelInner}, nexus_child::{ChildError, ChildState, NexusChild}, nexus_io::{io_status, nvme_admin_opc, Bio}, - nexus_iscsi::{NexusIscsiError, NexusIscsiTarget}, nexus_label::LabelError, nexus_nbd::{NbdDisk, NbdError}, - nexus_nvmf::{NexusNvmfError, NexusNvmfTarget}, }, }, core::{Bdev, CoreError, DmaError, Share}, @@ -105,15 +102,11 @@ pub enum Error { #[snafu(display("Failed to share nexus over NBD {}", name))] ShareNbdNexus { source: NbdError, name: String }, #[snafu(display("Failed to share iscsi nexus {}", name))] - ShareIscsiNexus { - source: NexusIscsiError, - name: String, - }, + ShareIscsiNexus { source: CoreError, name: String }, #[snafu(display("Failed to share nvmf nexus {}", name))] - ShareNvmfNexus { - source: NexusNvmfError, - name: String, - }, + ShareNvmfNexus { source: CoreError, name: String }, + #[snafu(display("Failed to unshare nexus {}", name))] + UnshareNexus { source: CoreError, name: String }, #[snafu(display("Failed to allocate label of nexus {}", name))] AllocLabel { source: DmaError, name: String }, #[snafu(display("Failed to write label of nexus {}", name))] @@ -285,20 +278,11 @@ impl From for tonic::Status { pub(crate) static NEXUS_PRODUCT_ID: &str = "Nexus CAS Driver v0.0.1"; +#[derive(Debug)] pub enum NexusTarget { NbdDisk(NbdDisk), - NexusIscsiTarget(NexusIscsiTarget), - NexusNvmfTarget(NexusNvmfTarget), -} - -impl fmt::Debug for NexusTarget { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - NexusTarget::NbdDisk(disk) => fmt::Debug::fmt(&disk, f), - NexusTarget::NexusIscsiTarget(tgt) => fmt::Debug::fmt(&tgt, f), - NexusTarget::NexusNvmfTarget(tgt) => fmt::Debug::fmt(&tgt, f), - } - } + NexusIscsiTarget, + NexusNvmfTarget, } /// The main nexus structure diff --git a/mayastor/src/bdev/nexus/nexus_iscsi.rs b/mayastor/src/bdev/nexus/nexus_iscsi.rs deleted file mode 100644 index 54114eb9c..000000000 --- a/mayastor/src/bdev/nexus/nexus_iscsi.rs +++ /dev/null @@ -1,81 +0,0 @@ -//! 
Utility functions and wrappers for working with iSCSI devices in SPDK. - -use std::fmt; - -use snafu::Snafu; - -use crate::{ - core::Bdev, - target::{ - iscsi::{create_uri, share, target_name, unshare}, - Side, - }, -}; - -#[derive(Debug, Snafu)] -pub enum NexusIscsiError { - #[snafu(display("Bdev not found {}", dev))] - BdevNotFound { dev: String }, - #[snafu(display( - "Failed to create iscsi target for bdev uuid {}, error {}", - dev, - err - ))] - CreateTargetFailed { dev: String, err: String }, -} - -/// Iscsi target representation. -pub struct NexusIscsiTarget { - bdev_name: String, /* logically we might store a spdk_iscsi_tgt_node here but ATM the bdev name is all we actually need */ -} - -impl NexusIscsiTarget { - /// Allocate iscsi device for the bdev and start it. - /// When the function returns the iscsi target is ready for IO. - pub fn create(bdev_name: &str) -> Result { - let bdev = match Bdev::lookup_by_name(bdev_name) { - None => { - return Err(NexusIscsiError::BdevNotFound { - dev: bdev_name.to_string(), - }) - } - Some(bd) => bd, - }; - - match share(bdev_name, &bdev, Side::Nexus) { - Ok(_) => Ok(Self { - bdev_name: bdev_name.to_string(), - }), - Err(e) => Err(NexusIscsiError::CreateTargetFailed { - dev: bdev_name.to_string(), - err: e.to_string(), - }), - } - } - - pub async fn destroy(self) { - info!("Destroying iscsi frontend target"); - match unshare(&self.bdev_name).await { - Ok(()) => (), - Err(e) => { - error!("Failed to destroy iscsi frontend target, error {}", e) - } - } - } - - pub fn as_uri(&self) -> String { - create_uri(Side::Nexus, &target_name(&self.bdev_name)) - } -} - -impl fmt::Debug for NexusIscsiTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}@{:?}", self.as_uri(), self.bdev_name) - } -} - -impl fmt::Display for NexusIscsiTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_uri()) - } -} diff --git a/mayastor/src/bdev/nexus/nexus_nvmf.rs 
b/mayastor/src/bdev/nexus/nexus_nvmf.rs deleted file mode 100644 index 9960c5832..000000000 --- a/mayastor/src/bdev/nexus/nexus_nvmf.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! Utility functions and wrappers for working with NVMEoF devices in SPDK. - -use std::fmt; - -use snafu::Snafu; - -use crate::{ - core::Bdev, - subsys::NvmfSubsystem, - target::nvmf::{share, unshare}, -}; - -#[derive(Debug, Snafu)] -pub enum NexusNvmfError { - #[snafu(display("Bdev not found {}", dev))] - BdevNotFound { dev: String }, - #[snafu(display( - "Failed to create nvmf target for bdev uuid {}, error {}", - dev, - err - ))] - CreateTargetFailed { dev: String, err: String }, -} - -/// Nvmf target representation. -pub struct NexusNvmfTarget { - uuid: String, -} - -impl NexusNvmfTarget { - pub async fn create(my_uuid: &str) -> Result { - info!("Creating nvmf nexus target: {}", my_uuid); - let bdev = match Bdev::lookup_by_name(&my_uuid) { - None => { - return Err(NexusNvmfError::BdevNotFound { - dev: my_uuid.to_string(), - }); - } - Some(bd) => bd, - }; - - match share(&my_uuid, &bdev).await { - Ok(_) => Ok(Self { - uuid: my_uuid.to_string(), - }), - Err(e) => Err(NexusNvmfError::CreateTargetFailed { - dev: my_uuid.to_string(), - err: e.to_string(), - }), - } - } - pub async fn destroy(self) { - info!("Destroying nvmf nexus target"); - match unshare(&self.uuid).await { - Ok(()) => (), - Err(e) => { - error!("Failed to destroy nvmf frontend target, error {}", e) - } - } - } - - pub fn as_uri(&self) -> String { - NvmfSubsystem::nqn_lookup(&self.uuid) - .unwrap() - .uri_endpoints() - .unwrap() - .pop() - .unwrap() - } -} - -impl fmt::Debug for NexusNvmfTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}@{:?}", self.as_uri(), self.uuid) - } -} diff --git a/mayastor/src/bdev/nexus/nexus_share.rs b/mayastor/src/bdev/nexus/nexus_share.rs index afc95e470..2e48c726c 100644 --- a/mayastor/src/bdev/nexus/nexus_share.rs +++ b/mayastor/src/bdev/nexus/nexus_share.rs @@ -1,37 
+1,24 @@ -use std::ffi::CString; - use async_trait::async_trait; -use futures::channel::oneshot; use snafu::ResultExt; use rpc::mayastor::ShareProtocolNexus; -use spdk_sys::create_crypto_disk; use crate::{ bdev::nexus::{ nexus_bdev::{ - CreateCryptoBdev, - DestroyCryptoBdev, Error, - Error::AlreadyShared, Nexus, NexusTarget, ShareIscsiNexus, ShareNbdNexus, ShareNvmfNexus, + UnshareNexus, }, - nexus_iscsi::{NexusIscsiError, NexusIscsiTarget}, nexus_nbd::NbdDisk, - nexus_nvmf::{NexusNvmfError, NexusNvmfTarget}, }, - core::{Bdev, Protocol, Share}, - ffihelper::{cb_arg, done_errno_cb, errno_result_from_i32, ErrnoResult}, + core::{Protocol, Share}, }; -/// we are using the multi buffer encryption implementation using CBC as the -/// algorithm -const CRYPTO_FLAVOUR: &str = "crypto_aesni_mb"; - #[async_trait(? Send)] /// /// The sharing of the nexus is different compared to regular bdevs @@ -47,54 +34,42 @@ impl Share for Nexus { async fn share_iscsi(&self) -> Result { match self.shared() { - Some(Protocol::Iscsi) => Ok(self.share_uri().unwrap()), - Some(Protocol::Off) | None => self - .bdev - .share_iscsi() - .await - .map_err(|e| Error::ShareIscsiNexus { - source: NexusIscsiError::CreateTargetFailed { - dev: self.bdev.to_string(), - err: e.to_string(), - }, + Some(Protocol::Off) | None => { + self.bdev.share_iscsi().await.context(ShareIscsiNexus { name: self.name.clone(), - }) - .map(|_u| self.share_uri().unwrap()), - Some(p) => { - error!("nexus {} already shared as {:?}", self.name, p); - Err(AlreadyShared { + })?; + } + Some(Protocol::Iscsi) => {} + Some(protocol) => { + error!("nexus {} already shared as {:?}", self.name, protocol); + return Err(Error::AlreadyShared { name: self.name.clone(), - }) + }); } } + Ok(self.share_uri().unwrap()) } async fn share_nvmf(&self) -> Result { - let bdev = Bdev::from(self.bdev.as_ptr()); match self.shared() { - Some(Protocol::Nvmf) => Ok(self.share_uri().unwrap()), - Some(Protocol::Off) | None => bdev - .share_nvmf() - .await - 
.map_err(|e| Error::ShareNvmfNexus { - source: NexusNvmfError::CreateTargetFailed { - dev: self.bdev.to_string(), - err: e.to_string(), - }, + Some(Protocol::Off) | None => { + self.bdev.share_nvmf().await.context(ShareNvmfNexus { name: self.name.clone(), - }) - .map(|_| self.share_uri().unwrap()), - Some(p) => { - warn!("nexus {} already shared as {}", self.name, p); - Err(AlreadyShared { + })?; + } + Some(Protocol::Nvmf) => {} + Some(protocol) => { + warn!("nexus {} already shared as {}", self.name, protocol); + return Err(Error::AlreadyShared { name: self.name.clone(), - }) + }); } } + Ok(self.share_uri().unwrap()) } async fn unshare(&self) -> Result { - self.bdev.unshare().await.map_err(|_e| Error::NotShared { + self.bdev.unshare().await.context(UnshareNexus { name: self.name.clone(), }) } @@ -112,188 +87,87 @@ impl Share for Nexus { } } +impl From<&NexusTarget> for ShareProtocolNexus { + fn from(target: &NexusTarget) -> ShareProtocolNexus { + match target { + NexusTarget::NbdDisk(_) => ShareProtocolNexus::NexusNbd, + NexusTarget::NexusIscsiTarget => ShareProtocolNexus::NexusIscsi, + NexusTarget::NexusNvmfTarget => ShareProtocolNexus::NexusNvmf, + } + } +} + impl Nexus { pub async fn share( &mut self, - share_protocol: ShareProtocolNexus, - key: Option, + protocol: ShareProtocolNexus, + _key: Option, ) -> Result { - // We could already be shared -- as CSI is idempotent chances are we get - // called for some odd reason. Validate indeed -- that we are - // shared by walking the target. If so, and the protocol is - // correct simply return Ok(). If so, and the protocol is - // incorrect, return Error(). If we are not shared but the - // variant says we should be, carry on to correct the state. 
- match self.nexus_target { - Some(NexusTarget::NbdDisk(ref nbd_disk)) => { - if share_protocol != ShareProtocolNexus::NexusNbd { - return Err(Error::AlreadyShared { - name: self.name.clone(), - }); - } else { - warn!("{} is already shared", self.name); - return Ok(nbd_disk.as_uri()); - } + // This function should be idempotent as it's possible that + // we get called more than once for some odd reason. + if let Some(target) = &self.nexus_target { + // We're already shared ... + if ShareProtocolNexus::from(target) == protocol { + // Same protocol as that requested, simply return Ok() + warn!("{} is already shared", self.name); + return Ok(self.get_share_uri().unwrap()); } - Some(NexusTarget::NexusIscsiTarget(ref iscsi_target)) => { - if share_protocol != ShareProtocolNexus::NexusIscsi { - return Err(Error::AlreadyShared { - name: self.name.clone(), - }); - } else { - warn!("{} is already shared", self.name); - return Ok(iscsi_target.as_uri()); - } - } - Some(NexusTarget::NexusNvmfTarget(ref nvmf_target)) => { - if share_protocol != ShareProtocolNexus::NexusNvmf { - return Err(Error::AlreadyShared { - name: self.name.clone(), - }); - } else { - warn!("{} is already shared", self.name); - return Ok(nvmf_target.as_uri()); - } - } - None => (), - } - - assert_eq!(self.share_handle, None); - - let name = if let Some(key) = key { - let name = format!("crypto-{}", self.name); - // constant - let flavour = CString::new(CRYPTO_FLAVOUR).unwrap(); - // name of the crypto device - let cname = CString::new(name.clone()).unwrap(); - // the nexus device itself - let base = CString::new(self.name.clone()).unwrap(); - // the keys to the castle - let key = CString::new(key).unwrap(); - - let cipher = CString::new("AES_CBC").unwrap(); - - let errno = unsafe { - create_crypto_disk( - base.as_ptr(), - cname.as_ptr(), - flavour.as_ptr(), - key.as_ptr(), - cipher.as_ptr(), - std::ptr::null_mut(), - ) - }; - errno_result_from_i32(name, errno).context(CreateCryptoBdev { + // Error as 
protocol differs from that requested. + return Err(Error::AlreadyShared { name: self.name.clone(), - })? - } else { - self.name.clone() - }; - - debug!("creating share handle for {}", name); - // The share handle is the actual bdev that is shared through the - // various protocols. + }); + } - let device_id = match share_protocol { + match protocol { ShareProtocolNexus::NexusNbd => { - // Publish the nexus to system using nbd device and return the - // path to nbd device. - let nbd_disk = - NbdDisk::create(&name).await.context(ShareNbdNexus { - name: self.name.clone(), - })?; - let device_uri = nbd_disk.as_uri(); - self.nexus_target = Some(NexusTarget::NbdDisk(nbd_disk)); - device_uri - } - ShareProtocolNexus::NexusIscsi => { - // Publish the nexus to system using an iscsi target and return - // the IQN - let iscsi_target = NexusIscsiTarget::create(&name).context( - ShareIscsiNexus { + let disk = NbdDisk::create(&self.name).await.context( + ShareNbdNexus { name: self.name.clone(), }, )?; - let uri = iscsi_target.as_uri(); - self.nexus_target = - Some(NexusTarget::NexusIscsiTarget(iscsi_target)); - uri + let uri = disk.as_uri(); + self.nexus_target = Some(NexusTarget::NbdDisk(disk)); + Ok(uri) + } + ShareProtocolNexus::NexusIscsi => { + let uri = self.share_iscsi().await?; + self.nexus_target = Some(NexusTarget::NexusIscsiTarget); + Ok(uri) } ShareProtocolNexus::NexusNvmf => { - let nvmf_target = NexusNvmfTarget::create(&name) - .await - .context(ShareNvmfNexus { - name: self.name.clone(), - })?; - let uri = nvmf_target.as_uri(); - self.nexus_target = - Some(NexusTarget::NexusNvmfTarget(nvmf_target)); - uri + let uri = self.share_nvmf().await?; + self.nexus_target = Some(NexusTarget::NexusNvmfTarget); + Ok(uri) } - }; - self.share_handle = Some(name); - Ok(device_id) + } } - /// Undo share operation on nexus. To the chain of bdevs are all claimed - /// where the top-level dev is claimed by the subsystem that exports the - /// bdev. 
As such, we must first destroy the share and move our way down - /// from there. pub async fn unshare_nexus(&mut self) -> Result<(), Error> { match self.nexus_target.take() { Some(NexusTarget::NbdDisk(disk)) => { disk.destroy(); } - Some(NexusTarget::NexusIscsiTarget(iscsi_target)) => { - iscsi_target.destroy().await; + Some(NexusTarget::NexusIscsiTarget) => { + self.unshare().await?; } - Some(NexusTarget::NexusNvmfTarget(nvmf_target)) => { - nvmf_target.destroy().await; + Some(NexusTarget::NexusNvmfTarget) => { + self.unshare().await?; } None => { warn!("{} was not shared", self.name); - return Ok(()); } - }; - - let bdev_name = self.share_handle.take().unwrap(); - if let Some(bdev) = Bdev::lookup_by_name(&bdev_name) { - // if the share handle is the same as bdev name it - // implies there is no top level bdev, and we are done - if self.name != bdev.name() { - let (s, r) = oneshot::channel::>(); - // currently, we only have the crypto vbdev - unsafe { - spdk_sys::delete_crypto_disk( - bdev.as_ptr(), - Some(done_errno_cb), - cb_arg(s), - ); - } - r.await.expect("crypto delete sender is gone").context( - DestroyCryptoBdev { - name: self.name.clone(), - }, - )?; - } - } else { - warn!("Missing bdev for a shared device"); } + Ok(()) } - /// Return URI under which the nexus is shared or None if not shared. 
pub fn get_share_uri(&self) -> Option { match self.nexus_target { Some(NexusTarget::NbdDisk(ref disk)) => Some(disk.as_uri()), - Some(NexusTarget::NexusIscsiTarget(ref iscsi_target)) => { - Some(iscsi_target.as_uri()) - } - Some(NexusTarget::NexusNvmfTarget(ref nvmf_target)) => { - Some(nvmf_target.as_uri()) - } - _ => None, + Some(NexusTarget::NexusIscsiTarget) => self.share_uri(), + Some(NexusTarget::NexusNvmfTarget) => self.share_uri(), + None => None, } } } diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 8433201e6..9b69b0002 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -1,6 +1,6 @@ use std::{ convert::TryFrom, - ffi::CStr, + ffi::{CStr, CString}, fmt::{Debug, Display, Formatter}, os::raw::c_void, ptr::NonNull, @@ -9,6 +9,7 @@ use std::{ use async_trait::async_trait; use futures::channel::oneshot; use nix::errno::Errno; +use snafu::ResultExt; use spdk_sys::{ spdk_bdev, @@ -35,8 +36,11 @@ use crate::{ share::{Protocol, Share}, uuid::Uuid, CoreError, - CoreError::{ShareIscsi, ShareNvmf}, Descriptor, + ShareIscsi, + ShareNvmf, + UnshareIscsi, + UnshareNvmf, }, ffihelper::{cb_arg, AsStr}, subsys::NvmfSubsystem, @@ -67,45 +71,30 @@ impl Share for Bdev { /// share the bdev over iscsi async fn share_iscsi(&self) -> Result { - iscsi::share(&self.name(), &self, Side::Nexus).map_err(|source| { - ShareIscsi { - source, - } - }) + iscsi::share(&self.name(), &self, Side::Nexus).context(ShareIscsi {}) } /// share the bdev over NVMe-OF TCP async fn share_nvmf(&self) -> Result { - let ss = NvmfSubsystem::try_from(self.clone()).map_err(|source| { - ShareNvmf { - source, - } - })?; - - let shared_as = ss.start().await.map_err(|source| ShareNvmf { - source, - })?; - - info!("shared {}", shared_as); - Ok(shared_as) + let subsystem = + NvmfSubsystem::try_from(self.clone()).context(ShareNvmf {})?; + subsystem.start().await.context(ShareNvmf {}) } /// unshare the bdev regardless of current active share async fn unshare(&self) -> 
Result { match self.shared() { Some(Protocol::Nvmf) => { - let ss = NvmfSubsystem::nqn_lookup(&self.name()).unwrap(); - ss.stop().await.map_err(|source| ShareNvmf { - source, - })?; - ss.destroy(); + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name()) + { + subsystem.stop().await.context(UnshareNvmf {})?; + subsystem.destroy(); + } } Some(Protocol::Iscsi) => { - iscsi::unshare(&self.name()).await.map_err(|source| { - ShareIscsi { - source, - } - })?; + iscsi::unshare(&self.name()) + .await + .context(UnshareIscsi {})?; } Some(Protocol::Off) | None => {} } @@ -229,24 +218,15 @@ impl Bdev { /// construct bdev from raw pointer pub fn from_ptr(bdev: *mut spdk_bdev) -> Option { - if let Some(ptr) = NonNull::new(bdev) { - Some(Bdev(ptr)) - } else { - None - } + NonNull::new(bdev).map(Bdev) } /// lookup a bdev by its name pub fn lookup_by_name(name: &str) -> Option { - let name = std::ffi::CString::new(name).unwrap(); - if let Some(bdev) = - NonNull::new(unsafe { spdk_bdev_get_by_name(name.as_ptr()) }) - { - Some(Bdev(bdev)) - } else { - None - } + let name = CString::new(name).unwrap(); + Self::from_ptr(unsafe { spdk_bdev_get_by_name(name.as_ptr()) }) } + /// returns the block_size of the underlying device pub fn block_len(&self) -> u32 { unsafe { spdk_bdev_get_block_size(self.0.as_ptr()) } @@ -307,9 +287,7 @@ impl Bdev { /// the UUID that is set for this bdev, all bdevs should have a UUID set pub fn uuid(&self) -> Uuid { - Uuid { - 0: unsafe { spdk_bdev_get_uuid(self.0.as_ptr()) }, - } + Uuid(unsafe { spdk_bdev_get_uuid(self.0.as_ptr()) }) } /// converts the UUID to a string @@ -330,7 +308,7 @@ impl Bdev { /// Set an alias on the bdev, this alias can be used to find the bdev later pub fn add_alias(&self, alias: &str) -> bool { - let alias = std::ffi::CString::new(alias).unwrap(); + let alias = CString::new(alias).unwrap(); let ret = unsafe { spdk_sys::spdk_bdev_alias_add(self.0.as_ptr(), alias.as_ptr()) }; @@ -424,13 +402,7 @@ impl Bdev { } /// returns the 
first bdev in the list pub fn bdev_first() -> Option { - let bdev = unsafe { spdk_bdev_first() }; - - if bdev.is_null() { - None - } else { - Some(Bdev::from(bdev)) - } + Self::from_ptr(unsafe { spdk_bdev_first() }) } } @@ -448,23 +420,20 @@ impl IntoIterator for Bdev { impl Iterator for BdevIter { type Item = Bdev; fn next(&mut self) -> Option { - if !self.0.is_null() { + if self.0.is_null() { + None + } else { let current = self.0; self.0 = unsafe { spdk_bdev_next(current) }; - Some(Bdev::from(current)) - } else { - None + Bdev::from_ptr(current) } } } impl From<*mut spdk_bdev> for Bdev { - fn from(b: *mut spdk_bdev) -> Self { - if let Some(b) = NonNull::new(b) { - Bdev(b) - } else { - panic!("nullptr dereference while accessing a bdev"); - } + fn from(bdev: *mut spdk_bdev) -> Self { + Self::from_ptr(bdev) + .expect("nullptr dereference while accessing a bdev") } } diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 443b35962..0399eb34f 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -104,10 +104,18 @@ pub enum CoreError { ShareNvmf { source: NvmfError, }, + #[snafu(display("failed to unshare {}", source))] + UnshareNvmf { + source: NvmfError, + }, #[snafu(display("failed to share {}", source))] ShareIscsi { source: iscsi::Error, }, + #[snafu(display("failed to unshare {}", source))] + UnshareIscsi { + source: iscsi::Error, + }, #[snafu(display("the operation is invalid for this bdev: {}", source))] NotSupported { source: Errno, diff --git a/mayastor/src/pool.rs b/mayastor/src/pool.rs index f2e69caa4..3c99c65bc 100644 --- a/mayastor/src/pool.rs +++ b/mayastor/src/pool.rs @@ -92,19 +92,25 @@ impl Iterator for PoolsIter { type Item = Pool; fn next(&mut self) -> Option { - let next_ptr = match self.lvs_bdev_ptr { - None => unsafe { vbdev_lvol_store_first() }, - Some(ptr) => { - assert!(!ptr.is_null()); - unsafe { vbdev_lvol_store_next(ptr) } + match self.lvs_bdev_ptr { + Some(current) => { + if current.is_null() { + return 
None; + } + self.lvs_bdev_ptr = + Some(unsafe { vbdev_lvol_store_next(current) }); + Some(unsafe { Pool::from_ptr(current) }) + } + None => { + let current = unsafe { vbdev_lvol_store_first() }; + if current.is_null() { + self.lvs_bdev_ptr = Some(current); + return None; + } + self.lvs_bdev_ptr = + Some(unsafe { vbdev_lvol_store_next(current) }); + Some(unsafe { Pool::from_ptr(current) }) } - }; - self.lvs_bdev_ptr = Some(next_ptr); - - if next_ptr.is_null() { - None - } else { - Some(unsafe { Pool::from_ptr(next_ptr) }) } } } diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index af2bfe266..2f7e6a1a5 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -10,7 +10,7 @@ use snafu::{ResultExt, Snafu}; use spdk_sys::{spdk_lvol, vbdev_lvol_get_from_bdev}; -use crate::{core::Bdev, subsys::NvmfSubsystem, target}; +use crate::{core::Bdev, subsys::NvmfError, target}; /// These are high-level context errors one for each rpc method. #[derive(Debug, Snafu)] @@ -35,11 +35,11 @@ pub enum Error { #[snafu(display("Replica has been already shared"))] ReplicaShared {}, #[snafu(display("share nvmf"))] - ShareNvmf { source: target::nvmf::Error }, + ShareNvmf { source: NvmfError }, #[snafu(display("share iscsi"))] ShareIscsi { source: target::iscsi::Error }, #[snafu(display("unshare nvmf"))] - UnshareNvmf { source: target::nvmf::Error }, + UnshareNvmf { source: NvmfError }, #[snafu(display("unshare iscsi"))] UnshareIscsi { source: target::iscsi::Error }, #[snafu(display("Invalid share protocol {} in request", protocol))] @@ -49,29 +49,13 @@ pub enum Error { } impl From for tonic::Status { - fn from(e: Error) -> Self { - match e { - Error::ReplicaShared { - .. - } => Self::internal(e.to_string()), - Error::ShareNvmf { - .. - } => Self::internal(e.to_string()), - Error::ShareIscsi { - .. - } => Self::internal(e.to_string()), - Error::UnshareNvmf { - .. - } => Self::internal(e.to_string()), - Error::UnshareIscsi { - .. 
- } => Self::internal(e.to_string()), + fn from(error: Error) -> Self { + match error { Error::InvalidProtocol { .. - } => Self::invalid_argument(e.to_string()), - Error::ReplicaNotFound { - .. - } => Self::not_found(e.to_string()), + } => Self::invalid_argument(error.to_string()), + Error::ReplicaNotFound {} => Self::not_found(error.to_string()), + _ => Self::internal(error.to_string()), } } } @@ -98,19 +82,17 @@ pub enum ShareType { /// Detect share protocol (if any) for replica with given uuid and share ID /// string. fn detect_share(uuid: &str) -> Option<(ShareType, String)> { - // first try nvmf and then try iscsi - if let Some(s) = NvmfSubsystem::nqn_lookup(uuid) { - let mut ep = s.uri_endpoints().unwrap(); - return Some((ShareType::Nvmf, ep.pop().unwrap())); + // first try nvmf ... + if let Some(uri) = target::nvmf::get_uri(uuid) { + return Some((ShareType::Nvmf, uri)); } - match target::nvmf::get_uri(uuid) { - Some(uri) => Some((ShareType::Nvmf, uri)), - None => match target::iscsi::get_uri(target::Side::Replica, uuid) { - Some(uri) => Some((ShareType::Iscsi, uri)), - None => None, - }, + // and then iscsi ... 
+ if let Some(uri) = target::iscsi::get_uri(target::Side::Replica, uuid) { + return Some((ShareType::Iscsi, uri)); } + + None } impl Replica { diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 8242fdece..27c7c317b 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -74,7 +74,7 @@ impl Iterator for NvmfSubsystemIterator { } else { let current = self.0; self.0 = unsafe { spdk_nvmf_subsystem_get_next(current) }; - Some(NvmfSubsystem(NonNull::new(current).unwrap())) + NonNull::new(current).map(NvmfSubsystem) } } } @@ -498,15 +498,12 @@ impl NvmfSubsystem { /// first namespace pub fn bdev(&self) -> Option { let ns = unsafe { spdk_nvmf_subsystem_get_first_ns(self.0.as_ptr()) }; + if ns.is_null() { return None; } - let b = unsafe { spdk_nvmf_ns_get_bdev(ns) }; - if b.is_null() { - return None; - } - Some(Bdev::from(b)) + Bdev::from_ptr(unsafe { spdk_nvmf_ns_get_bdev(ns) }) } fn listeners_to_vec(&self) -> Option> { diff --git a/mayastor/src/target/nvmf.rs b/mayastor/src/target/nvmf.rs index f7edb68bb..d2e7314c0 100644 --- a/mayastor/src/target/nvmf.rs +++ b/mayastor/src/target/nvmf.rs @@ -1,718 +1,30 @@ -#![allow(dead_code)] //! Methods for creating nvmf targets -//! -//! We create a default nvmf target when mayastor starts up. Then for each -//! replica which is to be exported, we create a subsystem in that default -//! target. Each subsystem has one namespace backed by the lvol. 
-use std::{ - cell::RefCell, - convert::TryFrom, - ffi::{c_void, CStr, CString}, - fmt, - os::raw::c_int, - ptr::{self, copy_nonoverlapping}, -}; - -use futures::channel::oneshot; -use nix::errno::Errno; -use once_cell::sync::Lazy; -use snafu::{ResultExt, Snafu}; -use spdk_sys::{ - nvmf_tgt_accept, - spdk_nvme_transport_id, - spdk_nvmf_poll_group, - spdk_nvmf_poll_group_add, - spdk_nvmf_poll_group_create, - spdk_nvmf_poll_group_destroy, - spdk_nvmf_qpair, - spdk_nvmf_qpair_disconnect, - spdk_nvmf_subsystem, - spdk_nvmf_subsystem_add_listener, - spdk_nvmf_subsystem_add_ns, - spdk_nvmf_subsystem_create, - spdk_nvmf_subsystem_destroy, - spdk_nvmf_subsystem_get_first, - spdk_nvmf_subsystem_get_next, - spdk_nvmf_subsystem_get_nqn, - spdk_nvmf_subsystem_set_allow_any_host, - spdk_nvmf_subsystem_set_mn, - spdk_nvmf_subsystem_set_sn, - spdk_nvmf_subsystem_start, - spdk_nvmf_subsystem_stop, - spdk_nvmf_target_opts, - spdk_nvmf_tgt, - spdk_nvmf_tgt_add_transport, - spdk_nvmf_tgt_create, - spdk_nvmf_tgt_destroy, - spdk_nvmf_tgt_find_subsystem, - spdk_nvmf_tgt_listen, - spdk_nvmf_tgt_stop_listen, - spdk_nvmf_transport_create, - spdk_nvmf_transport_opts, - spdk_nvmf_transport_opts_init, - spdk_poller, - spdk_poller_register, - spdk_poller_unregister, - NVMF_TGT_NAME_MAX_LENGTH, - SPDK_NVME_TRANSPORT_TCP, - SPDK_NVMF_ADRFAM_IPV4, - SPDK_NVMF_DISCOVERY_NQN, - SPDK_NVMF_SUBTYPE_DISCOVERY, - SPDK_NVMF_SUBTYPE_NVME, - SPDK_NVMF_TRADDR_MAX_LEN, - SPDK_NVMF_TRSVCID_MAX_LEN, -}; +use std::convert::TryFrom; use crate::{ - core::{Bdev, Reactors}, - ffihelper::{cb_arg, done_errno_cb, errno_result_from_i32, ErrnoResult}, - subsys::{Config, NvmfSubsystem}, + core::Bdev, + subsys::{NvmfError, NvmfSubsystem}, }; -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("Failed to create nvmf target {}:{}", addr, port))] - CreateTarget { addr: String, port: u16 }, - #[snafu(display( - "Failed to destroy nvmf target {}: {}", - endpoint, - source - ))] - DestroyTarget { source: Errno, 
endpoint: String }, - #[snafu(display("Invalid nvmf target address \"{}\"", addr))] - TargetAddress { addr: String }, - #[snafu(display("Failed to init opts for nvmf tcp transport"))] - InitOpts {}, - #[snafu(display("Failed to create nvmf tcp transport"))] - TcpTransport {}, - #[snafu(display("Failed to add nvmf tcp transport: {}", source))] - AddTransport { source: Errno }, - #[snafu(display("nvmf target listen failed: {}", source))] - ListenTarget { source: Errno }, - #[snafu(display("nvmf target failed to stop listening: {}", source))] - StopListenTarget { source: Errno }, - #[snafu(display("Failed to create a poll group"))] - CreatePollGroup {}, - #[snafu(display("Failed to create nvmf subsystem {}", nqn))] - CreateSubsystem { nqn: String }, - #[snafu(display("Failed to start nvmf subsystem {}: {}", nqn, source))] - StartSubsystem { source: Errno, nqn: String }, - #[snafu(display("Failed to stop nvmf subsystem {}: {}", nqn, source))] - StopSubsystem { source: Errno, nqn: String }, - #[snafu(display( - "Failed to set property {} of the subsystem {}", - prop, - nqn - ))] - SetSubsystem { prop: &'static str, nqn: String }, - #[snafu(display("Listen on nvmf subsystem {} failed", nqn))] - ListenSubsystem { nqn: String }, - #[snafu(display("Failed to add namespace to nvmf subsystem {}", nqn))] - AddNamespace { nqn: String }, -} - -type Result = std::result::Result; - -static TRANSPORT_NAME: Lazy = - Lazy::new(|| CString::new("TCP").unwrap()); - -thread_local! { - /// nvmf target provides a scope for creating transports, namespaces etc. - /// It is thread-local because TLS is safe to access in rust without any - /// synchronization overhead. It should be accessed only from - /// reactor_0 thread. - static NVMF_TGT: RefCell>> = RefCell::new(None); -} - -/// Given a bdev uuid return a NQN used to connect to the bdev from outside. 
-fn gen_nqn(id: &str) -> String { - format!("nqn.2019-05.io.openebs:{}", id) -} - -/// Wrapper around spdk nvme subsystem providing rust friendly api. -pub(crate) struct Subsystem { - inner: *mut spdk_nvmf_subsystem, - nqn: String, -} - -impl Subsystem { - /// Create a nvme subsystem identified by the id string (used for nqn - /// creation). - pub unsafe fn create( - inner: *mut spdk_nvmf_subsystem, - trid: *mut spdk_nvme_transport_id, - nqn: String, - ) -> Result { - let sn = CString::new("MayaData Inc.").unwrap(); - if spdk_nvmf_subsystem_set_sn(inner, sn.as_ptr()) != 0 { - return Err(Error::SetSubsystem { - prop: "serial number", - nqn, - }); - } - let mn = CString::new("MayaStor NVMF controller").unwrap(); - if spdk_nvmf_subsystem_set_mn(inner, mn.as_ptr()) != 0 { - return Err(Error::SetSubsystem { - prop: "model name", - nqn, - }); - } - spdk_nvmf_subsystem_set_allow_any_host(inner, true); - // TODO: callback async - - let fut = async move { - let (s, r) = oneshot::channel::>(); - spdk_nvmf_subsystem_add_listener( - inner, - trid, - Some(done_errno_cb), - cb_arg(s), - ); - - assert_eq!(r.await.is_ok(), true); - }; - - Reactors::current().send_future(fut); - - Ok(Self { - inner, - nqn, - }) - } - - /// Convert raw subsystem pointer to subsystem object. 
- pub unsafe fn from_ptr(inner: *mut spdk_nvmf_subsystem) -> Self { - let nqn = CStr::from_ptr(spdk_nvmf_subsystem_get_nqn(inner)) - .to_str() - .unwrap() - .to_string(); - Self { - inner, - nqn, - } - } - - /// Start the subsystem (it cannot be modified afterwards) - pub async fn start(&mut self) -> Result<()> { - let (sender, receiver) = oneshot::channel::>(); - unsafe { - spdk_nvmf_subsystem_start( - self.inner, - Some(Self::subsystem_start_stop_cb), - cb_arg(sender), - ); - } - - receiver - .await - .expect("Cancellation is not supported") - .context(StartSubsystem { - nqn: self.nqn.clone(), - })?; - - info!("Started nvmf subsystem {}", self.nqn); - Ok(()) - } - - /// Stop the subsystem (it cannot be modified afterwards) - pub async fn stop(&mut self) -> Result<()> { - let (sender, receiver) = oneshot::channel::>(); - unsafe { - spdk_nvmf_subsystem_stop( - self.inner, - Some(Self::subsystem_start_stop_cb), - cb_arg(sender), - ); - } - - receiver - .await - .expect("Cancellation is not supported") - .context(StopSubsystem { - nqn: self.nqn.clone(), - })?; - - info!("Stopped nvmf subsystem {}", self.nqn); - Ok(()) - } - - /// Add nvme subsystem to the target - pub fn add_namespace(&mut self, bdev: &Bdev) -> Result<()> { - let ns_id = unsafe { - spdk_nvmf_subsystem_add_ns( - self.inner, - bdev.as_ptr(), - ptr::null_mut(), - 0, - ptr::null_mut(), - ) - }; - if ns_id == 0 { - Err(Error::AddNamespace { - nqn: self.nqn.clone(), - }) - } else { - Ok(()) - } - } - - /// Get nvme subsystem's NQN - pub fn get_nqn(&mut self) -> String { - unsafe { - CStr::from_ptr(spdk_nvmf_subsystem_get_nqn(self.inner)) - .to_str() - .unwrap() - .to_string() - } - } - - /// Destroy this subsystem. - pub fn destroy(self) { - unsafe { spdk_nvmf_subsystem_destroy(self.inner) }; - } - - /// Callback for async nvmf subsystem start operation. 
- extern "C" fn subsystem_start_stop_cb( - _ss: *mut spdk_nvmf_subsystem, - sender_ptr: *mut c_void, - errno: i32, - ) { - let sender = unsafe { - Box::from_raw(sender_ptr as *mut oneshot::Sender>) - }; - sender - .send(errno_result_from_i32((), errno)) - .expect("Receiver is gone"); - } -} - -impl fmt::Display for Subsystem { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.nqn) - } -} - -/// Iterator over nvmf subsystems of a nvmf target -struct SubsystemIter { - ss_ptr: *mut spdk_nvmf_subsystem, -} - -impl SubsystemIter { - fn new(tgt_ptr: *mut spdk_nvmf_tgt) -> Self { - Self { - ss_ptr: unsafe { spdk_nvmf_subsystem_get_first(tgt_ptr) }, - } - } -} - -impl Iterator for SubsystemIter { - type Item = Subsystem; - - fn next(&mut self) -> Option { - let ss_ptr = self.ss_ptr; - if ss_ptr.is_null() { - return None; - } - unsafe { - self.ss_ptr = spdk_nvmf_subsystem_get_next(ss_ptr); - Some(Subsystem::from_ptr(ss_ptr)) - } - } -} - -/// Some options can be passed into each target that gets created. -/// -/// Currently, the options are limited to the name of the target to be created -/// and the max number of subsystems this target supports. We set this number -/// equal to the number of pods that can get scheduled on a node which is, by -/// default 110. -pub(crate) struct TargetOpts { - inner: spdk_nvmf_target_opts, -} - -impl TargetOpts { - fn new(name: &str, max_subsystems: u32) -> Self { - let mut opts = spdk_nvmf_target_opts::default(); - let cstr = CString::new(name).unwrap(); - unsafe { - std::ptr::copy_nonoverlapping( - cstr.as_ptr() as *const _ as *mut libc::c_void, - &mut opts.name[0] as *const _ as *mut libc::c_void, - NVMF_TGT_NAME_MAX_LENGTH as usize, - ); - } - - // same as max pods by default - opts.max_subsystems = max_subsystems; - - Self { - inner: opts, - } - } -} - -/// Wrapper around spdk nvmf target providing rust friendly api. 
-/// nvmf target binds listen addresses and nvmf subsystems with namespaces -/// together. -pub(crate) struct Target { - /// Pointer to SPDK implementation of nvmf target - inner: *mut spdk_nvmf_tgt, - /// Endpoint where this nvmf target listens for incoming connections. - trid: spdk_nvme_transport_id, - opts: spdk_nvmf_transport_opts, - acceptor_poll_rate: u64, - acceptor_poller: *mut spdk_poller, - /// TODO: One poll group per target does not scale - pg: *mut spdk_nvmf_poll_group, -} - -impl Target { - /// Create preconfigured nvmf target with tcp transport and default options. - pub fn create(addr: &str, port: u16) -> Result { - let cfg = Config::get(); - - let mut tgt_opts = TargetOpts::new( - &cfg.nvmf_tcp_tgt_conf.name, - cfg.nvmf_tcp_tgt_conf.max_namespaces, - ); - - let inner = unsafe { spdk_nvmf_tgt_create(&mut tgt_opts.inner) }; - if inner.is_null() { - return Err(Error::CreateTarget { - addr: addr.to_owned(), - port, - }); - } - - let mut trid: spdk_nvme_transport_id = Default::default(); - trid.trtype = SPDK_NVME_TRANSPORT_TCP; - trid.adrfam = SPDK_NVMF_ADRFAM_IPV4; - if addr.len() > SPDK_NVMF_TRADDR_MAX_LEN as usize { - return Err(Error::TargetAddress { - addr: addr.to_owned(), - }); - } - - let c_addr = CString::new(addr).unwrap(); - let port = format!("{}", port); - assert!(port.len() < SPDK_NVMF_TRSVCID_MAX_LEN as usize); - let c_port = CString::new(port.clone()).unwrap(); - - unsafe { - copy_nonoverlapping( - TRANSPORT_NAME.as_ptr(), - &mut trid.trstring[0], - trid.trstring.len(), - ); - copy_nonoverlapping( - c_addr.as_ptr(), - &mut trid.traddr[0], - addr.len() + 1, - ); - copy_nonoverlapping( - c_port.as_ptr(), - &mut trid.trsvcid[0], - port.len() + 1, - ); - } - info!("Created nvmf target at {}:{}", addr, port); - - Ok(Self { - inner, - trid, - opts: cfg.nvmf_tcp_tgt_conf.opts.into(), - acceptor_poll_rate: 1000, // 1ms - acceptor_poller: ptr::null_mut(), - pg: ptr::null_mut(), - }) - } - - /// Add tcp transport to nvmf target - pub async fn 
add_tcp_transport(&mut self) -> Result<()> { - let ok = unsafe { - spdk_nvmf_transport_opts_init( - TRANSPORT_NAME.as_ptr(), - &mut self.opts, - ) - }; - if !ok { - return Err(Error::InitOpts {}); - } - - let transport = unsafe { - spdk_nvmf_transport_create(TRANSPORT_NAME.as_ptr(), &mut self.opts) - }; - if transport.is_null() { - return Err(Error::TcpTransport {}); - } - - let (sender, receiver) = oneshot::channel::>(); - unsafe { - spdk_nvmf_tgt_add_transport( - self.inner, - transport, - Some(done_errno_cb), - cb_arg(sender), - ); - } - receiver - .await - .expect("Cancellation is not supported") - .context(AddTransport {})?; - info!("Added TCP nvmf transport {}", self); - Ok(()) - } - - /// Listen for incoming connections - pub fn listen(&mut self) -> Result<()> { - let rc = unsafe { - spdk_nvmf_tgt_listen(self.inner, &mut self.trid as *mut _) - }; - - if rc != 0 { - return Err(Error::ListenTarget { - source: Errno::from_i32(rc), - }); - } - debug!("nvmf target listening on {}", self); - Ok(()) - } - - /// A callback called by spdk when a new connection is accepted by nvmf - /// transport. Assign new qpair to a poll group. We have just one poll - /// group so we don't need fancy scheduling algorithm. - extern "C" fn new_qpair( - qpair: *mut spdk_nvmf_qpair, - target_ptr: *mut c_void, - ) { - unsafe { - let target = &*(target_ptr as *mut Self); - if spdk_nvmf_poll_group_add(target.pg, qpair) != 0 { - error!("Unable to add the qpair to a poll group"); - spdk_nvmf_qpair_disconnect(qpair, None, ptr::null_mut()); - } - } - } - - /// Called by SPDK poller to test if there is a new connection on - /// nvmf transport. - extern "C" fn acceptor_poll(target_ptr: *mut c_void) -> c_int { - unsafe { - let target = &mut *(target_ptr as *mut Self); - nvmf_tgt_accept(target.inner.cast()); - } - -1 - } - - /// Create poll group and assign accepted connections (new qpairs) to - /// the poll group. 
- pub fn accept(&mut self) -> Result<()> { - // create one poll group per target - self.pg = unsafe { spdk_nvmf_poll_group_create(self.inner) }; - if self.pg.is_null() { - return Err(Error::CreatePollGroup {}); - } - - self.acceptor_poller = unsafe { - spdk_poller_register( - Some(Self::acceptor_poll), - self as *mut _ as *mut c_void, - self.acceptor_poll_rate, - ) - }; - info!( - "nvmf target accepting new connections on {} and is ready to roll..{}", - self,'\u{1F483}' - ); - Ok(()) - } - - /// Add nvme subsystem to the target and return it. - pub fn create_subsystem(&mut self, id: &str) -> Result { - let nqn = gen_nqn(id); - let c_nqn = CString::new(nqn.clone()).unwrap(); - let ss = unsafe { - spdk_nvmf_subsystem_create( - self.inner, - c_nqn.as_ptr(), - SPDK_NVMF_SUBTYPE_NVME, - 1, // number of namespaces - ) - }; - if ss.is_null() { - return Err(Error::CreateSubsystem { - nqn, - }); - } - unsafe { Subsystem::create(ss, &mut self.trid as *mut _, nqn) } - } - - /// Add nvme discovery subsystem to the target and return it. - pub fn create_discovery_subsystem(&mut self) -> Result { - let c_nqn = unsafe { - CStr::from_ptr(SPDK_NVMF_DISCOVERY_NQN.as_ptr() as *const i8) - }; - let nqn = String::from(c_nqn.to_str().unwrap()); - - let ss = unsafe { - spdk_nvmf_subsystem_create( - self.inner, - c_nqn.as_ptr(), - SPDK_NVMF_SUBTYPE_DISCOVERY, - 0, // number of namespaces - ) - }; - if ss.is_null() { - return Err(Error::CreateSubsystem { - nqn, - }); - } - unsafe { Subsystem::create(ss, &mut self.trid as *mut _, nqn) } - } - - /// Lookup subsystem by NQN in given nvmf target. - pub fn lookup_subsystem(&mut self, id: &str) -> Option { - let nqn = gen_nqn(id); - let c_nqn = CString::new(nqn.clone()).unwrap(); - let inner = - unsafe { spdk_nvmf_tgt_find_subsystem(self.inner, c_nqn.as_ptr()) }; - if inner.is_null() { - None - } else { - Some(Subsystem { - inner, - nqn, - }) - } - } - - /// Stop nvmf target's subsystems and destroy it. 
- /// - /// NOTE: we cannot do this in drop because target destroy is asynchronous - /// operation. - pub async fn destroy(mut self) -> Result<()> { - debug!("Destroying nvmf target {}", self); - - // stop accepting new connections - let rc = unsafe { - spdk_nvmf_tgt_stop_listen(self.inner, &mut self.trid as *mut _) - }; - errno_result_from_i32((), rc).context(StopListenTarget {})?; - if !self.acceptor_poller.is_null() { - unsafe { spdk_poller_unregister(&mut self.acceptor_poller) }; - } - - //TODO: make async - let pg_copy = self.pg; - let fut = async move { - let (s, r) = oneshot::channel::>(); - unsafe { - spdk_nvmf_poll_group_destroy( - pg_copy, - Some(done_errno_cb), - cb_arg(s), - ) - }; - assert_eq!(r.await.is_ok(), true); - }; - - fut.await; - - // first we need to inactivate all subsystems of the target - for mut subsystem in SubsystemIter::new(self.inner) { - subsystem.stop().await?; - } - - let (sender, receiver) = oneshot::channel::>(); - unsafe { - spdk_nvmf_tgt_destroy( - self.inner, - Some(done_errno_cb), - cb_arg(sender), - ); - } - - receiver - .await - .expect("Cancellation is not supported") - .context(DestroyTarget { - endpoint: self.endpoint(), - })?; - - info!("nvmf target was destroyed"); - Ok(()) - } - - /// Return address:port of the target - pub fn endpoint(&self) -> String { - unsafe { - format!( - "{}:{}", - CStr::from_ptr(&self.trid.traddr[0]).to_str().unwrap(), - CStr::from_ptr(&self.trid.trsvcid[0]).to_str().unwrap(), - ) - } - } -} - -impl fmt::Display for Target { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.endpoint()) - } -} - -/// Create nvmf target which will be used for exporting the replicas. 
-pub async fn init(address: &str) -> Result<()> { - let config = Config::get(); - let replica_port = Config::get().nexus_opts.nvmf_replica_port; - let mut boxed_tgt = Box::new(Target::create(address, replica_port)?); - boxed_tgt.add_tcp_transport().await?; - boxed_tgt - .listen() - .unwrap_or_else(|_| panic!("failed to listen on {}", replica_port)); - boxed_tgt.accept()?; - - if config.nexus_opts.nvmf_discovery_enable { - boxed_tgt.create_discovery_subsystem()?.start().await?; - } - - NVMF_TGT.with(move |nvmf_tgt| { - if nvmf_tgt.borrow().is_some() { - panic!("Double initialization of nvmf"); - } - *nvmf_tgt.borrow_mut() = Some(boxed_tgt); - }); - Ok(()) -} - -/// Destroy nvmf target with all its subsystems. -pub async fn fini() -> Result<()> { - let tgt = NVMF_TGT.with(move |nvmf_tgt| { - nvmf_tgt - .borrow_mut() - .take() - .expect("Called nvmf fini without init") - }); - tgt.destroy().await -} - /// Export given bdev over nvmf target. -pub async fn share(uuid: &str, bdev: &Bdev) -> Result<()> { +pub async fn share(uuid: &str, bdev: &Bdev) -> Result<(), NvmfError> { if let Some(ss) = NvmfSubsystem::nqn_lookup(uuid) { assert_eq!(bdev.name(), ss.bdev().unwrap().name()); return Ok(()); }; - let ss = NvmfSubsystem::try_from(bdev.clone()).unwrap(); - ss.start().await.unwrap(); + + let ss = NvmfSubsystem::try_from(bdev.clone())?; + ss.start().await?; + Ok(()) } /// Un-export given bdev from nvmf target. -/// Unsharing replica which is not shared is not an error. -pub async fn unshare(uuid: &str) -> Result<()> { +/// Unsharing a replica which is not shared is not an error. 
+pub async fn unshare(uuid: &str) -> Result<(), NvmfError> { if let Some(ss) = NvmfSubsystem::nqn_lookup(uuid) { - ss.stop().await.unwrap(); + ss.stop().await?; ss.destroy(); } Ok(()) From 7873353edb3c4caabb4fffbe0fb02c50a8152bff Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Fri, 30 Oct 2020 16:11:00 +0000 Subject: [PATCH 32/92] nvmeadm: use the snafu crate to generate improved errors messages CAS-338 Replace use of the failure crate with snafu. Fixes remaining part of https://github.com/openebs/Mayastor/issues/468 Add more context to errors generate, for example on IO errors, add filename being accessed. Change how connection in progress is handled by returning a specific error so that the CSI driver can check explicitly, and use error kind for detection instead of the magic number 114. --- Cargo.lock | 2 +- csi/src/dev/nvmf.rs | 16 ++-- csi/src/error.rs | 8 ++ nix/pkgs/mayastor/default.nix | 2 +- nvmeadm/Cargo.toml | 2 +- nvmeadm/src/error.rs | 55 +++++++++++ nvmeadm/src/lib.rs | 49 +++------- nvmeadm/src/nvme_namespaces.rs | 8 +- nvmeadm/src/nvmf_discovery.rs | 157 ++++++++++++++++++++++---------- nvmeadm/src/nvmf_subsystem.rs | 81 ++++++++++------ nvmeadm/tests/discovery_test.rs | 7 +- 11 files changed, 257 insertions(+), 130 deletions(-) create mode 100644 nvmeadm/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 54a3fbd5c..ab3485466 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2052,13 +2052,13 @@ dependencies = [ "clap", "derive_builder", "enum-primitive-derive", - "failure", "glob", "ioctl-gen", "libc", "nix 0.14.1", "num-traits 0.1.43", "once_cell", + "snafu", "uuid", ] diff --git a/csi/src/dev/nvmf.rs b/csi/src/dev/nvmf.rs index 14157a3a3..27624b78d 100644 --- a/csi/src/dev/nvmf.rs +++ b/csi/src/dev/nvmf.rs @@ -73,20 +73,20 @@ impl TryFrom<&Url> for NvmfAttach { #[tonic::async_trait] impl Attach for NvmfAttach { async fn attach(&self) -> Result<(), DeviceError> { - if let Err(failure) = nvmeadm::nvmf_discovery::connect( + if let Err(error) = 
nvmeadm::nvmf_discovery::connect( &self.host, self.port as u32, &self.nqn, ) { - if let Ok(error) = failure.downcast::() { - if let Some(errno) = error.raw_os_error() { - if errno == 114 { - return Ok(()); - } + match (error) { + nvmeadm::error::NvmeError::ConnectInProgress => return Ok(()), + _ => { + return Err(DeviceError::from(format!( + "connect failed: {}", + error + ))) } - return Err(DeviceError::from(error)); } - return Err(DeviceError::new("connect failed")); } Ok(()) diff --git a/csi/src/error.rs b/csi/src/error.rs index 1ee0d1914..a6ec1655e 100644 --- a/csi/src/error.rs +++ b/csi/src/error.rs @@ -86,3 +86,11 @@ impl From for DeviceError { } } } + +impl From for DeviceError { + fn from(error: nvmeadm::error::NvmeError) -> DeviceError { + DeviceError { + message: format!("{}", error), + } + } +} diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index ea4c92033..47584d822 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -41,7 +41,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1al1gx0yn37fmp42hgyl9gn9cw91xd93aappdn84j2sa1nd7mgp0"; + cargoSha256 = "0flkzd6ygri3vibw7g7b9ibw8b1sbrir036il3i9jvapfq8kgxaf"; inherit version; src = whitelistSource ../../../. 
[ "Cargo.lock" diff --git a/nvmeadm/Cargo.toml b/nvmeadm/Cargo.toml index 4fbe0c1ce..f5e293053 100644 --- a/nvmeadm/Cargo.toml +++ b/nvmeadm/Cargo.toml @@ -8,11 +8,11 @@ edition = "2018" clap = "2.33.0" derive_builder = "0.7" enum-primitive-derive = "^0.1" -failure = "0.1" glob = "*" ioctl-gen = "0.1" libc = "0.2" nix = "0.14" num-traits = "^0.1" once_cell = "1.3" +snafu = "0.6" uuid = { version = "0.7", features = ["v4"] } diff --git a/nvmeadm/src/error.rs b/nvmeadm/src/error.rs new file mode 100644 index 000000000..111c1285e --- /dev/null +++ b/nvmeadm/src/error.rs @@ -0,0 +1,55 @@ +use snafu::Snafu; + +#[derive(Debug, Snafu)] +#[allow(missing_docs)] +#[snafu(visibility = "pub(crate)")] +pub enum NvmeError { + #[snafu(display("IO error:"))] + IoError { source: std::io::Error }, + #[snafu(display("Failed to parse {}: {}, {}", path, contents, error))] + ValueParseError { + path: String, + contents: String, + error: String, + }, + #[snafu(display("Failed to parse value"))] + ParseError {}, + #[snafu(display("File IO error: {}, {}", filename, source))] + FileIoError { + filename: String, + source: std::io::Error, + }, + #[snafu(display("nqn: {} not found", text))] + NqnNotFound { text: String }, + #[snafu(display("No nvmf subsystems found"))] + NoSubsystems, + #[snafu(display("Connect in progress"))] + ConnectInProgress, + #[snafu(display("NVMe connect failed: {}, {}", filename, source))] + ConnectError { + source: std::io::Error, + filename: String, + }, + #[snafu(display("IO error during NVMe discovery"))] + DiscoveryError { source: nix::Error }, + #[snafu(display("Controller with nqn: {} not found", text))] + CtlNotFound { text: String }, + #[snafu(display("Invalid path {}: {}", path, source))] + InvalidPath { + source: std::path::StripPrefixError, + path: String, + }, + #[snafu(display("NVMe subsystems error: {}, {}", path_prefix, source))] + SubSysError { + source: glob::PatternError, + path_prefix: String, + }, +} + +impl From for NvmeError { + fn from(source: 
std::io::Error) -> NvmeError { + NvmeError::IoError { + source, + } + } +} diff --git a/nvmeadm/src/lib.rs b/nvmeadm/src/lib.rs index 8942b0e3e..8cf70793f 100644 --- a/nvmeadm/src/lib.rs +++ b/nvmeadm/src/lib.rs @@ -20,8 +20,6 @@ #[macro_use] extern crate derive_builder; -#[macro_use] -extern crate failure; extern crate glob; #[macro_use] extern crate nix; @@ -30,60 +28,39 @@ extern crate ioctl_gen; #[macro_use] extern crate enum_primitive_derive; use crate::nvme_page::NvmeAdminCmd; -use std::{ - fs, - io::{self, ErrorKind}, - path::Path, - str::FromStr, -}; +use std::{fs, path::Path, str::FromStr}; +pub mod error; pub mod nvme_namespaces; mod nvme_page; pub mod nvmf_discovery; pub mod nvmf_subsystem; +use error::{IoError, NvmeError}; +use snafu::ResultExt; + /// the device entry in /dev for issuing ioctls to the kernels nvme driver const NVME_FABRICS_PATH: &str = "/dev/nvme-fabrics"; /// ioctl for passing any NVMe command to the kernel const NVME_ADMIN_CMD_IOCTL: u32 = iowr!(b'N', 0x41, std::mem::size_of::()); -#[derive(Debug, Fail)] -pub enum NvmeError { - #[fail(display = "IO error: {}", error)] - IoError { error: io::Error }, - #[fail(display = "nqn: {} not found", _0)] - NqnNotFound(String), - #[fail(display = "controller with nqn: {} not found", _0)] - CtlNotFound(String), - #[fail(display = "no nvmf subsystems found")] - NoSubsystems, -} -impl From for NvmeError { - fn from(err: io::Error) -> NvmeError { - NvmeError::IoError { - error: err, - } - } -} /// Read and parse value from a sysfs file -pub fn parse_value(dir: &Path, file: &str) -> Result +pub fn parse_value(dir: &Path, file: &str) -> Result where T: FromStr, + T::Err: ToString, { let path = dir.join(file); - let s = fs::read_to_string(&path)?; + let s = fs::read_to_string(&path).context(IoError {})?; let s = s.trim(); match s.parse() { Ok(v) => Ok(v), - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - format!( - "Failed to parse {}: {}", - path.as_path().to_str().unwrap(), - s - ), - 
)), + Err(e) => Err(NvmeError::ValueParseError { + path: path.as_path().to_str().unwrap().to_string(), + contents: s.to_string(), + error: e.to_string(), + }), } } diff --git a/nvmeadm/src/nvme_namespaces.rs b/nvmeadm/src/nvme_namespaces.rs index 606394b7d..13d371940 100644 --- a/nvmeadm/src/nvme_namespaces.rs +++ b/nvmeadm/src/nvme_namespaces.rs @@ -1,5 +1,5 @@ -use crate::parse_value; -use failure::Error; +use crate::{error, parse_value}; +use error::NvmeError; use glob::glob; use std::{os::unix::fs::FileTypeExt, path::Path}; @@ -33,7 +33,7 @@ impl NvmeDevice { /// Construct a new NVMe device from a given path. The [struct.NvmeDevice] /// will fill in all the details defined within the structure or return an /// error if the value for the structure could not be found. - fn new(p: &Path) -> Result { + fn new(p: &Path) -> Result { let name = p.file_name().unwrap().to_str().unwrap(); let devpath = format!("/sys/block/{}", name); let subsyspath = format!("/sys/block/{}/device", name); @@ -65,7 +65,7 @@ pub struct NvmeDeviceList { } impl Iterator for NvmeDeviceList { - type Item = Result; + type Item = Result; fn next(&mut self) -> Option { if let Some(e) = self.devices.pop() { return Some(NvmeDevice::new(Path::new(&e))); diff --git a/nvmeadm/src/nvmf_discovery.rs b/nvmeadm/src/nvmf_discovery.rs index 64b4b5e22..aeee8ee3d 100644 --- a/nvmeadm/src/nvmf_discovery.rs +++ b/nvmeadm/src/nvmf_discovery.rs @@ -1,30 +1,29 @@ -use crate::nvme_page::{ - NvmeAdminCmd, - NvmfDiscRspPageEntry, - NvmfDiscRspPageHdr, +use crate::{ + error, + nvme_page::{NvmeAdminCmd, NvmfDiscRspPageEntry, NvmfDiscRspPageHdr}, + nvmf_subsystem::{NvmeSubsystems, Subsystem}, + NVME_ADMIN_CMD_IOCTL, + NVME_FABRICS_PATH, }; -use std::fmt; - -use nix::libc::ioctl as nix_ioctl; - -use crate::nvmf_subsystem::{NvmeSubsystems, Subsystem}; /// when connecting to a NVMF target, we MAY send a NQN that we want to be /// referred as. 
const MACHINE_UUID_PATH: &str = "/sys/class/dmi/id/product_uuid"; -use crate::{NvmeError, NVME_ADMIN_CMD_IOCTL, NVME_FABRICS_PATH}; -use failure::Error; +use error::{ConnectError, DiscoveryError, FileIoError, NvmeError}; +use nix::libc::ioctl as nix_ioctl; +use num_traits::FromPrimitive; +use snafu::ResultExt; use std::{ + fmt, fs::OpenOptions, - io::{Read, Write}, + io::{ErrorKind, Read, Write}, + net::IpAddr, + os::unix::io::AsRawFd, path::Path, str::FromStr, }; -use num_traits::FromPrimitive; -use std::{net::IpAddr, os::unix::io::AsRawFd}; - static HOST_ID: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { let mut host_id = uuid::Uuid::new_v4().to_string(); @@ -136,18 +135,28 @@ impl Discovery { /// /// The pages are iteratable so you can filter exactly what you are looing /// for - pub fn discover(&mut self) -> Result<&Vec, Error> { + pub fn discover(&mut self) -> Result<&Vec, NvmeError> { self.arg_string = format!( "nqn=nqn.2014-08.org.nvmexpress.discovery,transport={},traddr={},trsvcid={}", self.transport, self.traddr, self.trsvcid ); let p = Path::new(NVME_FABRICS_PATH); - let mut file = OpenOptions::new().write(true).read(true).open(&p)?; + let mut file = + OpenOptions::new().write(true).read(true).open(&p).context( + FileIoError { + filename: NVME_FABRICS_PATH, + }, + )?; - file.write_all(self.arg_string.as_bytes())?; + file.write_all(self.arg_string.as_bytes()) + .context(FileIoError { + filename: NVME_FABRICS_PATH, + })?; let mut buf = String::new(); - file.read_to_string(&mut buf)?; + file.read_to_string(&mut buf).context(FileIoError { + filename: NVME_FABRICS_PATH, + })?; // get the ctl=value from the controller let v = buf.split(',').collect::>()[0] .split('=') @@ -160,10 +169,14 @@ impl Discovery { } // private function that retrieves number of records - fn get_discovery_response_page_entries(&self) -> Result { + fn get_discovery_response_page_entries(&self) -> Result { + let target = format!("/dev/nvme{}", self.ctl_id); let f = 
OpenOptions::new() .read(true) - .open(Path::new(&format!("/dev/nvme{}", self.ctl_id)))?; + .open(Path::new(&target)) + .context(FileIoError { + filename: target, + })?; // See NVM-Express1_3d 5.14 let hdr_len = std::mem::size_of::() as u32; @@ -174,7 +187,7 @@ impl Discovery { cmd.dptr_len = hdr_len; cmd.dptr = &h as *const _ as u64; - // bytes to dwords, devide by 4. Spec says 0's value + // bytes to dwords, divide by 4. Spec says 0's value let dword_count = (hdr_len >> 2) - 1; let numdl = dword_count & 0xFFFF; @@ -188,25 +201,29 @@ impl Discovery { f.as_raw_fd(), u64::from(NVME_ADMIN_CMD_IOCTL), &cmd - ))? + )) + .context(DiscoveryError)?; }; Ok(h.numrec) } // note we can only transfer max_io size. This means that if the number of - // controllers - // is larger we have to do {] while() we control the size for our - // controllers not others! This means in the future we will have to come + // controllers is larger we have to do {} while() we control the size for + // our controllers not others! This means in the future we will have to come // back to this // - // What really want is a stream of pages where we can filter process them + // What we really want is a stream of pages where we can filter process them // one by one. - fn get_discovery_response_pages(&mut self) -> Result { + fn get_discovery_response_pages(&mut self) -> Result { + let target = format!("/dev/nvme{}", self.ctl_id); let f = OpenOptions::new() .read(true) - .open(Path::new(&format!("/dev/nvme{}", self.ctl_id)))?; + .open(Path::new(&target)) + .context(FileIoError { + filename: target, + })?; let count = self.get_discovery_response_page_entries()?; @@ -234,7 +251,8 @@ impl Discovery { f.as_raw_fd(), u64::from(NVME_ADMIN_CMD_IOCTL), &cmd - ))? + )) + .context(DiscoveryError)? 
}; let hdr = unsafe { &mut *(buffer as *mut NvmfDiscRspPageHdr) }; @@ -288,24 +306,35 @@ impl Discovery { // we need to close the discovery controller when we are done and before we // connect - fn remove_controller(&self) -> Result<(), Error> { + fn remove_controller(&self) -> Result<(), NvmeError> { let target = format!("/sys/class/nvme/nvme{}/delete_controller", self.ctl_id); let path = Path::new(&target); - let mut file = OpenOptions::new().write(true).open(&path)?; - file.write_all(b"1")?; + let mut file = OpenOptions::new().write(true).open(&path).context( + FileIoError { + filename: &target, + }, + )?; + file.write_all(b"1").context(FileIoError { + filename: target, + })?; Ok(()) } /// Connect to all discovery log page entries found during the discovery /// phase - pub fn connect_all(&mut self) -> Result<(), Error> { + pub fn connect_all(&mut self) -> Result<(), NvmeError> { if self.entries.is_empty() { - return Err(Error::from(NvmeError::NoSubsystems)); + return Err(NvmeError::NoSubsystems {}); } let p = Path::new(NVME_FABRICS_PATH); - let mut file = OpenOptions::new().write(true).read(true).open(&p)?; + let mut file = + OpenOptions::new().write(true).read(true).open(&p).context( + ConnectError { + filename: NVME_FABRICS_PATH, + }, + )?; // we are ignoring errors here, and connect to all possible devices if let Err(connections) = self .entries @@ -352,18 +381,29 @@ impl Discovery { /// ``` /// - pub fn connect(&mut self, nqn: &str) -> Result { + pub fn connect(&mut self, nqn: &str) -> Result { let p = Path::new(NVME_FABRICS_PATH); if let Some(ss) = self.entries.iter_mut().find(|p| p.subnqn == nqn) { let mut file = - OpenOptions::new().write(true).read(true).open(&p)?; - file.write_all(ss.build_connect_args().unwrap().as_bytes())?; + OpenOptions::new().write(true).read(true).open(&p).context( + ConnectError { + filename: NVME_FABRICS_PATH, + }, + )?; + file.write_all(ss.build_connect_args().unwrap().as_bytes()) + .context(ConnectError { + filename: 
NVME_FABRICS_PATH, + })?; let mut buf = String::new(); - file.read_to_string(&mut buf)?; + file.read_to_string(&mut buf).context(ConnectError { + filename: NVME_FABRICS_PATH, + })?; Ok(buf) } else { - Err(NvmeError::NqnNotFound(nqn.into()).into()) + Err(NvmeError::NqnNotFound { + text: nqn.into(), + }) } } } @@ -387,7 +427,7 @@ impl DiscoveryBuilder { } impl DiscoveryLogEntry { - pub fn build_connect_args(&mut self) -> Result { + pub fn build_connect_args(&mut self) -> Result { let mut connect_args = String::new(); let host_id = HOST_ID.as_str(); @@ -422,7 +462,11 @@ impl DiscoveryLogEntry { /// ``` /// -pub fn connect(ip_addr: &str, port: u32, nqn: &str) -> Result { +pub fn connect( + ip_addr: &str, + port: u32, + nqn: &str, +) -> Result { let mut connect_args = String::new(); let host_id = HOST_ID.as_str(); @@ -438,10 +482,27 @@ pub fn connect(ip_addr: &str, port: u32, nqn: &str) -> Result { connect_args.push_str(&format!("trsvcid={}", port)); let p = Path::new(NVME_FABRICS_PATH); - let mut file = OpenOptions::new().write(true).read(true).open(&p)?; - file.write_all(connect_args.as_bytes())?; + let mut file = OpenOptions::new().write(true).read(true).open(&p).context( + ConnectError { + filename: NVME_FABRICS_PATH, + }, + )?; + if let Err(e) = file.write_all(connect_args.as_bytes()) { + match e.kind() { + ErrorKind::AlreadyExists => { + return Err(NvmeError::ConnectInProgress) + } + _ => { + return Err(NvmeError::IoError { + source: e, + }) + } + } + } let mut buf = String::new(); - file.read_to_string(&mut buf)?; + file.read_to_string(&mut buf).context(ConnectError { + filename: NVME_FABRICS_PATH, + })?; Ok(buf) } @@ -452,8 +513,8 @@ pub fn connect(ip_addr: &str, port: u32, nqn: &str) -> Result { /// let num_disconnects = nvmeadm::nvmf_discovery::disconnect("mynqn"); /// ``` -pub fn disconnect(nqn: &str) -> Result { - let subsys: Result, Error> = NvmeSubsystems::new()? 
+pub fn disconnect(nqn: &str) -> Result { + let subsys: Result, NvmeError> = NvmeSubsystems::new()? .filter_map(Result::ok) .filter(|e| e.nqn == nqn) .map(|e| { diff --git a/nvmeadm/src/nvmf_subsystem.rs b/nvmeadm/src/nvmf_subsystem.rs index 542bccf07..324646714 100644 --- a/nvmeadm/src/nvmf_subsystem.rs +++ b/nvmeadm/src/nvmf_subsystem.rs @@ -1,9 +1,9 @@ -use crate::{parse_value, NvmeError}; -use failure::Error; +use crate::{error, parse_value}; +use error::{FileIoError, InvalidPath, NvmeError, SubSysError}; use glob::glob; -use std::{fs::OpenOptions, io::Write, path::Path}; +use snafu::ResultExt; +use std::{fs::OpenOptions, io::Write, path::Path, str::FromStr}; -use std::str::FromStr; /// Subsystem struct shows us all the connect fabrics. This does not include /// NVMe devices that are connected by trtype=PCIe #[derive(Default, Clone, Debug)] @@ -29,9 +29,12 @@ pub struct Subsystem { impl Subsystem { /// scans the sysfs directory for attached subsystems skips any transport /// that does not contain a value that is being read in the implementation - pub fn new(source: &Path) -> Result { + pub fn new(source: &Path) -> Result { let name = source - .strip_prefix("/sys/devices/virtual/nvme-fabrics/ctl")? + .strip_prefix("/sys/devices/virtual/nvme-fabrics/ctl") + .context(InvalidPath { + path: format!("{:?}", source), + })? 
.display() .to_string(); let instance = u32::from_str(name.trim_start_matches("nvme")).unwrap(); @@ -43,9 +46,9 @@ impl Subsystem { let model = parse_value::(&source, "model")?; if serial == "" || model == "" { - return Err( - NvmeError::CtlNotFound("discovery controller".into()).into() - ); + return Err(NvmeError::CtlNotFound { + text: "discovery controller".into(), + }); } // if it does not have a serial and or model -- its a discovery @@ -64,30 +67,51 @@ impl Subsystem { }) } /// issue a rescan to the controller to find new namespaces - pub fn rescan(&self) -> Result<(), Error> { - let target = format!("/sys/class/nvme/{}/rescan_controller", self.name); - let path = Path::new(&target); + pub fn rescan(&self) -> Result<(), NvmeError> { + let filename = + format!("/sys/class/nvme/{}/rescan_controller", self.name); + let path = Path::new(&filename); - let mut file = OpenOptions::new().write(true).open(&path)?; - file.write_all(b"1")?; + let mut file = OpenOptions::new().write(true).open(&path).context( + FileIoError { + filename: &filename, + }, + )?; + file.write_all(b"1").context(FileIoError { + filename, + })?; Ok(()) } /// disconnects the transport dropping all namespaces - pub fn disconnect(&self) -> Result<(), Error> { - let target = format!("/sys/class/nvme/{}/delete_controller", self.name); - let path = Path::new(&target); + pub fn disconnect(&self) -> Result<(), NvmeError> { + let filename = + format!("/sys/class/nvme/{}/delete_controller", self.name); + let path = Path::new(&filename); - let mut file = OpenOptions::new().write(true).open(&path)?; - file.write_all(b"1")?; + let mut file = OpenOptions::new().write(true).open(&path).context( + FileIoError { + filename: &filename, + }, + )?; + file.write_all(b"1").context(FileIoError { + filename, + })?; Ok(()) } /// resets the nvme controller - pub fn reset(&self) -> Result<(), Error> { - let target = format!("/sys/class/nvme/{}/reset_controller", self.name); - let path = Path::new(&target); + pub fn 
reset(&self) -> Result<(), NvmeError> { + let filename = + format!("/sys/class/nvme/{}/reset_controller", self.name); + let path = Path::new(&filename); - let mut file = OpenOptions::new().write(true).open(&path)?; - file.write_all(b"1")?; + let mut file = OpenOptions::new().write(true).open(&path).context( + FileIoError { + filename: &filename, + }, + )?; + file.write_all(b"1").context(FileIoError { + filename, + })?; Ok(()) } } @@ -99,7 +123,7 @@ pub struct NvmeSubsystems { } impl Iterator for NvmeSubsystems { - type Item = Result; + type Item = Result; fn next(&mut self) -> Option { if let Some(e) = self.entries.pop() { return Some(Subsystem::new(Path::new(&e))); @@ -110,8 +134,11 @@ impl Iterator for NvmeSubsystems { impl NvmeSubsystems { /// Construct a new list of subsystems - pub fn new() -> Result { - let path_entries = glob("/sys/devices/virtual/nvme-fabrics/ctl/nvme*")?; + pub fn new() -> Result { + let path_prefix = "/sys/devices/virtual/nvme-fabrics/ctl/nvme*"; + let path_entries = glob(path_prefix).context(SubSysError { + path_prefix, + })?; let mut entries = Vec::new(); for entry in path_entries { if let Ok(path) = entry { diff --git a/nvmeadm/tests/discovery_test.rs b/nvmeadm/tests/discovery_test.rs index 2efd1b805..e626b2252 100644 --- a/nvmeadm/tests/discovery_test.rs +++ b/nvmeadm/tests/discovery_test.rs @@ -1,6 +1,5 @@ use nvmeadm::nvmf_discovery::{disconnect, DiscoveryBuilder}; -use failure::Error; use std::{ fs::File, io::prelude::*, @@ -92,7 +91,7 @@ pub struct NvmfTarget { } impl NvmfTarget { - pub fn new(config_file: &str, nvmf_port: &str) -> Result { + pub fn new(config_file: &str, nvmf_port: &str) -> Self { create_config_file(config_file, nvmf_port); let spdk_proc = Command::new("../target/debug/mayastor") .arg("-y") @@ -109,9 +108,9 @@ impl NvmfTarget { .build() .unwrap(); - Ok(NvmfTarget { + Self { spdk_proc, - }) + } } } From d57652320eb9d401ef0aae1e17719b94a157d8bc Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Tue, 27 Oct 2020 
15:35:31 +0000 Subject: [PATCH 33/92] CAS-492 Serialise reconfigure Ensure only one reconfigure task is active per-nexus at a time. This is achieved by locking an async mutex at the start of Nexus::reconfigure(). --- Cargo.lock | 1 + mayastor/Cargo.toml | 1 + mayastor/src/bdev/nexus/nexus_bdev.rs | 7 ++++++- nix/pkgs/mayastor/default.nix | 26 +++++++++++++------------- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab3485466..b00e15cb9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1723,6 +1723,7 @@ name = "mayastor" version = "0.1.0" dependencies = [ "assert_matches", + "async-mutex", "async-task", "async-trait", "bincode", diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index e9c2ca402..e46127b7f 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -33,6 +33,7 @@ name = "casperf" path = "src/bin/casperf.rs" [dependencies] +async-mutex = "1.4.0" async-task = "4.0.2" async-trait = "0.1.36" bincode = "1.2" diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index edf4ae205..849891a6e 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -10,6 +10,7 @@ use std::{ os::raw::c_void, }; +use async_mutex::Mutex; use futures::channel::oneshot; use nix::errno::Errno; use serde::Serialize; @@ -313,6 +314,8 @@ pub struct Nexus { pub nexus_target: Option, /// the maximum number of times to attempt to send an IO pub(crate) max_io_attempts: i32, + /// mutex to serialise reconfigure + reconfigure_mutex: Mutex<()>, } unsafe impl core::marker::Sync for Nexus {} @@ -405,6 +408,7 @@ impl Nexus { size, nexus_target: None, max_io_attempts: cfg.err_store_opts.max_io_attempts, + reconfigure_mutex: Mutex::new(()), }); n.bdev.set_uuid(match uuid { @@ -439,8 +443,9 @@ impl Nexus { /// reconfigure the child event handler pub(crate) async fn reconfigure(&mut self, event: DREvent) { + let _var = self.reconfigure_mutex.lock().await; let (s, r) = 
oneshot::channel::(); - //assert!(self.dr_complete_notify.is_none()); + assert!(self.dr_complete_notify.is_none()); self.dr_complete_notify = Some(s); info!( diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 47584d822..c144a30fa 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -34,14 +34,12 @@ let lib.hasPrefix (toString (src + "/${allowedPrefix}")) path) allowedPrefixes) src; - version_drv = import ../../lib/version.nix { inherit lib stdenv git; }; version = builtins.readFile "${version_drv}"; - buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0flkzd6ygri3vibw7g7b9ibw8b1sbrir036il3i9jvapfq8kgxaf"; + cargoSha256 = "0smfd31mzxgnprd30536ww2csy9n0mksaac09p6b138pp7gk7lsj"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" @@ -84,16 +82,18 @@ let }; in { - release = rustPlatform.buildRustPackage (buildProps // { - buildType = "release"; - buildInputs = buildProps.buildInputs ++ [ libspdk ]; - SPDK_PATH = "${libspdk}"; - }); - debug = rustPlatform.buildRustPackage (buildProps // { - buildType = "debug"; - buildInputs = buildProps.buildInputs ++ [ libspdk-dev ]; - SPDK_PATH = "${libspdk-dev}"; - }); + release = rustPlatform.buildRustPackage + (buildProps // { + buildType = "release"; + buildInputs = buildProps.buildInputs ++ [ libspdk ]; + SPDK_PATH = "${libspdk}"; + }); + debug = rustPlatform.buildRustPackage + (buildProps // { + buildType = "debug"; + buildInputs = buildProps.buildInputs ++ [ libspdk-dev ]; + SPDK_PATH = "${libspdk-dev}"; + }); # this is for an image that does not do a build of mayastor adhoc = stdenv.mkDerivation { name = "mayastor-adhoc"; From 9c3514051a3254ef9f77a2ece3ab08049ea462fa Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 5 Nov 2020 12:33:15 +0000 Subject: [PATCH 34/92] Unaffinitize rebuild test threads Unaffinitize threads created by the rebuild test and don't auto share the nexus 
if not needed in hopes of reducing the number of CI failures. --- mayastor/tests/common/mod.rs | 4 ++-- mayastor/tests/nexus_rebuild.rs | 31 +++++++++++++++++-------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index b1bb6e9b8..8ce73a109 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -432,7 +432,7 @@ pub fn wait_for_rebuild( let mut curr_state = job.state(); let ch = job.notify_chan.1.clone(); let cname = name.clone(); - let t = std::thread::spawn(move || { + let t = Mthread::spawn_unaffinitized(move || { let now = std::time::Instant::now(); let mut error = Ok(()); while curr_state != state && error.is_ok() { @@ -483,7 +483,7 @@ pub fn fio_verify_size(device: &str, size: u64) -> i32 { pub fn reactor_run_millis(milliseconds: u64) { let (s, r) = unbounded::<()>(); - std::thread::spawn(move || { + Mthread::spawn_unaffinitized(move || { std::thread::sleep(Duration::from_millis(milliseconds)); s.send(()) }); diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index ddf1a2e6b..a3f5f880c 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -239,8 +239,7 @@ fn rebuild_with_load() { Reactor::block_on(async { nexus_create(NEXUS_SIZE, 1, false).await; let nexus = nexus_lookup(nexus_name()).unwrap(); - let nexus_device = - common::device_path_from_uri(nexus.get_share_uri().unwrap()); + let nexus_device = nexus_share().await; let (s, r1) = unbounded::(); Mthread::spawn_unaffinitized(move || { @@ -276,19 +275,11 @@ async fn nexus_create(size: u64, children: u64, fill_random: bool) { .await .unwrap(); - let nexus = nexus_lookup(nexus_name()).unwrap(); - let device = common::device_path_from_uri( - nexus - .share(ShareProtocolNexus::NexusNbd, None) - .await - .unwrap(), - ); - reactor_poll!(100); - if fill_random { + let device = nexus_share().await; let nexus_device = device.clone(); let (s, r) = 
unbounded::(); - std::thread::spawn(move || { + Mthread::spawn_unaffinitized(move || { s.send(common::dd_urandom_blkdev(&nexus_device)) }); let dd_result: i32; @@ -296,13 +287,25 @@ async fn nexus_create(size: u64, children: u64, fill_random: bool) { assert_eq!(dd_result, 0, "Failed to fill nexus with random data"); let (s, r) = unbounded::(); - std::thread::spawn(move || { + Mthread::spawn_unaffinitized(move || { s.send(common::compare_nexus_device(&device, &get_disk(0), true)) }); reactor_poll!(r); } } +async fn nexus_share() -> String { + let nexus = nexus_lookup(nexus_name()).unwrap(); + let device = common::device_path_from_uri( + nexus + .share(ShareProtocolNexus::NexusNbd, None) + .await + .unwrap(), + ); + reactor_poll!(200); + device +} + async fn nexus_add_child(new_child: u64, wait: bool) { let nexus = nexus_lookup(nexus_name()).unwrap(); @@ -334,7 +337,7 @@ async fn nexus_test_child(child: u64) { let nexus = nexus_lookup(nexus_name()).unwrap(); let (s, r) = unbounded::(); - std::thread::spawn(move || { + Mthread::spawn_unaffinitized(move || { s.send(common::compare_devices( &get_disk(0), &get_disk(child), From 5b8741d257930936b75d218667f9b9435b1e99a4 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 5 Nov 2020 12:33:58 +0000 Subject: [PATCH 35/92] Update Jenkins nix configuration Add a daily nix garbage collection timer whenever less than 10G are free --- doc/jenkins.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/jenkins.md b/doc/jenkins.md index d8cc4f60f..a5bbb1f69 100644 --- a/doc/jenkins.md +++ b/doc/jenkins.md @@ -150,6 +150,14 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). 
boot.kernelModules = [ "nbd" "xfs" "nvme_tcp" "kvm_intel" ]; boot.extraModprobeConfig = "options kvm_intel nested=1"; + nix.gc = { + automatic = true; + dates = "daily"; + }; + nix.extraOptions = '' + min-free = ${toString (10 * 1024 * 1024 * 1024)} + ''; + virtualisation.docker.enable = true; networking.firewall.enable = false; @@ -181,7 +189,7 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). users.users.jenkins.openssh.authorizedKeys.keys = [ "ssh-rsa key used by Jenkins master ..." ]; environment.systemPackages = with pkgs; [ - wget curl vim git jdk openiscsi nvme-cli + wget curl vim git jdk openiscsi nvme-cli lsof ]; } ``` From 4ab248734dbf94344b152386b7c2b6013df3a860 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Tue, 27 Oct 2020 15:19:36 +0000 Subject: [PATCH 36/92] Add FindVolume to node plugin service The FindVolume call can be use to 1. Identify which mayastor CSI node plugin has mounted a volume 2. The type of mount: filesystem or raw block This call in now in addition to FreezeFS and UnfreezeFS, so rename csi/src/freezefs.rs -> csi/src/nodeplugin_svc.rs refactor some symbols to more generic names. The mount crate does not show mounts for block devices, use findmnt for that purpose. Split out the utility code using findmnt to a separate mod, and refactor it to support searching for raw block device mounts. 
FreezeFS and UnfreezeFS calls now fail with FailedPrecondition for raw block volumes instead of NotFound --- csi/proto/mayastornodeplugin.proto | 17 +++ csi/src/block_vol.rs | 128 +---------------- csi/src/findmnt.rs | 215 +++++++++++++++++++++++++++++ csi/src/freezefs.rs | 75 ---------- csi/src/nodeplugin_grpc.rs | 75 +++++++--- csi/src/nodeplugin_svc.rs | 164 ++++++++++++++++++++++ csi/src/server.rs | 24 +--- 7 files changed, 459 insertions(+), 239 deletions(-) create mode 100644 csi/src/findmnt.rs delete mode 100644 csi/src/freezefs.rs create mode 100644 csi/src/nodeplugin_svc.rs diff --git a/csi/proto/mayastornodeplugin.proto b/csi/proto/mayastornodeplugin.proto index b02ae3676..0e1977496 100644 --- a/csi/proto/mayastornodeplugin.proto +++ b/csi/proto/mayastornodeplugin.proto @@ -22,8 +22,15 @@ service MayastorNodePlugin { // Unfreeze the file system identified by the volume ID, // no check is made if the file system had been previously frozen. rpc UnfreezeFS (UnfreezeFSRequest) returns (UnfreezeFSReply) {} + // Find the volume identified by the volume ID, and return the mount type: + // raw block or filesystem + rpc FindVolume (FindVolumeRequest) returns (FindVolumeReply) {} } +enum VolumeType { + VOLUME_TYPE_FILESYSTEM = 0; // File system mount + VOLUME_TYPE_RAWBLOCK = 1; // Raw block device mount +} // The request message containing ID of the volume to be frozen message FreezeFSRequest { string volume_id = 1; @@ -42,3 +49,13 @@ message UnfreezeFSRequest { // The response message for the unfreeze request. message UnfreezeFSReply { } + +// Message for request on a volume +message FindVolumeRequest { + string volume_id = 1; +} + +// Message for response to a request for a volume +message FindVolumeReply { + VolumeType volume_type = 1; +} diff --git a/csi/src/block_vol.rs b/csi/src/block_vol.rs index 2902d2f97..227708c90 100644 --- a/csi/src/block_vol.rs +++ b/csi/src/block_vol.rs @@ -1,7 +1,6 @@ //! Functions for CSI publish and unpublish block mode volumes. 
-use serde_json::Value; -use std::{path::Path, process::Command}; +use std::path::Path; use tonic::{Code, Status}; @@ -13,7 +12,7 @@ macro_rules! failure { use crate::{ csi::*, dev::Device, - error::DeviceError, + findmnt, mount::{self}, }; @@ -61,10 +60,10 @@ pub async fn publish_block_volume( //target exists and is a special file // Idempotency, if we have done this already just return success. - match findmnt_device(target_path) { + match findmnt::get_devicepath(target_path) { Ok(findmnt_dev) => { if let Some(fm_devpath) = findmnt_dev { - if equals_findmnt_device(&fm_devpath, &device_path) { + if fm_devpath == device_path { debug!( "{}({}) is already mounted onto {}", fm_devpath, device_path, target_path @@ -153,122 +152,3 @@ pub fn unpublish_block_volume( info!("Volume {} unpublished from {}", volume_id, target_path); Ok(()) } - -/// Keys of interest we expect to find in the JSON output generated -/// by findmnt. -const TARGET_KEY: &str = "target"; -const SOURCE_KEY: &str = "source"; - -/// This function recurses over the de-serialised JSON returned by findmnt -/// and searches for a target (file or directory) and returns the associated -/// device if found. -/// The assumptions made on the structure are: -/// 1. An object has keys named "target" and "source" for a mount point. -/// 2. An object may contain nested arrays of objects. -/// -/// The search is deliberately generic (and hence slower) in an attempt to -/// be more robust to future changes in findmnt. -fn find_findmnt_target_device( - json_val: &serde_json::value::Value, - mountpoint: &str, -) -> Result, DeviceError> { - if let Some(json_array) = json_val.as_array() { - for val in json_array { - if let Some(found) = find_findmnt_target_device(&val, mountpoint)? 
{ - return Ok(Some(found)); - } - } - } - if let Some(json_map) = json_val.as_object() { - if let Some(target) = json_map.get(TARGET_KEY) { - if let Some(source) = json_map.get(SOURCE_KEY) { - if mountpoint == target { - if let Some(source_str) = source.as_str() { - return Ok(Some(source_str.to_string())); - } else { - return Err(DeviceError { - message: "findmnt empty source field".to_string(), - }); - } - } - } else { - return Err(DeviceError { - message: "findmnt missing source field".to_string(), - }); - } - } - // If the object has arrays, then the assumption is that they are arrays - // of objects. - for (_, value) in json_map { - if value.is_array() { - if let Some(found) = - find_findmnt_target_device(value, mountpoint)? - { - return Ok(Some(found)); - } - } - } - } - Ok(None) -} - -/// findmnt command and arguments. -const FINDMNT: &str = "findmnt"; -const FINDMNT_ARGS: [&str; 1] = ["-J"]; - -/// Use the Linux utility findmnt to find the name of the device mounted at a -/// directory or block special file, if any. -fn findmnt_device(mountpoint: &str) -> Result, DeviceError> { - let output = Command::new(FINDMNT).args(&FINDMNT_ARGS).output()?; - if output.status.success() { - let json_str = String::from_utf8(output.stdout)?; - let json: Value = serde_json::from_str(&json_str)?; - if let Some(device) = find_findmnt_target_device(&json, mountpoint)? { - return Ok(Some(device)); - } - } - Ok(None) -} - -/// Unfortunately findmnt may return device names in a format different -/// to that returned by udev. -/// findmnt_device_path is the source field returned from findmnt and -/// can be different for the same source on different systems, -/// for example -/// dev[/nvme0n1], udev[/nvme0n1], tmpfs[/nvme0n1], devtmpfs[/nvme0n1] -/// device_path is the device path returned from udev. -fn equals_findmnt_device(findmnt_device_path: &str, device_path: &str) -> bool { - lazy_static! 
{ - static ref RE_FINDMNT: regex::Regex = regex::Regex::new( - r"(?x).*\[(?P/.*)\] - ", - ) - .unwrap(); - } - - lazy_static! { - static ref RE_DEVPATH: regex::Regex = - regex::Regex::new(r"(?x)/dev(?P/.*)",).unwrap(); - } - - if device_path == findmnt_device_path { - return true; - } else { - // compare the "core" parts of the paths returned by findmnt - // udev - match RE_DEVPATH.captures(device_path) { - Some(dcaps) => match RE_FINDMNT.captures(findmnt_device_path) { - Some(fcaps) => { - return dcaps["device"] == fcaps["device"]; - } - _ => { - warn!("unexpected path from findmnt!"); - } - }, - _ => { - warn!("unexpected device path format!"); - } - } - } - false -} diff --git a/csi/src/findmnt.rs b/csi/src/findmnt.rs new file mode 100644 index 000000000..32e130462 --- /dev/null +++ b/csi/src/findmnt.rs @@ -0,0 +1,215 @@ +use crate::error::DeviceError; +use serde_json::Value; +use std::{collections::HashMap, process::Command, string::String, vec::Vec}; + +// Keys of interest we expect to find in the JSON output generated +/// by findmnt. +const TARGET_KEY: &str = "target"; +const SOURCE_KEY: &str = "source"; +const FSTYPE_KEY: &str = "fstype"; + +#[derive(Debug)] +pub struct DeviceMount { + pub mount_path: String, + pub fstype: String, +} + +#[derive(Debug)] +struct Filter<'a> { + key: &'a str, + value: &'a str, +} + +/// Convert a json value of a key-value pair, to a string, +/// adjusted if required, on the key. +/// +/// The source field returned from findmnt can be different for +/// the same source on different systems, for example +/// dev[/nvme0n1], udev[/nvme0n1], tmpfs[/nvme0n1], devtmpfs[/nvme0n1] +/// this function converts those values to the expected /dev/nvme0n1 +fn key_adjusted_value(key: &str, value: &Value) -> String { + lazy_static! 
{ + static ref RE_UDEVPATH: regex::Regex = regex::Regex::new( + r"(?x).*\[(?P/.*)\] + ", + ) + .unwrap(); + } + + // Do NOT do + // let strvalue = value.to_string(); + // that will return a string delimited with quote characters. + let strvalue: String = match value { + Value::String(str) => str.to_string(), + _ => value.to_string(), + }; + if key == SOURCE_KEY { + if let Some(caps) = RE_UDEVPATH.captures(&strvalue) { + return format!("/dev{}", &caps["device"]); + }; + } + strvalue +} + +const KEYS: &[&str] = &[TARGET_KEY, SOURCE_KEY, FSTYPE_KEY]; + +/// Convert the json map entry to a hashmap of strings +/// with source, target and fstype key-value pairs only. +/// The source field returned from findmnt is converted +/// to the /dev/xxx form if required. +fn jsonmap_to_hashmap( + json_map: &serde_json::Map, +) -> HashMap { + let mut hmap: HashMap = HashMap::new(); + for (key, value) in json_map { + if KEYS.contains(&key.as_str()) { + hmap.insert(key.clone(), key_adjusted_value(key, value)); + } + } + hmap +} + +/// This function recurses over the de-serialised JSON returned by findmnt, +/// finding entries which have key-value pair's matching the +/// filter key-value pair, and populates a vector with those +/// entries as hashmaps of strings. +/// +/// For Mayastor usage the assumptions made on the structure are: +/// 1. An object has keys named "target" and "source" for a mount point. +/// 2. An object may contain nested arrays of objects. +/// +/// The search is deliberately generic (and hence slower) in an attempt to +/// be more robust to future changes in findmnt. 
+fn filter_findmnt( + json_val: &serde_json::value::Value, + filter: &Filter, + results: &mut Vec>, +) { + match json_val { + Value::Array(json_array) => { + for jsonvalue in json_array { + filter_findmnt(&jsonvalue, filter, results); + } + } + Value::Object(json_map) => { + if let Some(value) = json_map.get(filter.key) { + if filter.value == value + || filter.value == key_adjusted_value(filter.key, value) + { + results.push(jsonmap_to_hashmap(json_map)); + } + } + // If the object has arrays, then the assumption is that they are + // arrays of objects. + for (_, jsonvalue) in json_map { + if jsonvalue.is_array() { + filter_findmnt(jsonvalue, filter, results); + } + } + } + jvalue => { + warn!("Unexpected json type {}", jvalue); + } + }; +} + +/// findmnt executable name. +const FIND_MNT: &str = "findmnt"; +/// findmnt arguments, we only want source, target and filesystem type fields. +const FIND_MNT_ARGS: [&str; 3] = ["-J", "-o", "SOURCE,TARGET,FSTYPE"]; + +/// Execute the Linux utility findmnt, collect the json output, +/// invoke the filter function and return the filtered results. +fn findmnt( + params: Filter, +) -> Result>, DeviceError> { + let output = Command::new(FIND_MNT).args(&FIND_MNT_ARGS).output()?; + if output.status.success() { + let json_str = String::from_utf8(output.stdout)?; + let json: Value = serde_json::from_str(&json_str)?; + let mut results: Vec> = Vec::new(); + filter_findmnt(&json, ¶ms, &mut results); + Ok(results) + } else { + Err(DeviceError { + message: String::from_utf8(output.stderr)?, + }) + } +} + +/// Use the Linux utility findmnt to find the name of the device mounted at a +/// directory or block special file, if any. +/// mount_path is the path a device is mounted on. 
+pub(crate) fn get_devicepath( + mount_path: &str, +) -> Result, DeviceError> { + let tgt_filter = Filter { + key: TARGET_KEY, + value: mount_path, + }; + let sources = findmnt(tgt_filter)?; + { + match sources.len() { + 0 => Ok(None), + 1 => { + if let Some(devicepath) = sources[0].get(SOURCE_KEY) { + Ok(Some(devicepath.to_string())) + } else { + Err(DeviceError { + message: "missing source field".to_string(), + }) + } + } + _ => { + // should be impossible ... + warn!( + "multiple sources mounted on target {:?}->{}", + sources, mount_path + ); + Err(DeviceError { + message: format!( + "multiple devices mounted at {}", + mount_path + ), + }) + } + } + } +} + +/// Use the Linux utility findmnt to find the mount paths for a block device, +/// if any. +/// device_path is the path to the device for example "/dev/sda1" +pub(crate) fn get_mountpaths( + device_path: &str, +) -> Result, DeviceError> { + let dev_filter = Filter { + key: SOURCE_KEY, + value: device_path, + }; + match findmnt(dev_filter) { + Ok(results) => { + let mut mountpaths: Vec = Vec::new(); + for entry in results { + if let Some(mountpath) = entry.get(TARGET_KEY) { + if let Some(fstype) = entry.get(FSTYPE_KEY) { + mountpaths.push(DeviceMount { + mount_path: mountpath.to_string(), + fstype: fstype.to_string(), + }) + } else { + error!("Missing fstype for {}", mountpath); + mountpaths.push(DeviceMount { + mount_path: mountpath.to_string(), + fstype: "unspecified".to_string(), + }) + } + } else { + warn!("missing target field {:?}", entry); + } + } + Ok(mountpaths) + } + Err(e) => Err(e), + } +} diff --git a/csi/src/freezefs.rs b/csi/src/freezefs.rs deleted file mode 100644 index 0624edaf6..000000000 --- a/csi/src/freezefs.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! 
The files system freeze support using linux utility fsfreeze -use crate::{ - dev::{Device, DeviceError}, - mount, -}; -use snafu::{ResultExt, Snafu}; -use tokio::process::Command; -use uuid::Uuid; - -#[derive(Debug, Snafu)] -#[snafu(visibility = "pub(crate)")] -pub enum FreezeFsError { - #[snafu(display("Cannot find volume: volume ID: {}", volid))] - VolumeNotFound { volid: String }, - #[snafu(display("Invalid volume ID: {}, {}", volid, source))] - InvalidVolumeId { - source: uuid::parser::ParseError, - volid: String, - }, - #[snafu(display("fsfreeze failed: volume ID: {}, {}", volid, error))] - FsfreezeFailed { volid: String, error: String }, - #[snafu(display("Internal failure: volume ID:{}, {}", volid, source))] - InternalFailure { source: DeviceError, volid: String }, - #[snafu(display("IO error: volume ID: {}, {}", volid, source))] - IOError { - source: std::io::Error, - volid: String, - }, -} - -const FSFREEZE: &str = "fsfreeze"; - -async fn fsfreeze( - volume_id: &str, - freeze_op: &str, -) -> Result<(), FreezeFsError> { - let uuid = Uuid::parse_str(volume_id).context(InvalidVolumeId { - volid: volume_id.to_string(), - })?; - - if let Some(device) = - Device::lookup(&uuid).await.context(InternalFailure { - volid: volume_id.to_string(), - })? 
- { - let device_path = device.devname(); - if let Some(mnt) = mount::find_mount(Some(&device_path), None) { - let args = [freeze_op, &mnt.dest]; - let output = - Command::new(FSFREEZE).args(&args).output().await.context( - IOError { - volid: volume_id.to_string(), - }, - )?; - if output.status.success() { - return Ok(()); - } else { - return Err(FreezeFsError::FsfreezeFailed { - volid: volume_id.to_string(), - error: String::from_utf8(output.stderr).unwrap(), - }); - } - } - } - Err(FreezeFsError::VolumeNotFound { - volid: volume_id.to_string(), - }) -} -pub async fn freeze_volume(volume_id: &str) -> Result<(), FreezeFsError> { - fsfreeze(volume_id, "--freeze").await -} - -pub async fn unfreeze_volume(volume_id: &str) -> Result<(), FreezeFsError> { - fsfreeze(volume_id, "--unfreeze").await -} diff --git a/csi/src/nodeplugin_grpc.rs b/csi/src/nodeplugin_grpc.rs index 4df9433e0..f5292ac55 100644 --- a/csi/src/nodeplugin_grpc.rs +++ b/csi/src/nodeplugin_grpc.rs @@ -3,9 +3,28 @@ //! node as a Mayastor CSI node plugin, but it is not possible to do so within //! the CSI framework. This service must be deployed on all nodes the //! Mayastor CSI node plugin is deployed. 
-use crate::freezefs; -use freezefs::{freeze_volume, unfreeze_volume, FreezeFsError}; -use mayastor_node_plugin::*; +use crate::nodeplugin_svc; +use mayastor_node_plugin::{ + mayastor_node_plugin_server::{ + MayastorNodePlugin, + MayastorNodePluginServer, + }, + FindVolumeReply, + FindVolumeRequest, + FreezeFsReply, + FreezeFsRequest, + UnfreezeFsReply, + UnfreezeFsRequest, + VolumeType, +}; + +use nodeplugin_svc::{ + find_volume, + freeze_volume, + unfreeze_volume, + ServiceError, + TypeOfMount, +}; use tonic::{transport::Server, Code, Request, Response, Status}; pub mod mayastor_node_plugin { @@ -15,30 +34,36 @@ pub mod mayastor_node_plugin { #[derive(Debug, Default)] pub struct MayastorNodePluginSvc {} -impl From for Status { - fn from(err: FreezeFsError) -> Self { +impl From for Status { + fn from(err: ServiceError) -> Self { match err { - FreezeFsError::VolumeNotFound { + ServiceError::VolumeNotFound { .. } => Status::new(Code::NotFound, err.to_string()), - FreezeFsError::FsfreezeFailed { + ServiceError::FsfreezeFailed { .. } => Status::new(Code::Internal, err.to_string()), - FreezeFsError::InvalidVolumeId { + ServiceError::InvalidVolumeId { .. } => Status::new(Code::InvalidArgument, err.to_string()), - FreezeFsError::InternalFailure { + ServiceError::InternalFailure { .. } => Status::new(Code::Internal, err.to_string()), - FreezeFsError::IOError { + ServiceError::IOError { + .. + } => Status::new(Code::Unknown, err.to_string()), + ServiceError::InconsistentMountFs { .. } => Status::new(Code::Unknown, err.to_string()), + ServiceError::BlockDeviceMount { + .. 
+ } => Status::new(Code::FailedPrecondition, err.to_string()), } } } #[tonic::async_trait] -impl mayastor_node_plugin_server::MayastorNodePlugin for MayastorNodePluginSvc { +impl MayastorNodePlugin for MayastorNodePluginSvc { async fn freeze_fs( &self, request: Request, @@ -46,7 +71,7 @@ impl mayastor_node_plugin_server::MayastorNodePlugin for MayastorNodePluginSvc { let volume_id = request.into_inner().volume_id; debug!("freeze_fs({})", volume_id); freeze_volume(&volume_id).await?; - Ok(Response::new(mayastor_node_plugin::FreezeFsReply {})) + Ok(Response::new(FreezeFsReply {})) } async fn unfreeze_fs( @@ -56,7 +81,23 @@ impl mayastor_node_plugin_server::MayastorNodePlugin for MayastorNodePluginSvc { let volume_id = request.into_inner().volume_id; debug!("unfreeze_fs({})", volume_id); unfreeze_volume(&volume_id).await?; - Ok(Response::new(mayastor_node_plugin::UnfreezeFsReply {})) + Ok(Response::new(UnfreezeFsReply {})) + } + + async fn find_volume( + &self, + request: Request, + ) -> Result, Status> { + let volume_id = request.into_inner().volume_id; + debug!("find_volume({})", volume_id); + match find_volume(&volume_id).await? { + TypeOfMount::FileSystem => Ok(Response::new(FindVolumeReply { + volume_type: VolumeType::Filesystem as i32, + })), + TypeOfMount::RawBlock => Ok(Response::new(FindVolumeReply { + volume_type: VolumeType::Rawblock as i32, + })), + } } } @@ -69,11 +110,9 @@ impl MayastorNodePluginGrpcServer { endpoint ); if let Err(e) = Server::builder() - .add_service( - mayastor_node_plugin_server::MayastorNodePluginServer::new( - MayastorNodePluginSvc {}, - ), - ) + .add_service(MayastorNodePluginServer::new( + MayastorNodePluginSvc {}, + )) .serve(endpoint) .await { diff --git a/csi/src/nodeplugin_svc.rs b/csi/src/nodeplugin_svc.rs new file mode 100644 index 000000000..4a9803b4f --- /dev/null +++ b/csi/src/nodeplugin_svc.rs @@ -0,0 +1,164 @@ +//! Implement services required by the node plugin +//! find volumes provisioned by Mayastor +//! 
freeze and unfreeze filesystem volumes provisioned by Mayastor +use crate::{ + dev::{Device, DeviceError}, + findmnt, + mount, +}; +use snafu::{ResultExt, Snafu}; +use tokio::process::Command; +use uuid::Uuid; + +#[derive(Debug, Snafu)] +#[snafu(visibility = "pub(crate)")] +pub enum ServiceError { + #[snafu(display("Cannot find volume: volume ID: {}", volid))] + VolumeNotFound { volid: String }, + #[snafu(display("Invalid volume ID: {}, {}", volid, source))] + InvalidVolumeId { + source: uuid::parser::ParseError, + volid: String, + }, + #[snafu(display("fsfreeze failed: volume ID: {}, {}", volid, error))] + FsfreezeFailed { volid: String, error: String }, + #[snafu(display("Internal failure: volume ID: {}, {}", volid, source))] + InternalFailure { source: DeviceError, volid: String }, + #[snafu(display("IO error: volume ID: {}, {}", volid, source))] + IOError { + source: std::io::Error, + volid: String, + }, + #[snafu(display("Inconsistent mount filesystems: volume ID: {}", volid))] + InconsistentMountFs { volid: String }, + #[snafu(display("Not a filesystem mount: volume ID: {}", volid))] + BlockDeviceMount { volid: String }, +} + +pub enum TypeOfMount { + FileSystem, + RawBlock, +} + +const FSFREEZE: &str = "fsfreeze"; + +async fn fsfreeze( + volume_id: &str, + freeze_op: &str, +) -> Result<(), ServiceError> { + let uuid = Uuid::parse_str(volume_id).context(InvalidVolumeId { + volid: volume_id.to_string(), + })?; + + if let Some(device) = + Device::lookup(&uuid).await.context(InternalFailure { + volid: volume_id.to_string(), + })? 
+ { + let device_path = device.devname(); + if let Some(mnt) = mount::find_mount(Some(&device_path), None) { + let args = [freeze_op, &mnt.dest]; + let output = + Command::new(FSFREEZE).args(&args).output().await.context( + IOError { + volid: volume_id.to_string(), + }, + )?; + return if output.status.success() { + Ok(()) + } else { + let errmsg = String::from_utf8(output.stderr).unwrap(); + debug!( + "{} for volume_id :{} : failed, {}", + freeze_op, volume_id, errmsg + ); + Err(ServiceError::FsfreezeFailed { + volid: volume_id.to_string(), + error: errmsg, + }) + }; + } else { + // mount::find_mount does not return any matches, + // for mounts which are bind mounts to block devices + // (raw block volume). + // It would be incorrect to return the VolumeNotFound error, + // if the volume is mounted as a raw block volume on this node. + // Use findmnt to work out if volume is mounted as a raw + // block, i.e. we get some matches, and return the + // BlockDeviceMount error. + let mountpaths = findmnt::get_mountpaths(&device_path).context( + InternalFailure { + volid: volume_id.to_string(), + }, + )?; + if !mountpaths.is_empty() { + debug!( + "{} for volume_id :{} : failed for block device", + freeze_op, volume_id + ); + return Err(ServiceError::BlockDeviceMount { + volid: volume_id.to_string(), + }); + } + debug!( + "{} for volume_id :{} : failed, cannot find volume", + freeze_op, volume_id + ); + } + } + Err(ServiceError::VolumeNotFound { + volid: volume_id.to_string(), + }) +} + +pub async fn freeze_volume(volume_id: &str) -> Result<(), ServiceError> { + fsfreeze(volume_id, "--freeze").await +} + +pub async fn unfreeze_volume(volume_id: &str) -> Result<(), ServiceError> { + fsfreeze(volume_id, "--unfreeze").await +} + +pub async fn find_volume(volume_id: &str) -> Result { + let uuid = Uuid::parse_str(volume_id).context(InvalidVolumeId { + volid: volume_id.to_string(), + })?; + + if let Some(device) = + Device::lookup(&uuid).await.context(InternalFailure { + volid: 
volume_id.to_string(), + })? + { + let device_path = device.devname(); + let mountpaths = + findmnt::get_mountpaths(&device_path).context(InternalFailure { + volid: volume_id.to_string(), + })?; + debug!("mountpaths for volume_id :{} : {:?}", volume_id, mountpaths); + if !mountpaths.is_empty() { + let fstype = mountpaths[0].fstype.clone(); + for devmount in mountpaths { + if fstype != devmount.fstype { + debug!( + "Find volume_id :{} : failed, multiple fstypes {}, {}", + volume_id, fstype, devmount.fstype + ); + // This failure is very unlikely but include for + // completeness + return Err(ServiceError::InconsistentMountFs { + volid: volume_id.to_string(), + }); + } + } + debug!("fstype for volume_id :{} is {}", volume_id, fstype); + if fstype == "devtmpfs" { + return Ok(TypeOfMount::RawBlock); + } else { + return Ok(TypeOfMount::FileSystem); + } + } + } + Err(ServiceError::VolumeNotFound { + volid: volume_id.to_string(), + }) +} diff --git a/csi/src/server.rs b/csi/src/server.rs index 117e9745c..11a876dee 100644 --- a/csi/src/server.rs +++ b/csi/src/server.rs @@ -43,34 +43,14 @@ mod block_vol; mod dev; mod error; mod filesystem_vol; +mod findmnt; mod format; -mod freezefs; mod identity; mod match_dev; mod mount; mod node; mod nodeplugin_grpc; - -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility = "pub(crate)")] -pub enum CSIError { - #[snafu(display("iscsiadm error: {}", error))] - Iscsiadm { error: String }, - #[snafu(display("Cannot find {}", execname))] - ExecutableNotFound { execname: String }, - #[snafu(display("Could not attach disk after {:?}", value))] - AttachTimeout { value: std::time::Duration }, - #[snafu(display("Invalid URI {}", uristr))] - InvalidURI { uristr: String }, - #[snafu(display("Invalid device path {}", devpath))] - InvalidDevicePath { devpath: String }, - #[snafu(display("Not found {}", value))] - NotFound { value: String }, - #[snafu(display("{}", error))] - Nvmf { error: String }, -} +mod nodeplugin_svc; #[derive(Debug)] 
struct UnixStream(tokio::net::UnixStream); From 1e9fac452bdf717061cf3827c58032cf9acc146f Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Wed, 4 Nov 2020 16:46:55 +0100 Subject: [PATCH 37/92] test: need ability to (easily) drive IO to a nexus This change adds the ability to create workloads within a given mayastor instance. This is for testing -- not just during mayastor development but also for e2e tests where we can invoke job creations form within k8s in the near future. An example is given `io_job.rs` where we create 2 nvmf targets through the compose framework, and then use `struct MayastorTest` to do some IO to it. --- mayastor/src/bdev/dev/nvmf.rs | 2 + mayastor/src/bdev/nexus/nexus_io.rs | 7 +- mayastor/src/core/bdev.rs | 3 +- mayastor/src/core/io_driver.rs | 403 +++++++++++++++++++++++ mayastor/src/core/mod.rs | 2 + mayastor/src/core/thread.rs | 10 +- mayastor/src/nexus_uri.rs | 6 +- mayastor/tests/common/compose.rs | 23 +- mayastor/tests/common/mod.rs | 6 +- mayastor/tests/io_job.rs | 199 +++++++++++ mayastor/tests/lvs_pool_rpc.rs | 2 +- mayastor/tests/mayastor_compose_basic.rs | 2 +- mayastor/tests/nexus_child_location.rs | 2 +- mayastor/tests/replica_timeout.rs | 2 +- mayastor/tests/reset.rs | 4 +- shell.nix | 1 + 16 files changed, 655 insertions(+), 19 deletions(-) create mode 100644 mayastor/src/core/io_driver.rs create mode 100644 mayastor/tests/io_job.rs diff --git a/mayastor/src/bdev/dev/nvmf.rs b/mayastor/src/bdev/dev/nvmf.rs index ca87fc972..259f72db7 100644 --- a/mayastor/src/bdev/dev/nvmf.rs +++ b/mayastor/src/bdev/dev/nvmf.rs @@ -9,6 +9,7 @@ use std::{ use async_trait::async_trait; use futures::channel::oneshot; use snafu::ResultExt; +use tracing::instrument; use url::Url; use spdk_sys::{ @@ -137,6 +138,7 @@ impl CreateDestroy for Nvmf { type Error = NexusBdevError; /// Create an NVMF bdev + #[instrument(err)] async fn create(&self) -> Result { if Bdev::lookup_by_name(&self.get_name()).is_some() { return Err(NexusBdevError::BdevExists { 
diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 7e5381e2c..e5b2f76c7 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -265,6 +265,10 @@ impl Bio { pub(crate) fn block_len(&self) -> u64 { self.bdev_as_ref().block_len() as u64 } + #[inline] + pub(crate) fn status(&self) -> i8 { + unsafe { self.0.as_ref().internal.status } + } /// determine if the IO needs an indirect buffer this can happen for example /// when we do a 512 write to a 4k device. @@ -289,11 +293,12 @@ impl Debug for Bio { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { writeln!( f, - "bdev: {} offset: {:?}, num_blocks: {:?}, type: {:?} {:p} ", + "bdev: {} offset: {:?}, num_blocks: {:?}, type: {:?} status: {:?}, {:p} ", self.bdev_as_ref().name(), self.offset(), self.num_blocks(), self.io_type(), + self.status(), self ) } diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 9b69b0002..093af2902 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -160,7 +160,7 @@ impl Bdev { info!("hot remove {} from {}", b.name, b.parent); b.close(); } - }) + }); }); } @@ -371,7 +371,6 @@ impl Bdev { unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender) }; sender.send(errno).expect("stat_cb receiver is gone"); } - /// Get bdev stats or errno value in case of an error. pub async fn stats(&self) -> Result { let mut stat: spdk_bdev_io_stat = Default::default(); diff --git a/mayastor/src/core/io_driver.rs b/mayastor/src/core/io_driver.rs new file mode 100644 index 000000000..9029c8e5c --- /dev/null +++ b/mayastor/src/core/io_driver.rs @@ -0,0 +1,403 @@ +//! 
helper routines to drive IO to the nexus for testing purposes +use futures::channel::oneshot; +use rand::Rng; +use std::{ptr::NonNull, sync::Mutex}; + +use spdk_sys::{ + spdk_bdev_free_io, + spdk_bdev_read, + spdk_bdev_reset, + spdk_bdev_write, +}; + +use crate::{ + core::{Bdev, Cores, Descriptor, DmaBuf, IoChannel, Mthread}, + ffihelper::pair, + nexus_uri::bdev_create, +}; + +#[derive(Debug)] +pub enum IoType { + /// perform random read operations + READ, + /// perform random write operations + WRITE, +} + +impl Default for IoType { + fn default() -> Self { + Self::READ + } +} + +#[derive(Debug)] +struct Io { + /// buffer we read/write from/to + buf: DmaBuf, + /// type of IO we are supposed to issue + iot: IoType, + /// current offset where we are reading or writing + offset: u64, + /// pointer to our the job we belong too + job: NonNull, +} + +impl Io { + /// start submitting + fn run(&mut self, job: *mut Job) { + self.job = NonNull::new(job).unwrap(); + match self.iot { + IoType::READ => self.read(0), + IoType::WRITE => self.write(0), + }; + } + + /// obtain a reference to the inner job + fn job(&mut self) -> &mut Job { + unsafe { self.job.as_mut() } + } + + /// dispatch the next IO, this is called from within the completion callback + pub fn next(&mut self, offset: u64) { + if self.job().request_reset { + self.job().request_reset = false; + self.reset(); + return; + } + + match self.iot { + IoType::READ => self.read(offset), + IoType::WRITE => self.write(offset), + } + } + + /// dispatch the read IO at given offset + fn read(&mut self, offset: u64) { + unsafe { + if spdk_bdev_read( + self.job.as_ref().desc.as_ptr(), + self.job.as_ref().ch.as_ref().unwrap().as_ptr(), + *self.buf, + offset, + self.buf.len(), + Some(Job::io_completion), + self as *const _ as *mut _, + ) == 0 + { + self.job.as_mut().n_inflight += 1; + } else { + eprintln!( + "failed to submit read IO to {}", + self.job.as_ref().bdev.name() + ); + } + }; + } + + /// dispatch write IO at given offset 
+ fn write(&mut self, offset: u64) { + unsafe { + if spdk_bdev_write( + self.job.as_ref().desc.as_ptr(), + self.job.as_ref().ch.as_ref().unwrap().as_ptr(), + *self.buf, + offset, + self.buf.len(), + Some(Job::io_completion), + self as *const _ as *mut _, + ) == 0 + { + self.job.as_mut().n_inflight += 1; + } else { + eprintln!( + "failed to submit write IO to {}", + self.job.as_ref().bdev.name() + ); + } + }; + } + + /// reset the bdev under test + pub fn reset(&mut self) { + unsafe { + if spdk_bdev_reset( + self.job.as_ref().desc.as_ptr(), + self.job.as_ref().ch.as_ref().unwrap().as_ptr(), + Some(Job::io_completion), + self as *const _ as *mut _, + ) == 0 + { + self.job.as_mut().n_inflight += 1; + } else { + eprintln!( + "failed to submit reset IO to {}", + self.job.as_ref().bdev.name() + ); + } + } + } +} + +#[derive(Debug)] +pub struct Job { + /// that drives IO to a bdev using its own channel. + bdev: Bdev, + /// descriptor to the bdev + desc: Descriptor, + /// io channel used to submit IO + ch: Option, + /// queue depth configured for this job + qd: u64, + /// io_size the io_size is the number of blocks submit per IO + io_size: u64, + /// blk_size of the underlying device + blk_size: u32, + /// num_blocks the device has + num_blocks: u64, + /// aligned set of IOs we can do + io_blocks: u64, + /// io queue + queue: Vec, + /// number of IO's completed + n_io: u64, + /// number of IO's currently inflight + n_inflight: u32, + ///generate random number between 0 and num_block + rng: rand::rngs::ThreadRng, + /// drain the job which means that we wait for all pending IO to complete + /// and stop the run + drain: bool, + /// channels used to signal completion + s: Option>, + r: Option>, + /// issue a reset to the bdev + request_reset: bool, + /// core to run this job on + core: u32, + /// thread this job is run on + thread: Option, +} + +impl Job { + extern "C" fn io_completion( + bdev_io: *mut spdk_sys::spdk_bdev_io, + success: bool, + arg: *mut std::ffi::c_void, + ) 
{ + let ioq: &mut Io = unsafe { &mut *arg.cast() }; + let job = unsafe { ioq.job.as_mut() }; + + if !success { + error!("{}: {:#?}", job.thread.as_ref().unwrap().name(), bdev_io); + } + + assert_eq!(Cores::current(), job.core); + job.n_io += 1; + job.n_inflight -= 1; + + unsafe { spdk_bdev_free_io(bdev_io) } + + if job.n_inflight == 0 { + trace!("{} fully drained", job.thread.as_ref().unwrap().name()); + job.s.take().unwrap().send(true).unwrap(); + return; + } + + if job.drain { + return; + } + + let offset = (job.rng.gen::() % job.io_size) * job.io_blocks; + ioq.next(offset); + } + + pub fn stop(&mut self) -> oneshot::Receiver { + self.drain = true; + self.r.take().expect("double shut down for job") + } + + fn as_ptr(&self) -> *mut Job { + self as *const _ as *mut _ + } + /// start the job that will dispatch an IO up to the provided queue depth + fn start(mut self) -> Box { + let thread = + Mthread::new(format!("job_{}", self.bdev.name()), self.core) + .unwrap(); + thread.with(|| { + self.ch = self.desc.get_channel(); + let mut boxed = Box::new(self); + let ptr = boxed.as_ptr(); + boxed.queue.iter_mut().for_each(|q| q.run(ptr)); + boxed.thread = Mthread::current(); + boxed + }) + } +} + +#[derive(Default)] +pub struct Builder { + /// bdev URI to create + uri: String, + /// queue depth + qd: u64, + /// size of each IO + io_size: u64, + /// type of workload to generate + iot: IoType, + /// existing bdev to use instead of creating one + bdev: Option, + /// core to start the job on, the command will crash if the core is invalid + core: u32, +} + +impl Builder { + pub fn new() -> Self { + Self::default() + } + + /// create a bdev using the given URI + pub fn uri(mut self, uri: &str) -> Self { + self.uri = String::from(uri); + self + } + + /// set the queue depth of the job + pub fn qd(mut self, qd: u64) -> Self { + self.qd = qd; + self + } + + /// io size per IO for the job + pub fn io_size(mut self, io_size: u64) -> Self { + self.io_size = io_size; + self + } + + /// 
issue read or write requests + pub fn rw(mut self, iot: IoType) -> Self { + self.iot = iot; + self + } + + /// use the given bdev instead of the URI to create the job + pub fn bdev(mut self, bdev: Bdev) -> Self { + self.bdev = Some(bdev); + self + } + /// set the core to run on + pub fn core(mut self, core: u32) -> Self { + self.core = core; + self + } + + pub async fn build(mut self) -> Job { + let bdev = if self.bdev.is_some() { + self.bdev.take().unwrap() + } else { + let name = bdev_create(&self.uri).await.unwrap(); + Bdev::lookup_by_name(&name).unwrap() + }; + + let desc = bdev.open(true).unwrap(); + + let blk_size = bdev.block_len(); + let num_blocks = bdev.num_blocks(); + + let io_size = self.io_size / blk_size as u64; + let io_blocks = num_blocks / io_size; + + let mut queue = Vec::new(); + + (0 .. self.qd).for_each(|offset| { + queue.push(Io { + buf: DmaBuf::new(self.io_size as u64, bdev.alignment()) + .unwrap(), + iot: IoType::READ, + offset, + job: NonNull::dangling(), + }); + }); + let (s, r) = pair::(); + Job { + core: self.core, + bdev, + desc, + ch: None, + qd: self.qd, + io_size, + blk_size, + num_blocks, + queue, + io_blocks, + n_io: 0, + n_inflight: 0, + rng: Default::default(), + drain: false, + s: Some(s), + r: Some(r), + request_reset: false, + thread: None, + } + } +} + +pub struct JobQueue { + #[allow(clippy::vec_box)] + inner: Mutex>>, +} + +impl Default for JobQueue { + fn default() -> Self { + Self::new() + } +} + +impl JobQueue { + pub fn new() -> Self { + Self { + inner: Mutex::new(Vec::new()), + } + } + + /// look up the job by bdev name + fn lookup(&self, name: &str) -> Option> { + let mut inner = self.inner.lock().unwrap(); + if let Some(index) = + inner.iter().position(|job| job.bdev.name() == name) + { + Some(inner.remove(index)) + } else { + None + } + } + + /// start the job + pub fn start(&self, job: Job) { + self.inner.lock().unwrap().push(job.start()); + } + + /// stop the job by bdev name + pub async fn stop(&self, bdevname: 
&str) { + if let Some(mut job) = self.lookup(bdevname) { + job.stop().await.unwrap(); + job.thread.unwrap().with(|| drop(job)); + } + } + + /// stop all jobs + pub async fn stop_all(&self) { + let mut inner = self.inner.lock().unwrap(); + while let Some(mut job) = inner.pop() { + job.stop().await.unwrap(); + job.thread.unwrap().with(|| drop(job)); + } + } + + /// reset all jobs + pub fn send_reset(&self) { + self.inner.lock().unwrap().iter_mut().for_each(|j| { + j.request_reset = true; + }); + } +} diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 0399eb34f..2096901e6 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -16,6 +16,7 @@ pub use env::{ MayastorEnvironment, GLOBAL_RC, }; + pub use handle::BdevHandle; pub use reactor::{Reactor, ReactorState, Reactors, REACTOR_LIST}; pub use share::{Protocol, Share}; @@ -28,6 +29,7 @@ mod descriptor; mod dma; mod env; mod handle; +pub mod io_driver; mod reactor; mod share; pub(crate) mod thread; diff --git a/mayastor/src/core/thread.rs b/mayastor/src/core/thread.rs index 071b30aeb..5f5bbdf42 100644 --- a/mayastor/src/core/thread.rs +++ b/mayastor/src/core/thread.rs @@ -59,14 +59,14 @@ impl Mthread { /// Avoid any blocking calls as it will block the whole reactor. Also, avoid /// long-running functions. In general if you follow the nodejs event loop /// model, you should be good. 
- pub fn with(self, f: F) -> Self { - let _th = Self::current(); + pub fn with T>(self, f: F) -> T { + let th = Self::current(); self.enter(); - f(); - if let Some(t) = _th { + let out = f(); + if let Some(t) = th { t.enter(); } - self + out } #[inline] diff --git a/mayastor/src/nexus_uri.rs b/mayastor/src/nexus_uri.rs index 0a6e23ee5..736f0fdec 100644 --- a/mayastor/src/nexus_uri.rs +++ b/mayastor/src/nexus_uri.rs @@ -1,12 +1,12 @@ use std::{convert::TryFrom, num::ParseIntError, str::ParseBoolError}; +use crate::{bdev::Uri, core::Bdev}; use futures::channel::oneshot::Canceled; use nix::errno::Errno; use snafu::Snafu; +use tracing::instrument; use url::ParseError; -use crate::{bdev::Uri, core::Bdev}; - // parse URI and bdev create/destroy errors common for all types of bdevs #[derive(Debug, Snafu, Clone)] #[snafu(visibility = "pub(crate)")] @@ -70,11 +70,13 @@ pub enum NexusBdevError { /// Parse URI and create bdev described in the URI. /// Return the bdev name (which can be different from URI). +#[instrument] pub async fn bdev_create(uri: &str) -> Result { Uri::parse(uri)?.create().await } /// Parse URI and destroy bdev described in the URI. 
+#[instrument] pub async fn bdev_destroy(uri: &str) -> Result<(), NexusBdevError> { Uri::parse(uri)?.destroy().await } diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 5e5b7fc50..2031a4d6d 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -211,7 +211,7 @@ type ContainerName = String; /// container ID type ContainerId = String; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ComposeTest { /// used as the network name name: String, @@ -566,10 +566,31 @@ impl ComposeTest { Ok(handles) } + + pub fn down(&self) { + if self.clean { + self.containers.keys().for_each(|c| { + std::process::Command::new("docker") + .args(&["stop", c]) + .output() + .unwrap(); + std::process::Command::new("docker") + .args(&["rm", c]) + .output() + .unwrap(); + }); + + std::process::Command::new("docker") + .args(&["network", "rm", &self.name]) + .output() + .unwrap(); + } + } } /// Mayastor test structure that simplifies sending futures. 
Mayastor has /// its own reactor, which is not tokio based, so we need to handle properly +#[derive(Debug)] pub struct MayastorTest<'a> { reactor: &'a Reactor, thdl: Option>, diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 8ce73a109..0afbcd6bd 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -21,10 +21,12 @@ use mayastor::{ use spdk_sys::spdk_get_thread; pub mod bdev_io; -mod compose; +pub mod compose; pub mod error_bdev; pub mod ms_exec; -pub use compose::{Builder, ComposeTest, MayastorTest, RpcHandle}; + +pub use compose::{ComposeTest, MayastorTest, RpcHandle}; + /// call F cnt times, and sleep for a duration between each invocation pub fn retry(mut cnt: u32, timeout: Duration, mut f: F) -> T where diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs new file mode 100644 index 000000000..be4549d95 --- /dev/null +++ b/mayastor/tests/io_job.rs @@ -0,0 +1,199 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use tokio::time::Duration; + +use mayastor::{ + bdev::{nexus_create, nexus_lookup}, + core::{io_driver, Bdev, MayastorCliArgs}, +}; +use rpc::mayastor::{BdevShareRequest, BdevUri}; + +pub mod common; +use common::compose::{self, ComposeTest, MayastorTest}; +use mayastor::core::io_driver::JobQueue; + +static DOCKER_COMPOSE: OnceCell = OnceCell::new(); +static MAYASTOR: OnceCell = OnceCell::new(); + +// this function runs within the context of the MayastorTest instance +async fn create_work(queue: Arc) { + // get a vector of grpc clients to all containers that are part of this test + let mut hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + + // for each grpc client, invoke these methods.
+ for h in &mut hdls { + // create the bdev + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + // share it over nvmf + h.bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap(); + } + + // get a reference to mayastor (used later) + let ms = MAYASTOR.get().unwrap(); + + // have ms create our nexus to the targets created above to know the IPs of + // the mayastor instances that run in the container, the handles can be + // used. This avoids hardcoded IPs and having magic constants. + ms.spawn(async move { + nexus_create( + "nexus0", + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + ), + ], + ) + .await + .unwrap(); + + let bdev = Bdev::lookup_by_name("nexus0").unwrap(); + + // create a job using the bdev we looked up, we are in the context here + // of the ms instance and not the containers. + let job = io_driver::Builder::new() + .core(1) + .bdev(bdev) + .qd(64) + .io_size(512) + .build() + .await; + + // start the first job + queue.start(job); + + // create a new job and start it. Note that the malloc bdev is created + // implicitly with the uri() argument + let job = io_driver::Builder::new() + .core(0) + .uri("malloc:///disk0?size_mb=100") + .qd(64) + .io_size(512) + .build() + .await; + + queue.start(job); + }) + .await +} + +async fn stats() { + // we grab an instance to mayastor test + let ms = MAYASTOR.get().unwrap(); + // and spawn a future on it + ms.spawn(async move { + let bdev = Bdev::bdev_first().unwrap().into_iter(); + for b in bdev { + let result = b.stats().await.unwrap(); + println!("{}: {:?}", b.name(), result); + } + }) + .await; +} + +#[tokio::test] +async fn io_driver() { + // + // We are creating 3 mayastor instances in total. Two of them will be + // running in side a container. 
Once these two instances are running, we + // will create a malloc bdev on each and share that over nvmf. Using + // these targets a 3rd mayastor instance will be started. The third one + // however, is started by means of struct MayastorTest. This way, we can + // interact with it using .spawn() and .send(). + // + // The spawn() method returns an awaitable handle and .send() does a fire + // and forget. Using these methods we create a nexus in the mayastor + // test instance (ms). As part of the test, we also create a malloc bdev + // on that instance + // + // Finally, we create 2 jobs, one for the nexus and one for the malloc bdev + // and let the test run for 5 seconds. + + // To make it easy to get access to the ComposeTest and MayastorTest + // instances they are, after creation, stored in the static globals + // + + // the queue that holds our jobs once started. As we pass this around + // between this thread and the mayastor instance we keep a ref count. We + // need to keep track of the Jobs to avoid them from being dropped. + let queue = Arc::new(JobQueue::new()); + + // create the docker containers + let compose = compose::Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container("ms2") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + // create the mayastor test instance + let mayastor_test = MayastorTest::new(MayastorCliArgs { + log_components: vec!["all".into()], + reactor_mask: "0x3".to_string(), + no_pci: true, + grpc_endpoint: "0.0.0.0".to_string(), + ..Default::default() + }); + + // set the created instances to the globals here such that we can access + // them whenever we want by "getting" them. Because some code is async + // we cannot do this in one step as the async runtime cannot be used during + // init.
+ DOCKER_COMPOSE.set(compose).unwrap(); + + // later down the road we use the ms instance (to spawn futures) so here we + // use get_or_init(); it is a shorter way of writing: + // ```rust + // MAYASTOR.set(mayastor); + // let ms = MAYASTOR.get().unwrap(); + // ``` + let ms = MAYASTOR.get_or_init(|| mayastor_test); + + // the creation of the targets -- is done by grpc handles. Subsequently, we + // create the nexus and the malloc bdev (using futures). To keep things + // a bit organised we do that in a single function; notice we pass queue + // here as an argument. We could also make a static queue here if we wanted + // to, to avoid passing arguments around. + + create_work(queue.clone()).await; + + // the devices have been created and they are pumping IO + tokio::time::delay_for(Duration::from_secs(5)).await; + + // we must stop all jobs otherwise mayastor would never exit (unless we + // signal it) + queue.stop_all().await; + // grab some stats of the bdevs in the ms instance + stats().await; + + // Both ComposeTest and MayastorTest impl Drop. However, we want to control + // the sequence of shut down here, so we destroy the nexus to avoid that + // the system destroys the containers before it destroys mayastor.
+ ms.spawn(nexus_lookup("nexus0").unwrap().destroy()) + .await + .unwrap(); + // now we manually destroy the docker containers + DOCKER_COMPOSE.get().unwrap().down(); + // ms gets dropped and will call mayastor_env_stop() +} diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs index 49b0c5b04..d4eb2c38f 100644 --- a/mayastor/tests/lvs_pool_rpc.rs +++ b/mayastor/tests/lvs_pool_rpc.rs @@ -8,7 +8,7 @@ use rpc::mayastor::{ }; pub mod common; -use common::Builder; +use common::compose::Builder; static DISKNAME1: &str = "/tmp/disk1.img"; #[tokio::test] diff --git a/mayastor/tests/mayastor_compose_basic.rs b/mayastor/tests/mayastor_compose_basic.rs index 699e55899..d5e14d9d4 100644 --- a/mayastor/tests/mayastor_compose_basic.rs +++ b/mayastor/tests/mayastor_compose_basic.rs @@ -6,7 +6,7 @@ use mayastor::{ use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; pub mod common; -use common::{Builder, MayastorTest}; +use common::{compose::Builder, MayastorTest}; #[tokio::test] async fn compose_up_down() { diff --git a/mayastor/tests/nexus_child_location.rs b/mayastor/tests/nexus_child_location.rs index 908b548f4..6992b4511 100644 --- a/mayastor/tests/nexus_child_location.rs +++ b/mayastor/tests/nexus_child_location.rs @@ -5,7 +5,7 @@ use mayastor::{ use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; pub mod common; -use common::{Builder, MayastorTest}; +use common::{compose::Builder, MayastorTest}; static NEXUS_NAME: &str = "child_location_nexus"; diff --git a/mayastor/tests/replica_timeout.rs b/mayastor/tests/replica_timeout.rs index cbd2d5f7f..7071eeac8 100644 --- a/mayastor/tests/replica_timeout.rs +++ b/mayastor/tests/replica_timeout.rs @@ -1,6 +1,6 @@ #![allow(unused_assignments)] -use common::{bdev_io, Builder, MayastorTest}; +use common::{bdev_io, compose::Builder, MayastorTest}; use mayastor::{ bdev::{nexus_create, nexus_lookup}, core::MayastorCliArgs, diff --git a/mayastor/tests/reset.rs b/mayastor/tests/reset.rs index 2bae40148..3f6c395bd 
100644 --- a/mayastor/tests/reset.rs +++ b/mayastor/tests/reset.rs @@ -4,10 +4,10 @@ use mayastor::core::{BdevHandle, MayastorCliArgs}; use rpc::mayastor::{BdevShareRequest, BdevUri}; pub mod common; - +use common::compose; #[tokio::test] async fn nexus_reset_mirror() { - let test = common::Builder::new() + let test = compose::Builder::new() .name("cargo-test") .network("10.1.0.0/16") .add_container("ms2") diff --git a/shell.nix b/shell.nix index 6ced2b189..c457eb206 100644 --- a/shell.nix +++ b/shell.nix @@ -21,6 +21,7 @@ mkShell { # fortify does not work with -O0 which is used by spdk when --enable-debug hardeningDisable = [ "fortify" ]; buildInputs = [ + docker-compose clang cowsay e2fsprogs From 603cdcb4968da6263394a6a7b52977a4457aa12d Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 6 Nov 2020 11:25:24 +0100 Subject: [PATCH 38/92] thread: use NonNull<> --- mayastor/src/core/env.rs | 2 +- mayastor/src/core/reactor.rs | 16 +++---- mayastor/src/core/thread.rs | 82 +++++++++++++++++++----------------- mayastor/src/lvs/lvol.rs | 6 +-- mayastor/tests/common/mod.rs | 5 +-- 5 files changed, 54 insertions(+), 57 deletions(-) diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index bcac62f21..a2addd808 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -307,7 +307,7 @@ extern "C" fn mayastor_signal_handler(signo: i32) { SIG_RECEIVED.store(true, SeqCst); unsafe { spdk_thread_send_critical_msg( - Mthread::get_init().0, + Mthread::get_init().into_raw(), Some(signal_trampoline), ); }; diff --git a/mayastor/src/core/reactor.rs b/mayastor/src/core/reactor.rs index 7251f462d..e47559bde 100644 --- a/mayastor/src/core/reactor.rs +++ b/mayastor/src/core/reactor.rs @@ -171,14 +171,14 @@ impl Reactors { let mask = unsafe { spdk_thread_get_cpumask(thread) }; let scheduled = Reactors::iter().any(|r| { if unsafe { spdk_cpuset_get_cpu(mask, r.lcore) } { - let mt = Mthread(thread); + let mt = Mthread::from(thread); info!( "scheduled {} {:p} on 
core:{}", mt.name(), thread, r.lcore ); - r.incoming.push(Mthread(thread)); + r.incoming.push(mt); return true; } false @@ -343,7 +343,8 @@ impl Reactor { F: Future + 'static, R: 'static, { - let _thread = Mthread::current(); + // hold on to the any potential thread we might be running on right now + let thread = Mthread::current(); Mthread::get_init().enter(); let schedule = |t| QUEUE.with(|(s, _)| s.send(t).unwrap()); let (runnable, task) = async_task::spawn_local(future, schedule); @@ -359,14 +360,9 @@ impl Reactor { match task.as_mut().poll(cx) { Poll::Ready(output) => { Mthread::get_init().exit(); - _thread.map(|t| { - debug!( - "restoring thread from {:?} to {:?}", - Mthread::current(), - _thread - ); + if let Some(t) = thread { t.enter() - }); + } return Some(output); } Poll::Pending => { diff --git a/mayastor/src/core/thread.rs b/mayastor/src/core/thread.rs index 5f5bbdf42..3f2a3af67 100644 --- a/mayastor/src/core/thread.rs +++ b/mayastor/src/core/thread.rs @@ -15,6 +15,7 @@ use spdk_sys::{ }; use crate::core::{cpu_cores::CpuMask, Cores}; +use std::ptr::NonNull; #[derive(Debug, Snafu)] pub enum Error { @@ -27,11 +28,21 @@ pub enum Error { /// should not be confused with an actual thread. Consider it more to be /// analogous to a container to which you can submit work and poll it to drive /// the submitted work to completion. 
-pub struct Mthread(pub(crate) *mut spdk_thread); +pub struct Mthread(NonNull); + +impl From<*mut spdk_thread> for Mthread { + fn from(t: *mut spdk_thread) -> Self { + let t = NonNull::new(t).expect("thread may not be NULL"); + Mthread(t) + } +} impl Mthread { pub fn get_init() -> Mthread { - Mthread::from_null_checked(unsafe { spdk_thread_get_by_id(1) }).unwrap() + Mthread( + NonNull::new(unsafe { spdk_thread_get_by_id(1) }) + .expect("No init thread allocated"), + ) } /// @@ -42,16 +53,20 @@ impl Mthread { pub fn new(name: String, core: u32) -> Option { let name = CString::new(name).unwrap(); - let t = unsafe { + + if let Some(t) = NonNull::new(unsafe { let mut mask = CpuMask::new(); mask.set_cpu(core, true); spdk_thread_create(name.as_ptr(), mask.as_ptr()) - }; - Self::from_null_checked(t) + }) { + Some(Mthread(t)) + } else { + None + } } pub fn id(&self) -> u64 { - unsafe { (*self.0).id } + unsafe { (self.0.as_ref()).id } } /// /// # Note @@ -70,79 +85,68 @@ impl Mthread { } #[inline] - pub fn poll(self) -> Self { - let _ = unsafe { spdk_thread_poll(self.0, 0, 0) }; - self + pub fn poll(&self) { + let _ = unsafe { spdk_thread_poll(self.0.as_ptr(), 0, 0) }; } #[inline] - pub fn enter(self) -> Self { + pub fn enter(&self) { debug!("setting thread {:?}", self); - unsafe { spdk_set_thread(self.0) }; - self + unsafe { spdk_set_thread(self.0.as_ptr()) }; } #[inline] - pub fn exit(self) -> Self { + pub fn exit(&self) { debug!("exit thread {:?}", self); unsafe { spdk_set_thread(std::ptr::null_mut()) }; - self } pub fn current() -> Option { - Mthread::from_null_checked(unsafe { spdk_get_thread() }) + if let Some(t) = NonNull::new(unsafe { spdk_get_thread() }) { + Some(Mthread(t)) + } else { + None + } } pub fn name(&self) -> &str { unsafe { - std::ffi::CStr::from_ptr(&(*self.0).name[0]) + std::ffi::CStr::from_ptr(&self.0.as_ref().name[0]) .to_str() .unwrap() } } + pub fn into_raw(self) -> *mut spdk_thread { + self.0.as_ptr() + } + /// destroy the given thread waiting 
for it to become ready to destroy pub fn destroy(self) { debug!("destroying thread {}...{:p}", self.name(), self.0); unsafe { - spdk_set_thread(self.0); + spdk_set_thread(self.0.as_ptr()); // set that we *want* to exit, but we have not exited yet - spdk_thread_exit(self.0); + spdk_thread_exit(self.0.as_ptr()); // now wait until the thread is actually exited the internal // state is updated by spdk_thread_poll() - while !spdk_thread_is_exited(self.0) { - spdk_thread_poll(self.0, 0, 0); + while !spdk_thread_is_exited(self.0.as_ptr()) { + spdk_thread_poll(self.0.as_ptr(), 0, 0); } - spdk_thread_destroy(self.0); + spdk_thread_destroy(self.0.as_ptr()); } debug!("thread {:p} destroyed", self.0); } - pub fn inner(self) -> *const spdk_thread { - self.0 - } - - pub fn inner_mut(self) -> *mut spdk_thread { - self.0 - } - - pub fn from_null_checked(t: *mut spdk_thread) -> Option { - if t.is_null() { - None - } else { - Some(Mthread(t)) - } - } - #[allow(clippy::not_unsafe_ptr_arg_deref)] pub fn send_msg( &self, f: extern "C" fn(ctx: *mut c_void), arg: *mut c_void, ) { - let rc = unsafe { spdk_thread_send_msg(self.0, Some(f), arg) }; + let rc = unsafe { spdk_thread_send_msg(self.0.as_ptr(), Some(f), arg) }; assert_eq!(rc, 0); } @@ -175,7 +179,7 @@ impl Mthread { let rc = unsafe { spdk_thread_send_msg( - self.0, + self.0.as_ptr(), Some(trampoline::), Box::into_raw(ctx).cast(), ) diff --git a/mayastor/src/lvs/lvol.rs b/mayastor/src/lvs/lvol.rs index 84031e5e1..96a913280 100644 --- a/mayastor/src/lvs/lvol.rs +++ b/mayastor/src/lvs/lvol.rs @@ -417,12 +417,10 @@ impl Lvol { error!("vbdev_lvol_create_snapshot errno {}", errno); } // Must complete IO on thread IO was submitted from - let thread = Mthread::from_null_checked(unsafe { + Mthread::from(unsafe { spdk_sys::spdk_bdev_io_get_thread(bio_ptr.cast()) }) - .expect("bio must have been submitted from an spdk_thread"); - thread.enter(); - Nexus::io_completion_local(errno == 0, bio_ptr); + .with(|| Nexus::io_completion_local(errno == 
0, bio_ptr)); } let c_snapshot_name = snapshot_name.into_cstring(); diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 0afbcd6bd..5855f9697 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -18,7 +18,6 @@ use mayastor::{ logger, rebuild::{ClientOperations, RebuildJob, RebuildState}, }; -use spdk_sys::spdk_get_thread; pub mod bdev_io; pub mod compose; @@ -320,8 +319,8 @@ pub fn clean_up_temp() { .unwrap(); } -pub fn thread() -> Option { - Mthread::from_null_checked(unsafe { spdk_get_thread() }) +pub fn thread() -> Mthread { + Mthread::get_init() } pub fn dd_urandom_blkdev(device: &str) -> i32 { From 59ee87903ab2007dae7c50875f6996dc28839227 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 6 Nov 2020 14:00:28 +0100 Subject: [PATCH 39/92] update dependencies --- Cargo.lock | 606 ++++++++++++++++++---------------- Cargo.toml | 2 +- nix/pkgs/mayastor/default.nix | 2 +- 3 files changed, 318 insertions(+), 292 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b00e15cb9..800b96912 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. 
[[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" dependencies = [ "gimli", ] @@ -17,9 +17,9 @@ checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] @@ -44,15 +44,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fd36ffbb1fb7c834eac128ea8d0e310c5aeb635548f9d58861e1308d46e71c" - -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" [[package]] name = "arrayref" @@ -62,9 +56,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assert_matches" @@ -83,16 +77,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "async-dup" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7427a12b8dc09291528cfb1da2447059adb4a257388c2acd6497a79d55cf6f7c" -dependencies = [ - 
"futures-io", - "simple-mutex", -] - [[package]] name = "async-executor" version = "1.3.0" @@ -158,9 +142,9 @@ dependencies = [ [[package]] name = "async-net" -version = "1.4.7" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4c3668eb091d781e97f0026b5289b457c77d407a85749a9bb4c057456c428f" +checksum = "06de475c85affe184648202401d7622afb32f0f74e02192857d0201a16defbe5" dependencies = [ "async-io", "blocking", @@ -184,6 +168,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "async-rustls" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c238bd34d425674d8003b8d674cc04baf74e1b71802f3c62451e3bf86f2858ef" +dependencies = [ + "futures-lite", + "rustls", + "webpki", +] + [[package]] name = "async-stream" version = "0.2.1" @@ -202,27 +197,14 @@ checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "async-task" -version = "4.0.2" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab27c1aa62945039e44edaeee1dc23c74cc0c303dd5fe0fb462a184f1c3a518" - -[[package]] -name = "async-tls" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85a97c4a0ecce878efd3f945f119c78a646d8975340bca0398f9bb05c30cc52" -dependencies = [ - "futures-core", - "futures-io", - "rustls", - "webpki", - "webpki-roots", -] +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" @@ -232,7 +214,7 @@ checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -266,9 +248,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.53" 
+version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707b586e0e2f247cbde68cdd2c3ce69ea7b7be43e1c5b426e37c9319c4b9838e" +checksum = "2baad346b2d4e94a24347adeee9c7a93f412ee94b9cc26e5b59dea23848e9f28" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -344,9 +326,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec", @@ -373,25 +355,13 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.7.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", - "byte-tools", - "byteorder", "generic-array", ] -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - [[package]] name = "blocking" version = "1.0.2" @@ -427,7 +397,7 @@ dependencies = [ "hyper-unix-connector", "log", "mio-named-pipes", - "pin-project", + "pin-project 0.4.27", "rustls", "rustls-native-certs", "serde", @@ -444,9 +414,9 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.40.5" +version = "1.40.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0039b619b9795bb6203a1ad7156b8418e38d4fdb857bf60984746b5a0fdb04" +checksum = "abf72b3eeb9a5cce41979def2c7522cb830356c0621ca29c0b766128c4e7fded" dependencies = [ "chrono", "serde", @@ -465,12 +435,6 @@ 
version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byte-unit" version = "3.1.4" @@ -546,7 +510,7 @@ checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "libc", "num-integer", - "num-traits 0.2.12", + "num-traits 0.2.14", "serde", "time", "winapi 0.3.9", @@ -578,15 +542,6 @@ dependencies = [ "vec_map", ] -[[package]] -name = "clear_on_drop" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9cc5db465b294c3fa986d5bbb0f3017cd850bff6dd6c52f9ccff8b4d21b7b08" -dependencies = [ - "cc", -] - [[package]] name = "cloudabi" version = "0.0.3" @@ -640,6 +595,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + [[package]] name = "crc" version = "1.8.1" @@ -785,15 +746,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "1.2.4" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "405681bfe2b7b25ad8660dfd90b6e8be9e470e224ff49e36b587d43f29a22601" +checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" dependencies = [ "byteorder", - "clear_on_drop", "digest", - "rand_core 0.3.1", + "rand_core 0.5.1", "subtle", + "zeroize", ] [[package]] @@ -841,7 +802,7 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "strsim 0.9.3", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ 
-863,14 +824,14 @@ checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "data-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d0e2d24e5ee3b23a01de38eefdcd978907890701f08ffffd4cb457ca4ee8d6" +checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" [[package]] name = "derive_builder" @@ -909,9 +870,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ "generic-array", ] @@ -938,11 +899,11 @@ dependencies = [ [[package]] name = "dns-lookup" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f69635ffdfbaea44241d7cca30a5e3a2e1c892613a6a8ad8ef03deeb6803480" +checksum = "093d88961fd18c4ecacb8c80cd0b356463ba941ba11e0e01f9cf5271380b79dc" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "socket2", "winapi 0.3.9", @@ -978,36 +939,36 @@ checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "dyn-clone" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" +checksum = "d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" [[package]] name = "ed25519" -version = "1.0.0-pre.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28f2b738e873c40ce7339dfb8c5a48c936084b4540127e86c47a0fddcaa8624" 
+checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" dependencies = [ "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.0-pre.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "845aaacc16f01178f33349e7c992ecd0cee095aa5e577f0f4dee35971bd36455" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "clear_on_drop", "curve25519-dalek", - "failure", - "rand_core 0.3.1", - "rand_os", + "ed25519", + "rand 0.7.3", + "serde", "sha2", + "zeroize", ] [[package]] @@ -1050,15 +1011,15 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "rustversion", - "syn 1.0.44", + "syn 1.0.48", "synstructure", ] [[package]] name = "errno" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" +checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" dependencies = [ "errno-dragonfly", "libc", @@ -1099,16 +1060,10 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", "synstructure", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fastrand" version = "1.4.0" @@ -1130,6 +1085,16 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "form_urlencoded" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + [[package]] name = "fsio" 
version = "0.1.3" @@ -1164,9 +1129,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8e3078b7b2a8a671cb7a3d17b4760e4181ea243227776ba83fd043b4ca034e" +checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" dependencies = [ "futures-channel", "futures-core", @@ -1179,9 +1144,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a4d35f7401e948629c9c3d6638fb9bf94e0b2121e96c3b428cc4e631f3eb74" +checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" dependencies = [ "futures-core", "futures-sink", @@ -1189,15 +1154,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d674eaa0056896d5ada519900dbf97ead2e46a7b6621e8160d79e2f2e1e2784b" +checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" [[package]] name = "futures-executor" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc709ca1da6f66143b8c9bec8e6260181869893714e9b5a490b169b0414144ab" +checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" dependencies = [ "futures-core", "futures-task", @@ -1206,15 +1171,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc94b64bb39543b4e432f1790b6bf18e3ee3b74653c5449f63310e9a74b123c" +checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" [[package]] name = "futures-lite" -version = "1.11.1" +version = "1.11.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "381a7ad57b1bad34693f63f6f377e1abded7a9c85c9d3eb6771e11c60aaadab9" +checksum = "5e6c079abfac3ab269e2927ec048dabc89d009ebfdda6b8ee86624f30c689658" dependencies = [ "fastrand", "futures-core", @@ -1227,27 +1192,27 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f57ed14da4603b2554682e9f2ff3c65d7567b53188db96cb71538217fc64581b" +checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "futures-sink" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d8764258ed64ebc5d9ed185cf86a95db5cac810269c5d20ececb32e0088abbd" +checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" [[package]] name = "futures-task" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd26820a9f3637f1302da8bceba3ff33adbe53464b54ca24d4e2d4f1db30f94" +checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" dependencies = [ "once_cell", ] @@ -1260,9 +1225,9 @@ checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-util" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a894a0acddba51a2d49a6f4263b1e64b8c579ece8af50fa86503d52cd1eea34" +checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" dependencies = [ "futures-channel", "futures-core", @@ -1271,7 +1236,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project 1.0.1", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1284,13 +1249,27 @@ version = "0.3.55" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +[[package]] +name = "generator" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", +] + [[package]] name = "generic-array" -version = "0.12.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", + "version_check", ] [[package]] @@ -1306,9 +1285,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" [[package]] name = "git-version" @@ -1329,7 +1308,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -1340,8 +1319,8 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.6" -source = "git+https://github.com/gila/h2?branch=v0.2.6#8d17dfb3409ddaea3a9d7a723e7a011475770758" +version = "0.2.7" +source = "git+https://github.com/gila/h2?branch=v0.2.7#fad645f9900682626de515748aac1f195576ecc8" dependencies = [ "bytes 0.5.6", "fnv", @@ -1452,9 +1431,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.8" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835" +checksum = 
"f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ "bytes 0.5.6", "futures-channel", @@ -1466,7 +1445,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", + "pin-project 1.0.1", "socket2", "tokio", "tower-service", @@ -1502,7 +1481,7 @@ dependencies = [ "futures-util", "hex", "hyper", - "pin-project", + "pin-project 0.4.27", "tokio", ] @@ -1535,11 +1514,11 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66" +checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -1654,9 +1633,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" [[package]] name = "libloading" @@ -1693,6 +1672,19 @@ dependencies = [ "cfg-if 0.1.10", ] +[[package]] +name = "loom" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +dependencies = [ + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", +] + [[package]] name = "loopdev" version = "0.2.1" @@ -1806,9 +1798,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" @@ -1901,18 +1893,18 @@ checksum 
= "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "nats" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f0bc27324f2967df06397f8608a1fcfe76fa0fd17d1b6b90a8796f79b4d180f" +checksum = "8512d6f66c58eaa2e7785412b50bc1f6ecded27c01b175b4eb18a3f41beb2c47" dependencies = [ "async-channel", - "async-dup", "async-executor", "async-io", - "async-mutex", + "async-lock", "async-net", - "async-tls", + "async-rustls", "base64-url", + "fastrand", "futures-lite", "itoa", "json", @@ -1920,10 +1912,8 @@ dependencies = [ "nkeys", "nuid", "once_cell", - "rand 0.7.3", "regex", - "rustls", - "rustls-native-certs", + "webpki-roots", ] [[package]] @@ -1975,9 +1965,9 @@ dependencies = [ [[package]] name = "nkeys" -version = "0.0.9" +version = "0.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777c4b6e0fa1c1250e36ee89ff41d2278b52b3abc63f02b107df1c9b8406d912" +checksum = "cd0aa1a33567887c95af653f9f88e482e34df8eaabb98df92cf5c81dfd882b0a" dependencies = [ "byteorder", "data-encoding", @@ -1985,7 +1975,6 @@ dependencies = [ "log", "rand 0.7.3", "signatory", - "signatory-dalek", ] [[package]] @@ -2000,22 +1989,22 @@ dependencies = [ [[package]] name = "nuid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f0a5ab91609e5a16de31debd21baf09292fe9c0fd7b98fb3bcdd206419ae95" +checksum = "8061bec52f76dc109f1a392ee03afcf2fae4c7950953de6388bc2f5a57b61979" dependencies = [ "lazy_static", - "rand 0.6.5", + "rand 0.7.3", ] [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg 1.0.1", - "num-traits 0.2.12", + "num-traits 0.2.14", ] 
[[package]] @@ -2024,14 +2013,14 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.12", + "num-traits 0.2.14", ] [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg 1.0.1", ] @@ -2065,9 +2054,9 @@ dependencies = [ [[package]] name = "object" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" [[package]] name = "once_cell" @@ -2077,9 +2066,9 @@ checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" [[package]] name = "opaque-debug" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" @@ -2135,7 +2124,16 @@ version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" dependencies = [ - "pin-project-internal", + "pin-project-internal 0.4.27", +] + +[[package]] +name = "pin-project" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +dependencies = [ + "pin-project-internal 1.0.1", ] [[package]] @@ -2146,14 +2144,25 @@ checksum = 
"65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", ] [[package]] name = "pin-project-lite" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e555d9e657502182ac97b539fb3dae8b79cda19e3e4f8ffb5e8de4f18df93c95" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-utils" @@ -2163,15 +2172,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "polling" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab773feb154f12c49ffcfd66ab8bdcf9a1843f950db48b0d8be9d4393783b058" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ "cfg-if 0.1.10", "libc", @@ -2182,9 +2191,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro-error" @@ -2195,7 +2204,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 
1.0.48", "version_check", ] @@ -2212,9 +2221,9 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" @@ -2288,7 +2297,7 @@ dependencies = [ "itertools 0.8.2", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -2516,9 +2525,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f45b719a674bf4b828ff318906d6c133264c793eff7a41e30074a45b5099e2" +checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "aho-corasick", "memchr", @@ -2538,9 +2547,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.19" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17be88d9eaa858870aa5e48cc406c206e4600e983fc4f06bbe5750d93d09761" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "remove_dir_all" @@ -2604,9 +2613,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2610b7f643d18c87dff3b489950269617e6601a51f1f05aa5daefee36f64f0b" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" [[package]] name = "rustc-hash" @@ -2614,6 +2623,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + [[package]] name = "rustls" version = "0.18.1" @@ -2641,14 +2659,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" -dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.7", - "syn 1.0.44", -] +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" [[package]] name = "ryu" @@ -2666,6 +2679,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + [[package]] name = "scopeguard" version = "1.1.0" @@ -2705,31 +2724,46 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "serde" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5" +checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8" +checksum = 
"cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "serde_json" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ "itoa", "ryu", @@ -2767,14 +2801,14 @@ dependencies = [ "darling 0.10.2", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] name = "serde_yaml" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae3e2dd40a7cdc18ca80db804b7f461a39bb721160a85c9a1fa30134bf3c02a5" +checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" dependencies = [ "dtoa", "linked-hash-map", @@ -2804,23 +2838,25 @@ dependencies = [ [[package]] name = "sha2" -version = "0.8.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" dependencies = [ "block-buffer", + "cfg-if 1.0.0", + "cpuid-bool", "digest", - "fake-simd", "opaque-debug", ] [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" dependencies = [ "lazy_static", + "loom", ] [[package]] @@ -2841,56 +2877,30 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" +checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" dependencies = [ - "arc-swap", "libc", ] [[package]] name = "signatory" -version = "0.18.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98887ae129a828815e623e2656c6cfcc0a4b2926dcc6104e0c866d9f2f95041c" +checksum = "9eaebd4be561a7d8148803baa108092f85090189c4b8c3ffb81602b15b5c1771" dependencies = [ - "ed25519", "getrandom", "signature", "subtle-encoding", "zeroize", ] -[[package]] -name = "signatory-dalek" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b5d13f80962e02eb1113e2bd0c698b6b50db6cffa9b4c48dcfbf33abe2cbe7" -dependencies = [ - "digest", - "ed25519-dalek", - "sha2", - "signatory", -] - [[package]] name = "signature" -version = "1.0.0-pre.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0cfcdc45066661979294e965c21b60355da35eb5d638af8143e5aa83fdfce53" -dependencies = [ - "digest", -] - -[[package]] -name = "simple-mutex" -version = "1.1.5" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38aabbeafa6f6dead8cebf246fe9fae1f9215c8d29b3a69f93bd62a9e4a3dcd6" -dependencies = [ - "event-listener", -] +checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" [[package]] name = "slab" @@ -2940,7 +2950,7 @@ checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -2989,9 +2999,9 @@ checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" [[package]] name = "structopt" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a7159e7d0dbcab6f9c980d7971ef50f3ff5753081461eeda120d5974a4ee95" +checksum = 
"126d630294ec449fae0b16f964e35bf3c74f940da9dca17ee9b905f7b3112eb8" dependencies = [ "clap", "lazy_static", @@ -3000,15 +3010,15 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc47de4dfba76248d1e9169ccff240eea2a4dc1e34e309b95b2393109b4b383" +checksum = "65e51c492f9e23a220534971ff5afc14037289de430e3c83f9daf6a1b6ae91e8" dependencies = [ "heck", "proc-macro-error", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -3019,9 +3029,9 @@ checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" [[package]] name = "subtle-encoding" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30492c59ec8bdeee7d6dd2d851711cae5f1361538f10ecfdcd1d377d57c2a783" +checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" dependencies = [ "zeroize", ] @@ -3050,9 +3060,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", @@ -3076,7 +3086,7 @@ checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", "unicode-xid 0.2.1", ] @@ -3129,22 +3139,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" dependencies = [ "thiserror-impl", ] [[package]] name = 
"thiserror-impl" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -3205,7 +3215,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -3264,7 +3274,7 @@ dependencies = [ "http-body 0.3.1", "hyper", "percent-encoding 1.0.1", - "pin-project", + "pin-project 0.4.27", "prost", "prost-derive", "tokio", @@ -3287,7 +3297,7 @@ dependencies = [ "proc-macro2 1.0.24", "prost-build", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -3317,7 +3327,7 @@ dependencies = [ "futures-core", "futures-util", "indexmap", - "pin-project", + "pin-project 0.4.27", "rand 0.7.3", "slab", "tokio", @@ -3337,7 +3347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4887dc2a65d464c8b9b66e0e4d51c2fd6cf5b3373afc72805b0a60bce00446a" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.27", "tokio", "tower-layer", "tower-service", @@ -3351,7 +3361,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f6b5000c3c54d269cc695dff28136bb33d08cbf1df2c48129e143ab65bf3c2a" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.27", "tower-service", ] @@ -3368,7 +3378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92c3040c5dbed68abffaa0d4517ac1a454cd741044f33ab0eefab6b8d1361404" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.27", "tokio", "tower-layer", "tower-load", @@ -3383,7 +3393,7 @@ checksum = "8cc79fc3afd07492b7966d7efa7c6c50f8ed58d768a6075dd7ae6591c5d2017b" dependencies = [ "futures-core", "log", - 
"pin-project", + "pin-project 0.4.27", "tokio", "tower-discover", "tower-service", @@ -3396,7 +3406,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f021e23900173dc315feb4b6922510dae3e79c689b74c089112066c11f0ae4e" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.27", "tower-layer", "tower-service", ] @@ -3432,7 +3442,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6727956aaa2f8957d4d9232b308fe8e4e65d99db30f42b225646e86c9b6a952" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.27", "tokio", "tower-layer", "tower-service", @@ -3450,7 +3460,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "127b8924b357be938823eaaec0608c482d40add25609481027b96198b2e4b31e" dependencies = [ - "pin-project", + "pin-project 0.4.27", "tokio", "tower-layer", "tower-service", @@ -3464,7 +3474,7 @@ checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" dependencies = [ "futures-core", "futures-util", - "pin-project", + "pin-project 0.4.27", "tower-service", ] @@ -3489,7 +3499,7 @@ checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", ] [[package]] @@ -3507,7 +3517,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" dependencies = [ - "pin-project", + "pin-project 0.4.27", "tracing", ] @@ -3534,9 +3544,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -3632,10 +3642,11 @@ 
checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" dependencies = [ + "form_urlencoded", "idna", "matches", "percent-encoding 2.1.0", @@ -3733,7 +3744,7 @@ dependencies = [ "log", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", "wasm-bindgen-shared", ] @@ -3755,7 +3766,7 @@ checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.44", + "syn 1.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3797,9 +3808,9 @@ dependencies = [ [[package]] name = "wepoll-sys" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "142bc2cba3fe88be1a8fcb55c727fa4cd5b0cf2d7438722792e22f26f04bc1e0" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] @@ -3881,3 +3892,18 @@ name = "zeroize" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", + "synstructure", +] diff --git a/Cargo.toml b/Cargo.toml index bfabe1b8d..4383debfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [patch.crates-io] -h2 = { git = "https://github.com/gila/h2", branch = "v0.2.6"} +h2 = { git = "https://github.com/gila/h2", branch = "v0.2.7"} 
partition-identity = { git = "https://github.com/openebs/partition-identity.git" } [profile.dev] diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index c144a30fa..4a2dd1a76 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0smfd31mzxgnprd30536ww2csy9n0mksaac09p6b138pp7gk7lsj"; + cargoSha256 = "08raaybbxpg9vj12nmd91pd8jqxdh3rmvkyin81isbqqmawgnkq7"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" From 64eaec75b3a7859b353efcb1855c31cc78085679 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 6 Nov 2020 22:01:48 +0100 Subject: [PATCH 40/92] use different container name --- mayastor/tests/common/compose.rs | 2 +- mayastor/tests/io_job.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 2031a4d6d..90dbb55bb 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -648,7 +648,7 @@ impl<'a> MayastorTest<'a> { .spawn(move || { MayastorEnvironment::new(args).init(); tx.send(Reactors::master()).unwrap(); - Reactors::master().running(); + Reactors::master().developer_delayed(); Reactors::master().poll_reactor(); }) .unwrap(); diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs index be4549d95..069572d77 100644 --- a/mayastor/tests/io_job.rs +++ b/mayastor/tests/io_job.rs @@ -140,8 +140,8 @@ async fn io_driver() { let compose = compose::Builder::new() .name("cargo-test") .network("10.1.0.0/16") - .add_container("ms2") - .add_container("ms1") + .add_container("nvmf-target1") + .add_container("nvmf-target2") .with_clean(true) .build() .await From f15efc08f813e758cb8474b8958bc8194b99e06d Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Mon, 9 Nov 2020 09:09:28 +0100 Subject: [PATCH 41/92] compose: default to cleanup 
when shutting down --- mayastor/tests/common/compose.rs | 36 ++++++++++---------------------- mayastor/tests/io_job.rs | 3 ++- mayastor/tests/lvs_pool_rpc.rs | 2 -- 3 files changed, 13 insertions(+), 28 deletions(-) diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 90dbb55bb..343665086 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -6,8 +6,6 @@ use std::{ time::Duration, }; -use crossbeam::crossbeam_channel::bounded; - use bollard::{ container::{ Config, @@ -32,6 +30,7 @@ use bollard::{ }, Docker, }; +use crossbeam::crossbeam_channel::bounded; use futures::TryStreamExt; use ipnetwork::Ipv4Network; use tokio::sync::oneshot::channel; @@ -118,7 +117,7 @@ impl Builder { name: "".to_string(), containers: Default::default(), network: "10.1.0.0".to_string(), - clean: false, + clean: true, } } @@ -292,7 +291,7 @@ impl ComposeTest { }) } - async fn network_remove(&mut self) -> Result<(), Error> { + async fn network_remove(&self) -> Result<(), Error> { // if the network is not found, its not an error, any other error is // reported as such. Networks can only be destroyed when all containers // attached to it are removed. 
To get a list of attached @@ -351,11 +350,14 @@ impl ComposeTest { Ok(()) } - /// remove all containers - pub async fn remove_all(&mut self) -> Result<(), Error> { + /// remove all containers and its network + async fn remove_all(&self) -> Result<(), Error> { for k in &self.containers { self.stop(&k.0).await?; self.remove_container(&k.0).await?; + while let Ok(_c) = self.docker.inspect_container(&k.0, None).await { + tokio::time::delay_for(Duration::from_millis(500)).await; + } } self.network_remove().await?; Ok(()) @@ -478,7 +480,7 @@ impl ComposeTest { .stop_container( id.0.as_str(), Some(StopContainerOptions { - t: 5, + t: 3, }), ) .await @@ -567,24 +569,8 @@ impl ComposeTest { Ok(handles) } - pub fn down(&self) { - if self.clean { - self.containers.keys().for_each(|c| { - std::process::Command::new("docker") - .args(&["stop", c]) - .output() - .unwrap(); - std::process::Command::new("docker") - .args(&["rm", c]) - .output() - .unwrap(); - }); - - std::process::Command::new("docker") - .args(&["network", "rm", &self.name]) - .output() - .unwrap(); - } + pub async fn down(&self) { + self.remove_all().await.unwrap(); } } diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs index 069572d77..e96ea5038 100644 --- a/mayastor/tests/io_job.rs +++ b/mayastor/tests/io_job.rs @@ -194,6 +194,7 @@ async fn io_driver() { .await .unwrap(); // now we manually destroy the docker containers - DOCKER_COMPOSE.get().unwrap().down(); + DOCKER_COMPOSE.get().unwrap().down().await; + // ms gets dropped and will call mayastor_env_stop() } diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs index d4eb2c38f..6b5ebd021 100644 --- a/mayastor/tests/lvs_pool_rpc.rs +++ b/mayastor/tests/lvs_pool_rpc.rs @@ -9,7 +9,6 @@ use rpc::mayastor::{ pub mod common; use common::compose::Builder; -static DISKNAME1: &str = "/tmp/disk1.img"; #[tokio::test] async fn lvs_pool_rpc() { @@ -141,5 +140,4 @@ async fn lvs_pool_rpc() { .unwrap(); test.logs("ms1").await.unwrap(); - 
common::delete_file(&[DISKNAME1.into()]); } From fd1c39e8eff93d082e3f00a71b174482500a4721 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Thu, 5 Nov 2020 17:12:30 +0000 Subject: [PATCH 42/92] nvmf: Enable ANA reporting when creating subsystem Enable Asymmetric Namespace Access reporting when creating the nvmf subsystem. This allows a Nexus published on nvmf to benefit from multipath access from a supporting initiator, such as the one in the Linux kernel. This requires multiple Nexuses published with the same UUID which means they have the same NQN. Implement the identify controller admin command in BdevHandle and the standalone initiator so that the mocha test can verify that ANA reporting is indeed enabled. --- mayastor-test/test_nexus.js | 22 ++++++++++++++ mayastor/src/bdev/nexus/nexus_io.rs | 5 ++++ mayastor/src/bin/initiator.rs | 38 ++++++++++++++++++++++++ mayastor/src/core/handle.rs | 42 +++++++++++++++++++-------- mayastor/src/subsys/nvmf/subsystem.rs | 28 ++++++++++++++---- 5 files changed, 118 insertions(+), 17 deletions(-) diff --git a/mayastor-test/test_nexus.js b/mayastor-test/test_nexus.js index 4e004436c..8d068dde2 100644 --- a/mayastor-test/test_nexus.js +++ b/mayastor-test/test_nexus.js @@ -614,6 +614,7 @@ describe('nexus', function () { describe('nvmf datapath', function () { const blockFile = '/tmp/test_block'; + const idCtrlrFile = '/tmp/nvme-id-ctrlr'; function rmBlockFile (done) { common.execAsRoot('rm', ['-f', blockFile], () => { @@ -662,6 +663,27 @@ describe('nexus', function () { }); }); + // technically control path but this is nvmf-only + it('should identify nvmf controller', (done) => { + common.execAsRoot(common.getCmdPath('initiator'), [uri, 'id-ctrlr', idCtrlrFile], (err, stdout) => { + if (err) { + done(err); + } else { + fs.readFile(idCtrlrFile, (err, data) => { + if (err) throw err; + // Identify Controller Data Structure + // nvme_id_ctrl or spdk_nvme_ctrlr_data + 
assert.equal(data.length, 4096); + // model number + assert.equal(data.slice(24, 32).toString(), 'Mayastor'); + // cmic, bit 3 ana_reporting + assert.equal((data[76] & 0x8), 0x8, 'ANA reporting should be enabled'); + }); + done(); + } + }); + }); + it('should write to nvmf replica', (done) => { common.execAsRoot( common.getCmdPath('initiator'), diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index e5b2f76c7..eeb935f93 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -102,6 +102,11 @@ pub mod io_status { /// NVMe Admin opcode, from nvme_spec.h pub mod nvme_admin_opc { + // pub const GET_LOG_PAGE: u8 = 0x02; + pub const IDENTIFY: u8 = 0x06; + // pub const ABORT: u8 = 0x08; + // pub const SET_FEATURES: u8 = 0x09; + // pub const GET_FEATURES: u8 = 0x0a; // Vendor-specific pub const CREATE_SNAPSHOT: u8 = 0xc0; } diff --git a/mayastor/src/bin/initiator.rs b/mayastor/src/bin/initiator.rs index 4ffb909ac..5fec8c9f1 100644 --- a/mayastor/src/bin/initiator.rs +++ b/mayastor/src/bin/initiator.rs @@ -114,6 +114,24 @@ async fn write(uri: &str, offset: u64, file: &str) -> Result<()> { Ok(()) } +/// NVMe Admin. Only works with read commands without a buffer requirement. +async fn nvme_admin(uri: &str, opcode: u8) -> Result<()> { + let bdev = create_bdev(uri).await?; + let h = Bdev::open(&bdev, true).unwrap().into_handle().unwrap(); + h.nvme_admin_custom(opcode).await?; + Ok(()) +} + +/// NVMe Admin identify controller, write output to a file. +async fn identify_ctrlr(uri: &str, file: &str) -> Result<()> { + let bdev = create_bdev(uri).await?; + let h = Bdev::open(&bdev, true).unwrap().into_handle().unwrap(); + let mut buf = h.dma_malloc(4096).unwrap(); + h.nvme_identify_ctrlr(&mut buf).await?; + fs::write(file, buf.as_slice())?; + Ok(()) +} + /// Create a snapshot. 
async fn create_snapshot(uri: &str) -> Result<()> { let bdev = create_bdev(uri).await?; @@ -157,6 +175,18 @@ fn main() { .help("File to read data from that will be written to the replica") .required(true) .index(1))) + .subcommand(SubCommand::with_name("nvme-admin") + .about("Send a custom NVMe Admin command") + .arg(Arg::with_name("opcode") + .help("Admin command opcode to send") + .required(true) + .index(1))) + .subcommand(SubCommand::with_name("id-ctrlr") + .about("Send NVMe Admin identify controller command") + .arg(Arg::with_name("FILE") + .help("File to write output of identify controller command") + .required(true) + .index(1))) .subcommand(SubCommand::with_name("create-snapshot") .about("Create a snapshot on the replica")) .get_matches(); @@ -187,6 +217,14 @@ fn main() { read(&uri, offset, matches.value_of("FILE").unwrap()).await } else if let Some(matches) = matches.subcommand_matches("write") { write(&uri, offset, matches.value_of("FILE").unwrap()).await + } else if let Some(matches) = matches.subcommand_matches("nvme-admin") { + let opcode: u8 = match matches.value_of("opcode") { + Some(val) => val.parse().expect("Opcode must be a number"), + None => 0, + }; + nvme_admin(&uri, opcode).await + } else if let Some(matches) = matches.subcommand_matches("id-ctrlr") { + identify_ctrlr(&uri, matches.value_of("FILE").unwrap()).await } else if matches.subcommand_matches("create-snapshot").is_some() { create_snapshot(&uri).await } else { diff --git a/mayastor/src/core/handle.rs b/mayastor/src/core/handle.rs index 01d3d0790..ae2da726f 100644 --- a/mayastor/src/core/handle.rs +++ b/mayastor/src/core/handle.rs @@ -206,32 +206,44 @@ impl BdevHandle { } } - /// create a snapshot on all children + /// create a snapshot, only works for nvme bdev /// returns snapshot time as u64 seconds since Unix epoch pub async fn create_snapshot(&self) -> Result { let mut cmd = spdk_sys::spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::CREATE_SNAPSHOT.into()); let now = 
subsys::set_snapshot_time(&mut cmd); debug!("Creating snapshot at {}", now); - self.nvme_admin(&cmd).await?; + self.nvme_admin(&cmd, None).await?; Ok(now as u64) } - /// sends an NVMe Admin command with a custom opcode to all children - pub async fn nvme_admin_custom( + /// identify controller + /// buffer must be at least 4096B + pub async fn nvme_identify_ctrlr( &self, - opcode: u8, - ) -> Result { + mut buffer: &mut DmaBuf, + ) -> Result<(), CoreError> { + let mut cmd = spdk_sys::spdk_nvme_cmd::default(); + cmd.set_opc(nvme_admin_opc::IDENTIFY.into()); + cmd.nsid = 0xffffffff; + // Controller Identifier + unsafe { *spdk_sys::nvme_cmd_cdw10_get(&mut cmd) = 1 }; + self.nvme_admin(&cmd, Some(&mut buffer)).await + } + + /// sends an NVMe Admin command, only for read commands without buffer + pub async fn nvme_admin_custom(&self, opcode: u8) -> Result<(), CoreError> { let mut cmd = spdk_sys::spdk_nvme_cmd::default(); cmd.set_opc(opcode.into()); - self.nvme_admin(&cmd).await + self.nvme_admin(&cmd, None).await } - /// sends the specified NVMe Admin command to all children + /// sends the specified NVMe Admin command, only read commands pub async fn nvme_admin( &self, nvme_cmd: &spdk_sys::spdk_nvme_cmd, - ) -> Result { + buffer: Option<&mut DmaBuf>, + ) -> Result<(), CoreError> { trace!("Sending nvme_admin {}", nvme_cmd.opc()); let (s, r) = oneshot::channel::(); // Use the spdk-sys variant spdk_bdev_nvme_admin_passthru that @@ -241,8 +253,14 @@ impl BdevHandle { self.desc.as_ptr(), self.channel.as_ptr(), &*nvme_cmd, - std::ptr::null_mut(), - 0, + match buffer { + Some(ref b) => ***b, + None => std::ptr::null_mut(), + }, + match buffer { + Some(b) => b.len(), + None => 0, + }, Some(Self::io_completion_cb), cb_arg(s), ) @@ -256,7 +274,7 @@ impl BdevHandle { } if r.await.expect("Failed awaiting NVMe Admin IO") { - Ok(0) + Ok(()) } else { Err(CoreError::NvmeAdminFailed { opcode: (*nvme_cmd).opc(), diff --git a/mayastor/src/subsys/nvmf/subsystem.rs 
b/mayastor/src/subsys/nvmf/subsystem.rs index 27c7c317b..53ce533ed 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -30,6 +30,7 @@ use spdk_sys::{ spdk_nvmf_subsystem_pause, spdk_nvmf_subsystem_resume, spdk_nvmf_subsystem_set_allow_any_host, + spdk_nvmf_subsystem_set_ana_reporting, spdk_nvmf_subsystem_set_mn, spdk_nvmf_subsystem_set_sn, spdk_nvmf_subsystem_start, @@ -105,6 +106,7 @@ impl Debug for NvmfSubsystem { "allow_any_host", &self.0.as_ref().flags.allow_any_host(), ) + .field("ana_reporting", &self.0.as_ref().flags.ana_reporting()) .field("listeners", &self.listeners_to_vec()) .finish() } @@ -122,6 +124,7 @@ impl TryFrom for NvmfSubsystem { fn try_from(bdev: Bdev) -> Result { let ss = NvmfSubsystem::new(bdev.name().as_str())?; + ss.set_ana_reporting(true)?; ss.allow_any(true); if let Err(e) = ss.add_namespace(&bdev) { ss.destroy(); @@ -168,7 +171,7 @@ impl NvmfSubsystem { .to_result(|e| Error::Subsystem { source: Errno::from_i32(e), nqn: uuid.into(), - msg: "failed to set serial".into(), + msg: "failed to set model number".into(), })?; Ok(NvmfSubsystem(ss)) @@ -178,6 +181,7 @@ impl NvmfSubsystem { /// mostly due to testing. 
pub fn new_with_uuid(uuid: &str, bdev: &Bdev) -> Result { let ss = NvmfSubsystem::new(uuid)?; + ss.set_ana_reporting(true)?; ss.allow_any(true); ss.add_namespace(bdev)?; Ok(ss) @@ -197,8 +201,8 @@ impl NvmfSubsystem { ) }; - // the first name space should be 1 and we do not (currently) use - // more then one namespace + // the first namespace should be 1 and we do not (currently) use + // more than one namespace if ns_id < 1 { Err(Error::Namespace { @@ -224,6 +228,7 @@ impl NvmfSubsystem { .to_string() } } + /// allow any host to connect to the subsystem pub fn allow_any(&self, enable: bool) { unsafe { @@ -231,6 +236,19 @@ impl NvmfSubsystem { }; } + /// enable Asymmetric Namespace Access (ANA) reporting + pub fn set_ana_reporting(&self, enable: bool) -> Result<(), Error> { + unsafe { + spdk_nvmf_subsystem_set_ana_reporting(self.0.as_ptr(), enable) + } + .to_result(|e| Error::Subsystem { + source: Errno::from_i32(e), + nqn: self.get_nqn(), + msg: format!("failed to set ANA reporting, enable {}", enable), + })?; + Ok(()) + } + // we currently allow all listeners to the subsystem async fn add_listener(&self) -> Result<(), Error> { extern "C" fn listen_cb(arg: *mut c_void, status: i32) { @@ -254,7 +272,7 @@ impl NvmfSubsystem { ); } - r.await.expect("listen a callback gone").to_result(|e| { + r.await.expect("listener callback gone").to_result(|e| { Error::Transport { source: Errno::from_i32(e), msg: "Failed to add listener".to_string(), @@ -392,7 +410,7 @@ impl NvmfSubsystem { r.await.unwrap().to_result(|e| Error::Subsystem { source: Errno::from_i32(e), nqn: self.get_nqn(), - msg: "failed to stop the subsystem".to_string(), + msg: "failed to pause the subsystem".to_string(), }) } From 4cc13e275ce27a1af8107d71831a06eec63d92bd Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 6 Nov 2020 21:57:22 +0100 Subject: [PATCH 43/92] nvme: add uri support Add parsing of nvmf uri's to the admin library --- Cargo.lock | 1 + csi/src/dev/nvmf.rs | 8 +-- 
nix/pkgs/mayastor/default.nix | 4 +- nvmeadm/Cargo.toml | 1 + nvmeadm/src/error.rs | 4 ++ nvmeadm/src/lib.rs | 2 + nvmeadm/src/nvme_namespaces.rs | 13 ++-- nvmeadm/src/nvme_uri.rs | 128 +++++++++++++++++++++++++++++++++ nvmeadm/src/nvmf_discovery.rs | 18 ++--- 9 files changed, 155 insertions(+), 24 deletions(-) create mode 100644 nvmeadm/src/nvme_uri.rs diff --git a/Cargo.lock b/Cargo.lock index 800b96912..5276629d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2049,6 +2049,7 @@ dependencies = [ "num-traits 0.1.43", "once_cell", "snafu", + "url", "uuid", ] diff --git a/csi/src/dev/nvmf.rs b/csi/src/dev/nvmf.rs index 27624b78d..449a92159 100644 --- a/csi/src/dev/nvmf.rs +++ b/csi/src/dev/nvmf.rs @@ -73,11 +73,9 @@ impl TryFrom<&Url> for NvmfAttach { #[tonic::async_trait] impl Attach for NvmfAttach { async fn attach(&self) -> Result<(), DeviceError> { - if let Err(error) = nvmeadm::nvmf_discovery::connect( - &self.host, - self.port as u32, - &self.nqn, - ) { + if let Err(error) = + nvmeadm::nvmf_discovery::connect(&self.host, self.port, &self.nqn) + { match (error) { nvmeadm::error::NvmeError::ConnectInProgress => return Ok(()), _ => { diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 4a2dd1a76..fc9be8337 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -38,8 +38,8 @@ let version = builtins.readFile "${version_drv}"; buildProps = rec { name = "mayastor"; - #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "08raaybbxpg9vj12nmd91pd8jqxdh3rmvkyin81isbqqmawgnkq7"; + # cargoSha256 = "0000000000000000000000000000000000000000000000000000"; + cargoSha256 = "07d3yvl43pqw5iwjpb1rd9b34s572m8w4p89nmqd68pc0kmpq4d2"; inherit version; src = whitelistSource ../../../. 
[ "Cargo.lock" diff --git a/nvmeadm/Cargo.toml b/nvmeadm/Cargo.toml index f5e293053..e3a419c43 100644 --- a/nvmeadm/Cargo.toml +++ b/nvmeadm/Cargo.toml @@ -16,3 +16,4 @@ num-traits = "^0.1" once_cell = "1.3" snafu = "0.6" uuid = { version = "0.7", features = ["v4"] } +url = "2.2.0" diff --git a/nvmeadm/src/error.rs b/nvmeadm/src/error.rs index 111c1285e..6db1b9b3c 100644 --- a/nvmeadm/src/error.rs +++ b/nvmeadm/src/error.rs @@ -44,6 +44,10 @@ pub enum NvmeError { source: glob::PatternError, path_prefix: String, }, + #[snafu(display("NVMe URI invalid: {}", source))] + UrlError { source: url::ParseError }, + #[snafu(display("Transport type {} not supported", trtype))] + TransportError { trtype: String }, } impl From for NvmeError { diff --git a/nvmeadm/src/lib.rs b/nvmeadm/src/lib.rs index 8cf70793f..c577e4518 100644 --- a/nvmeadm/src/lib.rs +++ b/nvmeadm/src/lib.rs @@ -38,7 +38,9 @@ pub mod nvmf_subsystem; use error::{IoError, NvmeError}; use snafu::ResultExt; +mod nvme_uri; +pub use nvme_uri::NvmeTarget; /// the device entry in /dev for issuing ioctls to the kernels nvme driver const NVME_FABRICS_PATH: &str = "/dev/nvme-fabrics"; /// ioctl for passing any NVMe command to the kernel diff --git a/nvmeadm/src/nvme_namespaces.rs b/nvmeadm/src/nvme_namespaces.rs index 13d371940..b50f449f7 100644 --- a/nvmeadm/src/nvme_namespaces.rs +++ b/nvmeadm/src/nvme_namespaces.rs @@ -26,7 +26,7 @@ pub struct NvmeDevice { /// firmware revision fw_rev: String, /// the nqn of the subsystem this device instance is connected to - subsysnqn: String, + pub subsysnqn: String, } impl NvmeDevice { @@ -47,11 +47,12 @@ impl NvmeDevice { model: parse_value(&subsys, "model")?, serial: parse_value(&subsys, "serial")?, size: parse_value(&source, "size")?, - // NOTE: during my testing, it seems that NON fabric devices - // do not have a UUID, this means that local PCIe devices will - // be filtered out automatically. 
We should not depend on this - // feature or, bug until we gather more data - uuid: parse_value(&source, "uuid")?, + // /* NOTE: during my testing, it seems that NON fabric devices + // * do not have a UUID, this means that local PCIe devices will + // * be filtered out automatically. We should not depend on this + // * feature or, bug until we gather more data + uuid: parse_value(&source, "uuid") + .unwrap_or_else(|_| String::from("N/A")), wwid: parse_value(&source, "wwid")?, nsid: parse_value(&source, "nsid")?, }) diff --git a/nvmeadm/src/nvme_uri.rs b/nvmeadm/src/nvme_uri.rs new file mode 100644 index 000000000..dbf0d11bd --- /dev/null +++ b/nvmeadm/src/nvme_uri.rs @@ -0,0 +1,128 @@ +use std::{convert::TryFrom, time::Duration}; + +use url::{ParseError, Url}; + +use crate::{ + error::NvmeError, + nvme_namespaces::{NvmeDevice, NvmeDeviceList}, + nvmf_discovery::disconnect, +}; + +use super::nvmf_discovery::connect; + +pub struct NvmeTarget { + host: String, + port: u16, + subsysnqn: String, + trtype: String, +} + +impl TryFrom for NvmeTarget { + type Error = NvmeError; + fn try_from(value: String) -> Result { + NvmeTarget::try_from(value.as_str()) + } +} + +impl TryFrom<&str> for NvmeTarget { + type Error = NvmeError; + + fn try_from(value: &str) -> Result { + let url = Url::parse(&value).map_err(|source| NvmeError::UrlError { + source, + })?; + + let trtype = match url.scheme() { + "nvmf" | "nvmf+tcp" => Ok("tcp"), + _ => Err(NvmeError::UrlError { + source: ParseError::IdnaError, + }), + }? + .into(); + + let host = url + .host_str() + .ok_or(NvmeError::UrlError { + source: ParseError::EmptyHost, + })? 
+ .into(); + + let subnqn = match url.path_segments() { + None => Err(NvmeError::UrlError { + source: ParseError::RelativeUrlWithCannotBeABaseBase, + }), + Some(s) => { + let segments = s.collect::>(); + if segments[0].is_empty() { + Err(NvmeError::UrlError { + source: ParseError::RelativeUrlWithCannotBeABaseBase, + }) + } else { + Ok(segments[0].to_string()) + } + } + }?; + + Ok(Self { + trtype, + host, + port: url.port().unwrap_or(4420), + subsysnqn: subnqn, + }) + } +} + +impl NvmeTarget { + pub fn connect(&self) -> Result, NvmeError> { + if self.trtype != "tcp" { + return Err(NvmeError::TransportError { + trtype: self.trtype.clone(), + }); + } + + connect(&self.host, self.port, &self.subsysnqn)?; + + let mut retries = 10; + let mut all_nvme_devices; + loop { + std::thread::sleep(Duration::from_millis(1000)); + + all_nvme_devices = NvmeDeviceList::new() + .filter_map(Result::ok) + .filter(|b| b.subsysnqn == self.subsysnqn) + .collect::>(); + + retries -= 1; + if retries == 0 || !all_nvme_devices.is_empty() { + break; + } + } + + Ok(all_nvme_devices) + } + + pub fn disconnect(&self) -> Result { + disconnect(&self.subsysnqn) + } +} + +#[test] +fn nvme_parse_uri() { + let target = + NvmeTarget::try_from("nvmf://1.2.3.4:1234/testnqn.what-ever.foo") + .unwrap(); + + assert_eq!(target.port, 1234); + assert_eq!(target.host, "1.2.3.4"); + assert_eq!(target.trtype, "tcp"); + assert_eq!(target.subsysnqn, "testnqn.what-ever.foo"); + + let target = + NvmeTarget::try_from("nvmf+tcp://1.2.3.4:1234/testnqn.what-ever.foo") + .unwrap(); + + assert_eq!(target.port, 1234); + assert_eq!(target.host, "1.2.3.4"); + assert_eq!(target.trtype, "tcp"); + assert_eq!(target.subsysnqn, "testnqn.what-ever.foo"); +} diff --git a/nvmeadm/src/nvmf_discovery.rs b/nvmeadm/src/nvmf_discovery.rs index aeee8ee3d..e98429e59 100644 --- a/nvmeadm/src/nvmf_discovery.rs +++ b/nvmeadm/src/nvmf_discovery.rs @@ -464,7 +464,7 @@ impl DiscoveryLogEntry { pub fn connect( ip_addr: &str, - port: u32, + port: 
u16, nqn: &str, ) -> Result { let mut connect_args = String::new(); @@ -488,16 +488,12 @@ pub fn connect( }, )?; if let Err(e) = file.write_all(connect_args.as_bytes()) { - match e.kind() { - ErrorKind::AlreadyExists => { - return Err(NvmeError::ConnectInProgress) - } - _ => { - return Err(NvmeError::IoError { - source: e, - }) - } - } + return match e.kind() { + ErrorKind::AlreadyExists => Err(NvmeError::ConnectInProgress), + _ => Err(NvmeError::IoError { + source: e, + }), + }; } let mut buf = String::new(); file.read_to_string(&mut buf).context(ConnectError { From 800b6af8441f5adb8c4cc8382a5191a09592c269 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Thu, 5 Nov 2020 16:33:26 +0000 Subject: [PATCH 44/92] Ensure bdevs are removed when not in use Bdevs are now opened using the spdk_bdev_open_ext() function (note: SPDK has deprecated spdk_bdev_open). This allows a callback to be registered which is called when an asynchronous bdev event, such as removal, occurs. When a NexusChild is closed, destroy is called, resulting in spdk raising a SPDK_BDEV_EVENT_REMOVE event. This calls back to the NexusChild remove() function which is responsible for closing everything down in an orderly fashion before the underlying bdev is removed. Additional changes: - The RebuildJob no longer stores the source and destination handles. These are acquired as and when they are needed. This ensures that a bdev can be removed even while there is an active rebuild process. - cancel_child_rebuild_jobs() now waits for all rebuild jobs to terminate before returning - The reconfigure test case has been removed as it is no longer valid given that a bdev is removed when setting a child offline. - The replica_timeout test is now ignored. It still needs to be determined whether or not this is a valid test case. 
--- mayastor/src/bdev/nexus/nexus_bdev.rs | 55 +++-- .../src/bdev/nexus/nexus_bdev_children.rs | 65 +++--- mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs | 16 +- mayastor/src/bdev/nexus/nexus_channel.rs | 2 +- mayastor/src/bdev/nexus/nexus_child.rs | 107 ++++++--- mayastor/src/core/bdev.rs | 76 +++--- mayastor/src/core/descriptor.rs | 11 +- mayastor/src/rebuild/rebuild_api.rs | 4 +- mayastor/src/rebuild/rebuild_impl.rs | 53 ++--- mayastor/tests/core.rs | 2 +- mayastor/tests/reconfigure.rs | 217 ------------------ mayastor/tests/replica_timeout.rs | 1 + 12 files changed, 248 insertions(+), 361 deletions(-) delete mode 100644 mayastor/tests/reconfigure.rs diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 849891a6e..8345991fc 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -47,7 +47,7 @@ use crate::{ nexus_nbd::{NbdDisk, NbdError}, }, }, - core::{Bdev, CoreError, DmaError, Share}, + core::{Bdev, CoreError, DmaError, Reactor, Share}, ffihelper::errno_result_from_i32, lvs::Lvol, nexus_uri::{bdev_destroy, NexusBdevError}, @@ -145,6 +145,12 @@ pub enum Error { child: String, name: String, }, + #[snafu(display("Failed to close child {} of nexus {}", child, name))] + CloseChild { + source: NexusBdevError, + child: String, + name: String, + }, #[snafu(display( "Cannot delete the last child {} of nexus {}", child, @@ -467,9 +473,9 @@ impl Nexus { pub async fn open(&mut self) -> Result<(), Error> { debug!("Opening nexus {}", self.name); - self.try_open_children()?; + self.try_open_children().await?; self.sync_labels().await?; - self.register() + self.register().await } pub async fn sync_labels(&mut self) -> Result<(), Error> { @@ -502,9 +508,21 @@ impl Nexus { } trace!("{}: closing, from state: {:?} ", self.name, self.state); - self.children.iter_mut().for_each(|c| { - if c.state() == ChildState::Open { - c.close(); + + let nexus_name = self.name.clone(); + Reactor::block_on(async 
move { + let nexus = nexus_lookup(&nexus_name).expect("Nexus not found"); + for child in &nexus.children { + if child.state() == ChildState::Open { + if let Err(e) = child.close().await { + error!( + "{}: child {} failed to close with error {}", + nexus.name, + child.name, + e.verbose() + ); + } + } } }); @@ -542,12 +560,14 @@ impl Nexus { } for child in self.children.iter_mut() { - let _ = child.close(); info!("Destroying child bdev {}", child.name); - - let r = child.destroy().await; - if r.is_err() { - error!("Failed to destroy child {}", child.name); + if let Err(e) = child.close().await { + // TODO: should an error be returned here? + error!( + "Failed to close child {} with error {}", + child.name, + e.verbose() + ); } } @@ -578,7 +598,7 @@ impl Nexus { /// register the bdev with SPDK and set the callbacks for io channel /// creation. Once this function is called, the device is visible and can /// be used for IO. - pub(crate) fn register(&mut self) -> Result<(), Error> { + pub(crate) async fn register(&mut self) -> Result<(), Error> { assert_eq!(self.state, NexusState::Init); unsafe { @@ -604,7 +624,16 @@ impl Nexus { unsafe { spdk_io_device_unregister(self.as_ptr(), None); } - self.children.iter_mut().map(|c| c.close()).for_each(drop); + for child in &self.children { + if let Err(e) = child.close().await { + error!( + "{}: child {} failed to close with error {}", + self.name, + child.name, + e.verbose() + ); + } + } self.set_state(NexusState::Closed); Err(err).context(RegisterNexus { name: self.name.clone(), diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index d1f5c99d1..6cdc7ff3b 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -31,7 +31,6 @@ use crate::{ nexus::{ nexus_bdev::{ CreateChild, - DestroyChild, Error, Nexus, NexusState, @@ -116,7 +115,7 @@ impl Nexus { e.verbose() ); match self.get_child_by_name(uri) { - Ok(child) => 
child.fault(Reason::RebuildFailed), + Ok(child) => child.fault(Reason::RebuildFailed).await, Err(e) => error!( "Failed to find newly added child {}, error: {}", uri, @@ -182,7 +181,7 @@ impl Nexus { // it can never take part in the IO path // of the nexus until it's rebuilt from a healthy child. - child.fault(Reason::OutOfSync); + child.fault(Reason::OutOfSync).await; if ChildStatusConfig::add(&child).is_err() { error!("Failed to add child status information"); } @@ -230,23 +229,22 @@ impl Nexus { Some(val) => val, }; - self.children[idx].close(); - assert_eq!(self.children[idx].state(), ChildState::Closed); + if let Err(e) = self.children[idx].close().await { + return Err(Error::CloseChild { + name: self.name.clone(), + child: self.children[idx].name.clone(), + source: e, + }); + } - let mut child = self.children.remove(idx); + self.children.remove(idx); self.child_count -= 1; // Update child status to remove this child NexusChild::save_state_change(); - self.reconfigure(DREvent::ChildRemove).await; - - let result = child.destroy().await.context(DestroyChild { - name: self.name.clone(), - child: uri, - }); self.start_rebuild_jobs(cancelled_rebuilding_children).await; - result + Ok(()) } /// offline a child device and reconfigure the IO channels @@ -260,7 +258,7 @@ impl Nexus { self.cancel_child_rebuild_jobs(name).await; if let Some(child) = self.children.iter_mut().find(|c| c.name == name) { - child.offline(); + child.offline().await; } else { return Err(Error::ChildNotFound { name: self.name.clone(), @@ -311,7 +309,7 @@ impl Nexus { match child.state() { ChildState::Faulted(_) => {} _ => { - child.fault(reason); + child.fault(reason).await; NexusChild::save_state_change(); self.reconfigure(DREvent::ChildFault).await; } @@ -340,7 +338,7 @@ impl Nexus { trace!("{} Online child request", self.name); if let Some(child) = self.children.iter_mut().find(|c| c.name == name) { - child.online(self.size).context(OpenChild { + child.online(self.size).await.context(OpenChild 
{ child: name.to_owned(), name: self.name.clone(), })?; @@ -356,10 +354,7 @@ impl Nexus { /// destroy all children that are part of this nexus closes any child /// that might be open first pub(crate) async fn destroy_children(&mut self) { - let futures = self.children.iter_mut().map(|c| { - c.close(); - c.destroy() - }); + let futures = self.children.iter_mut().map(|c| c.close()); let results = join_all(futures).await; if results.iter().any(|c| c.is_err()) { error!("{}: Failed to destroy child", self.name); @@ -382,7 +377,7 @@ impl Nexus { } /// try to open all the child devices - pub(crate) fn try_open_children(&mut self) -> Result<(), Error> { + pub(crate) async fn try_open_children(&mut self) -> Result<(), Error> { if self.children.is_empty() || self.children.iter().any(|c| c.bdev.is_none()) { @@ -418,19 +413,23 @@ impl Nexus { // completed yet so we fail the registration all together for now. if !error.is_empty() { - open.into_iter() - .map(Result::unwrap) - .map(|name| { - if let Some(child) = - self.children.iter_mut().find(|c| c.name == name) - { - let _ = child.close(); - } else { - error!("{}: child {} failed to open", self.name, name); + for open_child in open { + let name = open_child.unwrap(); + if let Some(child) = + self.children.iter_mut().find(|c| c.name == name) + { + if let Err(e) = child.close().await { + error!( + "{}: child {} failed to close with error {}", + self.name, + name, + e.verbose() + ); } - }) - .for_each(drop); - + } else { + error!("{}: child {} failed to open", self.name, name); + } + } return Err(Error::NexusIncomplete { name: self.name.clone(), }); diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index 345b071fa..c0208d206 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -101,11 +101,17 @@ impl Nexus { /// used for shutdown operations and /// unlike the client operation stop, this command does not fail /// 
as it overrides the previous client operations - fn terminate_rebuild(&self, name: &str) { + async fn terminate_rebuild(&self, name: &str) { // If a rebuild job is not found that's ok // as we were just going to remove it anyway. if let Ok(rj) = self.get_rebuild_job(name) { - let _ = rj.as_client().terminate(); + let ch = rj.as_client().terminate(); + if let Err(e) = ch.await { + error!( + "Failed to wait on rebuild job for child {} to terminate with error {}", name, + e.verbose() + ); + } } } @@ -187,7 +193,7 @@ impl Nexus { } // terminate the only possible job with the child as a destination - self.terminate_rebuild(name); + self.terminate_rebuild(name).await; rebuilding_children } @@ -260,7 +266,7 @@ impl Nexus { { // todo: retry rebuild using another child as source? } - recovering_child.fault(Reason::RebuildFailed); + recovering_child.fault(Reason::RebuildFailed).await; error!( "Rebuild job for child {} of nexus {} failed, error: {}", &job.destination, @@ -269,7 +275,7 @@ impl Nexus { ); } _ => { - recovering_child.fault(Reason::RebuildFailed); + recovering_child.fault(Reason::RebuildFailed).await; error!( "Rebuild job for child {} of nexus {} failed with state {:?}", &job.destination, diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index 55835f2f9..c5afe19fe 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -210,7 +210,7 @@ impl NexusChannel { /// Refresh the IO channels of the underlying children. Typically, this is /// called when a device is either added or removed. IO that has already - /// may or may not complete. In case of remove that is fine. + /// been issued may or may not complete. In case of remove that is fine. 
pub extern "C" fn refresh_io_channels(ch_iter: *mut spdk_io_channel_iter) { let channel = unsafe { spdk_io_channel_iter_get_channel(ch_iter) }; diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 5ba17763b..7b9b0b7ab 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -4,18 +4,19 @@ use nix::errno::Errno; use serde::{export::Formatter, Serialize}; use snafu::{ResultExt, Snafu}; -use spdk_sys::{spdk_bdev_module_release_bdev, spdk_io_channel}; - use crate::{ bdev::{ nexus::{ + nexus_channel::DREvent, nexus_child::ChildState::Faulted, nexus_child_status_config::ChildStatusConfig, }, + nexus_lookup, NexusErrStore, + VerboseError, }, - core::{Bdev, BdevHandle, CoreError, Descriptor}, - nexus_uri::{bdev_destroy, NexusBdevError}, + core::{Bdev, BdevHandle, CoreError, Descriptor, Reactor}, + nexus_uri::{bdev_create, bdev_destroy, NexusBdevError}, rebuild::{ClientOperations, RebuildJob}, subsys::Config, }; @@ -46,6 +47,11 @@ pub enum ChildError { OpenWithoutBdev {}, #[snafu(display("Failed to create a BdevHandle for child"))] HandleCreate { source: CoreError }, + #[snafu(display("Failed to create a Bdev for child {}", child))] + ChildBdevCreate { + child: String, + source: NexusBdevError, + }, } #[derive(Debug, Serialize, PartialEq, Deserialize, Copy, Clone)] @@ -119,9 +125,6 @@ pub struct NexusChild { /// the bdev wrapped in Bdev pub(crate) bdev: Option, #[serde(skip_serializing)] - /// channel on which we submit the IO - pub(crate) ch: *mut spdk_io_channel, - #[serde(skip_serializing)] pub(crate) desc: Option>, /// current state of the child #[serde(skip_serializing)] @@ -235,13 +238,20 @@ impl NexusChild { /// Fault the child with a specific reason. /// We do not close the child if it is out-of-sync because it will /// subsequently be rebuilt. 
- pub(crate) fn fault(&mut self, reason: Reason) { + pub(crate) async fn fault(&mut self, reason: Reason) { match reason { Reason::OutOfSync => { self.set_state(ChildState::Faulted(reason)); } _ => { - self._close(); + if let Err(e) = self.close().await { + error!( + "{}: child {} failed to close with error {}", + self.parent, + self.name, + e.verbose() + ); + } self.set_state(ChildState::Faulted(reason)); } } @@ -249,19 +259,39 @@ impl NexusChild { } /// Set the child as temporarily offline - /// TODO: channels need to be updated when bdevs are closed - pub(crate) fn offline(&mut self) { - self.close(); + pub(crate) async fn offline(&mut self) { + if let Err(e) = self.close().await { + error!( + "{}: child {} failed to close with error {}", + self.parent, + self.name, + e.verbose() + ); + } NexusChild::save_state_change(); } /// Online a previously offlined child. /// The child is set out-of-sync so that it will be rebuilt. /// TODO: channels need to be updated when bdevs are opened - pub(crate) fn online( + pub(crate) async fn online( &mut self, parent_size: u64, ) -> Result { + // Only online a child if it was previously set offline. Check for a + // "Closed" state as that is what offlining a child will set it to. + match self.state { + ChildState::Closed => { + // Re-create the bdev as it will have been previously destroyed. 
+ let name = + bdev_create(&self.name).await.context(ChildBdevCreate { + child: self.name.clone(), + })?; + self.bdev = Bdev::lookup_by_name(&name); + } + _ => return Err(ChildError::ChildNotClosed {}), + } + let result = self.open(parent_size); self.set_state(ChildState::Faulted(Reason::OutOfSync)); NexusChild::save_state_change(); @@ -298,26 +328,41 @@ impl NexusChild { } } - /// closed the descriptor and handle, does not destroy the bdev - fn _close(&mut self) { - trace!("{}: Closing child {}", self.parent, self.name); - if let Some(bdev) = self.bdev.as_ref() { - unsafe { - if !(*bdev.as_ptr()).internal.claim_module.is_null() { - spdk_bdev_module_release_bdev(bdev.as_ptr()); - } - } + /// Close the nexus child. + pub(crate) async fn close(&self) -> Result<(), NexusBdevError> { + info!("Closing child {}", self.name); + if self.desc.is_some() && self.bdev.is_some() { + self.desc.as_ref().unwrap().unclaim(); } - // just to be explicit - let desc = self.desc.take(); - drop(desc); + // Destruction raises an SPDK_BDEV_EVENT_REMOVE event. + self.destroy().await } - /// close the bdev -- we have no means of determining if this succeeds - pub(crate) fn close(&mut self) -> ChildState { - self._close(); + /// Called in response to a SPDK_BDEV_EVENT_REMOVE event. + /// All the necessary teardown should be performed here before the bdev is + /// removed. + /// + /// Note: The descriptor *must* be dropped for the remove to complete. + pub(crate) fn remove(&mut self) { + info!("Removing child {}", self.name); + + // Remove the child from the I/O path. self.set_state(ChildState::Closed); - ChildState::Closed + let nexus_name = self.parent.clone(); + Reactor::block_on(async move { + match nexus_lookup(&nexus_name) { + Some(n) => n.reconfigure(DREvent::ChildRemove).await, + None => error!("Nexus {} not found", nexus_name), + } + }); + + // The bdev is being removed, so ensure we don't use it again. 
+ self.bdev = None; + + // Dropping the last descriptor results in the bdev being removed. + // This must be performed in this function. + let desc = self.desc.take(); + drop(desc); } /// create a new nexus child @@ -327,16 +372,14 @@ impl NexusChild { bdev, parent, desc: None, - ch: std::ptr::null_mut(), state: ChildState::Init, err_store: None, } } /// destroy the child bdev - pub(crate) async fn destroy(&mut self) -> Result<(), NexusBdevError> { + pub(crate) async fn destroy(&self) -> Result<(), NexusBdevError> { trace!("destroying child {:?}", self); - assert_eq!(self.state(), ChildState::Closed); if let Some(_bdev) = &self.bdev { bdev_destroy(&self.name).await } else { diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 093af2902..1c7993192 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -13,6 +13,7 @@ use snafu::ResultExt; use spdk_sys::{ spdk_bdev, + spdk_bdev_event_type, spdk_bdev_first, spdk_bdev_get_aliases, spdk_bdev_get_block_size, @@ -26,7 +27,7 @@ use spdk_sys::{ spdk_bdev_io_stat, spdk_bdev_io_type_supported, spdk_bdev_next, - spdk_bdev_open, + spdk_bdev_open_ext, spdk_uuid_generate, }; @@ -140,30 +141,6 @@ impl Share for Bdev { } impl Bdev { - /// bdevs are created and destroyed in order, adding a bdev to the nexus - /// does interferes with this order. There we traverse all nexuses - /// looking for our a child and then close it when found. - /// - /// By default -- when opening the bdev through the ['Bdev'] module - /// we by default, pass the context of the bdev being opened. If we - /// need/want to optimize the performance (o^n) we can opt for passing - /// a reference to the nexus instead avoiding the lookup. 
- /// - /// This does not handle any deep level of nesting - extern "C" fn hot_remove(ctx: *mut c_void) { - let bdev = Bdev(NonNull::new(ctx as *mut spdk_bdev).unwrap()); - instances().iter_mut().for_each(|n| { - n.children.iter_mut().for_each(|b| { - // note: it would perhaps be wise to close all children - // here in one blow to avoid unneeded lookups - if b.bdev.as_ref().unwrap().name() == bdev.name() { - info!("hot remove {} from {}", b.name, b.parent); - b.close(); - } - }); - }); - } - /// open a bdev by its name in read_write mode. pub fn open_by_name( name: &str, @@ -178,16 +155,56 @@ impl Bdev { } } + /// Called by spdk when there is an asynchronous bdev event i.e. removal. + extern "C" fn event_cb( + event: spdk_bdev_event_type, + bdev: *mut spdk_bdev, + _ctx: *mut c_void, + ) { + let bdev = Bdev(NonNull::new(bdev).unwrap()); + // Take the appropriate action for the given event type + match event { + spdk_sys::SPDK_BDEV_EVENT_REMOVE => { + info!("Received remove event for bdev {}", bdev.name()); + instances().iter_mut().for_each(|n| { + n.children + .iter_mut() + .filter(|c| { + c.bdev.is_some() + && c.bdev.as_ref().unwrap().name() + == bdev.name() + }) + .for_each(|c| { + c.remove(); + }); + }); + } + spdk_sys::SPDK_BDEV_EVENT_RESIZE => { + info!("Received resize event for bdev {}", bdev.name()) + } + spdk_sys::SPDK_BDEV_EVENT_MEDIA_MANAGEMENT => info!( + "Received media management event for bdev {}", + bdev.name() + ), + _ => error!( + "Received unknown event {} for bdev {}", + event, + bdev.name() + ), + } + } + /// open the current bdev, the bdev can be opened multiple times resulting /// in a new descriptor for each call. 
pub fn open(&self, read_write: bool) -> Result { let mut descriptor = std::ptr::null_mut(); + let cname = CString::new(self.name()).unwrap(); let rc = unsafe { - spdk_bdev_open( - self.as_ptr(), + spdk_bdev_open_ext( + cname.as_ptr(), read_write, - Some(Self::hot_remove), - self.as_ptr() as *mut _, + Some(Self::event_cb), + std::ptr::null_mut(), &mut descriptor, ) }; @@ -371,6 +388,7 @@ impl Bdev { unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender) }; sender.send(errno).expect("stat_cb receiver is gone"); } + /// Get bdev stats or errno value in case of an error. pub async fn stats(&self) -> Result { let mut stat: spdk_bdev_io_stat = Default::default(); diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs index 0fa6748dd..7373091f6 100644 --- a/mayastor/src/core/descriptor.rs +++ b/mayastor/src/core/descriptor.rs @@ -61,6 +61,15 @@ impl Descriptor { err == 0 } + /// unclaim a previously claimed bdev + pub(crate) fn unclaim(&self) { + unsafe { + if !(*self.get_bdev().as_ptr()).internal.claim_module.is_null() { + spdk_bdev_module_release_bdev(self.get_bdev().as_ptr()); + } + } + } + /// release a previously claimed bdev pub fn release(&self) { unsafe { @@ -165,7 +174,7 @@ extern "C" fn _bdev_close(arg: *mut c_void) { } } -/// when we get hot-removed we might be asked to close ourselves +/// when we get removed we might be asked to close ourselves /// however, this request might come from a different thread as /// targets (for example) are running on their own thread. 
impl Drop for Descriptor { diff --git a/mayastor/src/rebuild/rebuild_api.rs b/mayastor/src/rebuild/rebuild_api.rs index 01515f9e2..89f157fb4 100644 --- a/mayastor/src/rebuild/rebuild_api.rs +++ b/mayastor/src/rebuild/rebuild_api.rs @@ -8,7 +8,7 @@ use snafu::Snafu; use crate::{ bdev::VerboseError, - core::{BdevHandle, CoreError, Descriptor, DmaError}, + core::{CoreError, Descriptor, DmaError}, nexus_uri::NexusBdevError, }; @@ -116,10 +116,8 @@ pub struct RebuildJob { pub(super) nexus_descriptor: Descriptor, /// source URI of the healthy child to rebuild from pub source: String, - pub(super) source_hdl: BdevHandle, /// target URI of the out of sync child in need of a rebuild pub destination: String, - pub(super) destination_hdl: BdevHandle, pub(super) block_size: u64, pub(super) range: std::ops::Range, pub(super) next: u64, diff --git a/mayastor/src/rebuild/rebuild_impl.rs b/mayastor/src/rebuild/rebuild_impl.rs index 74c7b31ce..99423f3ec 100644 --- a/mayastor/src/rebuild/rebuild_impl.rs +++ b/mayastor/src/rebuild/rebuild_impl.rs @@ -111,26 +111,9 @@ impl RebuildJob { range: std::ops::Range, notify_fn: fn(String, String) -> (), ) -> Result { - let source_hdl = BdevHandle::open( - &bdev_get_name(source).context(BdevInvalidURI { - uri: source.to_string(), - })?, - false, - false, - ) - .context(NoBdevHandle { - bdev: source, - })?; - let destination_hdl = BdevHandle::open( - &bdev_get_name(destination).context(BdevInvalidURI { - uri: destination.to_string(), - })?, - true, - false, - ) - .context(NoBdevHandle { - bdev: destination, - })?; + let source_hdl = RebuildJob::open_handle(source, false, false)?; + let destination_hdl = + RebuildJob::open_handle(destination, true, false)?; if !Self::validate( &source_hdl.get_bdev(), @@ -180,9 +163,7 @@ impl RebuildJob { nexus, nexus_descriptor, source, - source_hdl, destination, - destination_hdl, next: range.start, range, block_size, @@ -308,6 +289,9 @@ impl RebuildJob { blk: u64, ) -> Result<(), RebuildError> { let mut 
copy_buffer: DmaBuf; + let source_hdl = RebuildJob::open_handle(&self.source, false, false)?; + let destination_hdl = + RebuildJob::open_handle(&self.destination, true, false)?; let copy_buffer = if self.get_segment_size_blks(blk) == self.segment_size_blks @@ -321,22 +305,21 @@ impl RebuildJob { self.segment_size_blks, segment_size_blks, blk, self.range, ); - copy_buffer = self - .destination_hdl + copy_buffer = destination_hdl .dma_malloc(segment_size_blks * self.block_size) .context(NoCopyBuffer {})?; &mut copy_buffer }; - self.source_hdl + source_hdl .read_at(blk * self.block_size, copy_buffer) .await .context(ReadIoError { bdev: &self.source, })?; - self.destination_hdl + destination_hdl .write_at(blk * self.block_size, copy_buffer) .await .context(WriteIoError { @@ -438,6 +421,24 @@ impl RebuildJob { unsafe { &mut *global_instances.inner.get() } } + + /// Open a bdev handle for the given uri + fn open_handle( + uri: &str, + read_write: bool, + claim: bool, + ) -> Result { + BdevHandle::open( + &bdev_get_name(uri).context(BdevInvalidURI { + uri: uri.to_string(), + })?, + read_write, + claim, + ) + .context(NoBdevHandle { + bdev: uri, + }) + } } #[derive(Debug)] diff --git a/mayastor/tests/core.rs b/mayastor/tests/core.rs index f50d398cc..3f60bcf8d 100644 --- a/mayastor/tests/core.rs +++ b/mayastor/tests/core.rs @@ -136,7 +136,7 @@ async fn core_4() { let nexus_size: u64 = 10 * 1024 * 1024; // 10MiB let nexus_name: &str = "nexus_sizes"; - // nexus size is always NEXUS_SIZE + // nexus size is always "nexus_size" // (size of child1, create success, size of child2, add child2 success) let test_cases = vec![ (nexus_size, true, nexus_size * 2, true), diff --git a/mayastor/tests/reconfigure.rs b/mayastor/tests/reconfigure.rs deleted file mode 100644 index 76dd7ec7b..000000000 --- a/mayastor/tests/reconfigure.rs +++ /dev/null @@ -1,217 +0,0 @@ -#![allow(clippy::cognitive_complexity)] - -use std::process::Command; - -use mayastor::{ - bdev::{nexus_create, nexus_lookup, 
NexusStatus}, - core::{ - mayastor_env_stop, - Bdev, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, - rebuild::RebuildState, -}; - -static DISKNAME1: &str = "/tmp/disk1.img"; -static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; - -static DISKNAME2: &str = "/tmp/disk2.img"; -static BDEVNAME2: &str = "aio:///tmp/disk2.img?blk_size=512"; -pub mod common; -#[test] -fn reconfigure() { - common::mayastor_test_init(); - - // setup our test files - - let output = Command::new("truncate") - .args(&["-s", "64m", DISKNAME1]) - .output() - .expect("failed exec truncate"); - - assert_eq!(output.status.success(), true); - - let output = Command::new("truncate") - .args(&["-s", "64m", DISKNAME2]) - .output() - .expect("failed exec truncate"); - - assert_eq!(output.status.success(), true); - - let rc = MayastorEnvironment::new(MayastorCliArgs::default()) - .start(|| Reactor::block_on(works()).unwrap()) - .unwrap(); - - assert_eq!(rc, 0); - - let output = Command::new("rm") - .args(&["-rf", DISKNAME1, DISKNAME2]) - .output() - .expect("failed delete test file"); - - assert_eq!(output.status.success(), true); -} - -fn buf_compare(first: &[u8], second: &[u8]) { - for i in 0 .. 
first.len() { - assert_eq!(first[i], second[i]); - } -} - -async fn stats_compare(first: &Bdev, second: &Bdev) { - let stats1 = first.stats().await.unwrap(); - let stats2 = second.stats().await.unwrap(); - - assert_eq!(stats1.num_write_ops, stats2.num_write_ops); -} - -async fn works() { - let child1 = BDEVNAME1.to_string(); - let child2 = BDEVNAME2.to_string(); - - let children = vec![child1.clone(), child2.clone()]; - - nexus_create("hello", 512 * 131_072, None, &children) - .await - .unwrap(); - - let nexus = nexus_lookup("hello").unwrap(); - - // open the nexus in read write - let nd_bdev = Bdev::lookup_by_name("hello").expect("failed to lookup bdev"); - let nd = nd_bdev - .open(true) - .expect("failed open bdev") - .into_handle() - .unwrap(); - assert_eq!(nexus.status(), NexusStatus::Online); - // open the children in RO - - let cd1_bdev = - Bdev::lookup_by_name(BDEVNAME1).expect("failed to lookup bdev"); - let cd2_bdev = - Bdev::lookup_by_name(BDEVNAME2).expect("failed to lookup bdev"); - let cd1 = cd1_bdev - .open(false) - .expect("failed open bdev") - .into_handle() - .unwrap(); - let cd2 = cd2_bdev - .open(false) - .expect("failed open bdev") - .into_handle() - .unwrap(); - - let bdev1 = cd1.get_bdev(); - let bdev2 = cd2.get_bdev(); - - // write out a region of blocks to ensure a specific data pattern - let mut buf = nd.dma_malloc(4096).expect("failed to allocate buffer"); - buf.fill(0xff); - - // allocate buffer for child to read - let mut buf1 = cd1.dma_malloc(4096).unwrap(); - let mut buf2 = cd2.dma_malloc(4096).unwrap(); - - // write out 0xff to the nexus, all children should have the same - for i in 0 .. 10 { - nd.write_at(i * 4096, &buf).await.unwrap(); - } - - // verify that both children have the same write count - stats_compare(&bdev1, &bdev2).await; - - // compare all buffers byte for byte - for i in 0 .. 
10 { - // account for the offset (in number of blocks) - cd1.read_at((i * 4096) + (10240 * 512), &mut buf1) - .await - .unwrap(); - cd2.read_at((i * 4096) + (10240 * 512), &mut buf2) - .await - .unwrap(); - buf_compare(buf1.as_slice(), buf2.as_slice()); - } - - // fill the nexus buffer with 0xF - buf.fill(0xF0); - - // turn one child offline - nexus.offline_child(&child2).await.unwrap(); - assert_eq!(nexus.status(), NexusStatus::Degraded); - - // write 0xF0 to the nexus - for i in 0 .. 10 { - nd.write_at(i * 4096, &buf).await.unwrap(); - } - - // verify that only child2 has the 0xF0 pattern set, child2 still has 0xff - for i in 0 .. 10 { - buf1.fill(0x0); - buf2.fill(0x0); - - cd1.read_at((i * 4096) + (10240 * 512), &mut buf1) - .await - .unwrap(); - cd2.read_at((i * 4096) + (10240 * 512), &mut buf2) - .await - .unwrap(); - - buf1.as_slice() - .iter() - .map(|b| assert_eq!(*b, 0xf0)) - .for_each(drop); - buf2.as_slice() - .iter() - .map(|b| assert_eq!(*b, 0xff)) - .for_each(drop); - } - - // bring back the offlined child - nexus.online_child(&child2).await.unwrap(); - assert_eq!(nexus.status(), NexusStatus::Degraded); - - common::wait_for_rebuild( - child2.to_string(), - RebuildState::Completed, - std::time::Duration::from_secs(20), - ) - .unwrap(); - - assert_eq!(nexus.status(), NexusStatus::Online); - - buf.fill(0xAA); - // write 0xAA to the nexus - for i in 0 .. 10 { - nd.write_at(i * 4096, &buf).await.unwrap(); - } - - // both children should have 0xAA set - - for i in 0 .. 
10 { - buf1.fill(0x0); - buf2.fill(0x0); - cd1.read_at((i * 4096) + (10240 * 512), &mut buf1) - .await - .unwrap(); - cd2.read_at((i * 4096) + (10240 * 512), &mut buf2) - .await - .unwrap(); - buf1.as_slice() - .iter() - .map(|b| assert_eq!(*b, 0xAA)) - .for_each(drop); - buf2.as_slice() - .iter() - .map(|b| assert_eq!(*b, 0xAA)) - .for_each(drop); - } - - drop(cd1); - drop(cd2); - drop(nd); - - mayastor_env_stop(0); -} diff --git a/mayastor/tests/replica_timeout.rs b/mayastor/tests/replica_timeout.rs index 7071eeac8..50ddc2110 100644 --- a/mayastor/tests/replica_timeout.rs +++ b/mayastor/tests/replica_timeout.rs @@ -11,6 +11,7 @@ use tokio::time::Duration; pub mod common; static NXNAME: &str = "nexus"; +#[ignore] #[tokio::test] async fn replica_stop_cont() { let test = Builder::new() From 348d278651f35171188a1a8afe1c97f6e5d9e4d0 Mon Sep 17 00:00:00 2001 From: Tom Marsh Date: Thu, 5 Nov 2020 13:29:33 +0000 Subject: [PATCH 45/92] tests: Add scripts to bringup a test environment Add scripts that can bring up a cluster in vagrant, and deploy locally built mayastor images to it. 
--- mayastor-test/e2e/example-parallel.sh | 25 ++++ mayastor-test/e2e/example-simple.sh | 15 ++ mayastor-test/e2e/install/deploy/README.md | 1 + .../deploy/csi-daemonset.yaml.template | 128 ++++++++++++++++++ .../deploy/mayastor-daemonset.yaml.template | 84 ++++++++++++ .../deploy/moac-deployment.yaml.template | 84 ++++++++++++ mayastor-test/e2e/install/install_test.go | 36 +++-- mayastor-test/e2e/setup/README.md | 68 ++++++++++ mayastor-test/e2e/setup/bringup-cluster.sh | 89 ++++++++++++ mayastor-test/e2e/setup/test-registry.yaml | 32 +++++ scripts/release.sh | 126 ++++++++++++----- 11 files changed, 647 insertions(+), 41 deletions(-) create mode 100755 mayastor-test/e2e/example-parallel.sh create mode 100755 mayastor-test/e2e/example-simple.sh create mode 100644 mayastor-test/e2e/install/deploy/README.md create mode 100644 mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template create mode 100644 mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template create mode 100644 mayastor-test/e2e/install/deploy/moac-deployment.yaml.template create mode 100644 mayastor-test/e2e/setup/README.md create mode 100755 mayastor-test/e2e/setup/bringup-cluster.sh create mode 100644 mayastor-test/e2e/setup/test-registry.yaml diff --git a/mayastor-test/e2e/example-parallel.sh b/mayastor-test/e2e/example-parallel.sh new file mode 100755 index 000000000..87454c29c --- /dev/null +++ b/mayastor-test/e2e/example-parallel.sh @@ -0,0 +1,25 @@ +#! /usr/bin/env bash + +set -euxo pipefail +cd "$(dirname ${BASH_SOURCE[0]})" + +# Example of how to bringup the test cluster and build the images in parallel, +# then run the tests. + +pushd setup + ./bringup-cluster.sh & +popd +../../scripts/release.sh --skip-publish-to-dockerhub & + +for job in $(jobs -p); do + wait $job +done + +# Now that everything up and built, push the images... +../../scripts/release.sh --skip-publish-to-dockerhub --skip-build --private-registry "172.18.8.101:30291" + +# ... and install mayastor. 
+pushd install + go test +popd + diff --git a/mayastor-test/e2e/example-simple.sh b/mayastor-test/e2e/example-simple.sh new file mode 100755 index 000000000..21490acc7 --- /dev/null +++ b/mayastor-test/e2e/example-simple.sh @@ -0,0 +1,15 @@ +#! /usr/bin/env bash + +set -euxo pipefail +cd "$(dirname ${BASH_SOURCE[0]})" + +pushd setup + ./bringup-cluster.sh +popd + +../../scripts/release.sh --private-registry "172.18.8.101:30291" --skip-publish-to-dockerhub + +pushd install + go test +popd + diff --git a/mayastor-test/e2e/install/deploy/README.md b/mayastor-test/e2e/install/deploy/README.md new file mode 100644 index 000000000..8f1341769 --- /dev/null +++ b/mayastor-test/e2e/install/deploy/README.md @@ -0,0 +1 @@ +Contains the templates for applying the CI versions of our images diff --git a/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template b/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template new file mode 100644 index 000000000..71e436db7 --- /dev/null +++ b/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template @@ -0,0 +1,128 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: mayastor + name: mayastor-csi + labels: + openebs/engine: mayastor +spec: + selector: + matchLabels: + app: mayastor-csi + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: mayastor-csi + spec: + hostNetwork: true + nodeSelector: + kubernetes.io/arch: amd64 + # NOTE: Each container must have mem/cpu limits defined in order to + # belong to Guaranteed QoS class, hence can never get evicted in case of + # pressure unless they exceed those limits. limits and requests must be + # the same. 
+ containers: + - name: mayastor-csi + image: ${IMAGE_NAME} + imagePullPolicy: Always + # we need privileged because we mount filesystems and use mknod + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: RUST_BACKTRACE + value: "1" + args: + - "--csi-socket=/csi/csi.sock" + - "--node-name=$(MY_NODE_NAME)" + - "--grpc-endpoint=$(MY_POD_IP):10199" + - "-v" + volumeMounts: + - name: device + mountPath: /dev + - name: sys + mountPath: /sys + - name: run-udev + mountPath: /run/udev + - name: host-root + mountPath: /host + - name: plugin-dir + mountPath: /csi + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "100m" + memory: "50Mi" + - name: csi-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 + args: + - "--csi-address=/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/mayastor.openebs.io/csi.sock" + lifecycle: + preStop: + exec: + # this is needed in order for CSI to detect that the plugin is gone + command: ["/bin/sh", "-c", "rm -f /registration/io.openebs.csi-mayastor-reg.sock /csi/csi.sock"] + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "100m" + memory: "50Mi" + # Mayastor node plugin gRPC server + ports: + - containerPort: 10199 + protocol: TCP + name: mayastor-node + volumes: + - name: device + hostPath: + path: /dev + type: Directory + - name: sys + hostPath: + path: /sys + type: Directory + - name: run-udev + hostPath: + path: /run/udev + type: Directory + - name: host-root + hostPath: + path: / + type: Directory + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: 
plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/mayastor.openebs.io/ + type: DirectoryOrCreate + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory diff --git a/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template b/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template new file mode 100644 index 000000000..894d470f6 --- /dev/null +++ b/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: mayastor + name: mayastor + labels: + openebs/engine: mayastor +spec: + selector: + matchLabels: + app: mayastor + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: mayastor + spec: + hostNetwork: true + # To resolve services from mayastor namespace + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + openebs.io/engine: mayastor + kubernetes.io/arch: amd64 + # NOTE: Each container must have mem/cpu limits defined in order to + # belong to Guaranteed QoS class, hence can never get evicted in case of + # pressure unless they exceed those limits. limits and requests must be + # the same. 
+ initContainers: + - name: message-bus-probe + image: busybox:latest + command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] + containers: + - name: mayastor + image: ${IMAGE_NAME} + imagePullPolicy: Always + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + args: + - "-N$(MY_NODE_NAME)" + - "-g$(MY_POD_IP)" + - "-nnats" + securityContext: + privileged: true + volumeMounts: + - name: device + mountPath: /dev + - name: dshm + mountPath: /dev/shm + resources: + limits: + cpu: "1" + memory: "500Mi" + hugepages-2Mi: "1Gi" + requests: + cpu: "1" + memory: "500Mi" + hugepages-2Mi: "1Gi" + ports: + - containerPort: 10124 + protocol: TCP + name: mayastor + volumes: + - name: device + hostPath: + path: /dev + type: Directory + - name: dshm + emptyDir: + medium: Memory + sizeLimit: "1Gi" + - name: hugepage + emptyDir: + medium: HugePages diff --git a/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template b/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template new file mode 100644 index 000000000..04592dd35 --- /dev/null +++ b/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template @@ -0,0 +1,84 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: moac + namespace: mayastor +spec: + replicas: 1 + selector: + matchLabels: + app: moac + template: + metadata: + labels: + app: moac + spec: + serviceAccount: moac + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.6.0 + args: + - "--v=2" + - "--csi-address=$(ADDRESS)" + - "--feature-gates=Topology=true" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v2.2.0 + args: + - "--v=2" + - "--csi-address=$(ADDRESS)" + 
env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + - name: moac + image: ${IMAGE_NAME} + imagePullPolicy: Always + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--namespace=$(MY_POD_NAMESPACE)" + - "--port=4000" + - "--message-bus=nats" + - "-v" + env: + - name: CSI_ENDPOINT + value: /var/lib/csi/sockets/pluginproxy/csi.sock + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - containerPort: 4000 + protocol: TCP + name: "rest-api" + volumes: + - name: socket-dir + emptyDir: +--- +kind: Service +apiVersion: v1 +metadata: + name: moac + namespace: mayastor +spec: + selector: + app: moac + ports: + - protocol: TCP + port: 4000 + targetPort: 4000 diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go index 5deca5729..aded431d7 100644 --- a/mayastor-test/e2e/install/install_test.go +++ b/mayastor-test/e2e/install/install_test.go @@ -42,6 +42,25 @@ func applyDeployYaml(filename string) { Expect(err).ToNot(HaveOccurred()) } +// Encapsulate the logic to find where the templated yamls are +func getTemplateYamlDir() string { + _, filename, _, _ := runtime.Caller(0) + return path.Clean(filename + "/../deploy") +} + +func makeImageName(registryaddress string, registryport string, imagename string, imageversion string) string { + return registryaddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion +} + +func applyTemplatedYaml(filename string, imagename string) { + fullimagename := makeImageName("172.18.8.101", "30291", imagename, "ci") + bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -" + cmd := exec.Command("bash", "-c", bashcmd) + cmd.Dir = getTemplateYamlDir() + _, err := cmd.CombinedOutput() + 
Expect(err).ToNot(HaveOccurred()) +} + // We expect this to fail a few times before it succeeds, // so no throwing errors from here. func mayastorReadyPodCount() int { @@ -50,7 +69,6 @@ func mayastorReadyPodCount() int { fmt.Println("Failed to get mayastor DaemonSet") return -1 } - return int(mayastorDaemonSet.Status.CurrentNumberScheduled) } @@ -62,16 +80,16 @@ func installMayastor() { applyDeployYaml("moac-rbac.yaml") applyDeployYaml("mayastorpoolcrd.yaml") applyDeployYaml("nats-deployment.yaml") - applyDeployYaml("csi-daemonset.yaml") - applyDeployYaml("moac-deployment.yaml") - applyDeployYaml("mayastor-daemonset.yaml") + applyTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi") + applyTemplatedYaml("moac-deployment.yaml.template", "moac") + applyTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor") // Given the yamls and the environment described in the test readme, - // we expect mayastor to be running on exactly 2 nodes. - Eventually(mayastorReadyPodCount(), - "60s", // timeout - "1s", // polling interval - ).Should(Equal(2)) + // we expect mayastor to be running on exactly 3 nodes. + Eventually(mayastorReadyPodCount, + "120s", // timeout + "1s", // polling interval + ).Should(Equal(3)) } func TestInstallSuite(t *testing.T) { diff --git a/mayastor-test/e2e/setup/README.md b/mayastor-test/e2e/setup/README.md new file mode 100644 index 000000000..e5cb7cab2 --- /dev/null +++ b/mayastor-test/e2e/setup/README.md @@ -0,0 +1,68 @@ + +This directory contains the scripts to bring up and environment ready for testing +(for example by CI). 
+ + +## Pre-requisites + +### Step 1 + +Install libvirt plugin +``` +# From https://github.com/vagrant-libvirt/vagrant-libvirt#installation +# apt-get build-dep vagrant ruby-libvirt # Usually not necessary +sudo apt-get install -y qemu libvirt-daemon-system libvirt-clients ebtables dnsmasq-base ruby-libvirt +sudo apt-get install -y libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev +vagrant plugin install vagrant-libvirt +``` + +### Step 2 + +Clone kubespray (tested at v2.14.2): +``` +git clone --branch v2.14.2 git@github.com:kubernetes-sigs/kubespray.git +``` + +NB You may need to update $KUBESPRAY_REPO in bringup-cluster.sh to +point to the location where you cloned kubespray. + +### Step 3 + +Install ansible (tested at v2.9.6). +https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html + +### Step 4 (Optional) + +If you're going to want to push images from a host to the insecure registry in the cluster, +You'll need to add the following to your `/etc/docker/daemon.json`: +``` +{ + "insecure-registries" : ["172.18.8.101:30291"] +} +``` +The IP is static and set by the Vagrantfile, so shouldn't require changing between deployments. +The port is set in test-registry.yaml, and also shouldn't vary. + +## Bringing up the cluster + +``` +./bringup-cluster.sh +``` + + +## Cleanup + +From the kubespray repo: +``` +vagrant destroy -f +``` + +## Cluster Spec + +The created cluster is based on the defaults provided by the kubespray repo, with +some values are overwritten in by the config.rb file created by bringup-cluster.sh. + +In summary, the cluster consists of 3 nodes (1 master, 2 workers). +All 3 are labelled for use by mayasotr (ie including the master), to try +to still get 3 copies of mayastor but reduce resource usage a little. +Each node has 2 2G data disks (sda, sdb) available, as well as 4 CPUs and 6GiB of RAM. 
diff --git a/mayastor-test/e2e/setup/bringup-cluster.sh b/mayastor-test/e2e/setup/bringup-cluster.sh new file mode 100755 index 000000000..9e92d521d --- /dev/null +++ b/mayastor-test/e2e/setup/bringup-cluster.sh @@ -0,0 +1,89 @@ +#! /usr/bin/env bash + +# Script to bringup a 3 node k8s cluster, ready for mayastor install. +# To cleanup after this script: +# cd $KUBESPRAY_REPO +# vagrant destroy -f + +set -euxo pipefail +cd "$(dirname ${BASH_SOURCE[0]})" + +# Config variables +KUBESPRAY_REPO="$HOME/work/kubespray" +KUBECONFIG_TO_USE="$HOME/.kube/config" + +# Globals +MASTER_NODE_NAME="k8s-1" # Set in the Vagrantfile +MASTER_NODE_IP="172.18.8.101" # Set in the Vagrantfile +REGISTRY_PORT="30291" +REGISTRY_ENDPOINT="${MASTER_NODE_IP}:${REGISTRY_PORT}" + +prepare_kubespray_repo() { + pushd $KUBESPRAY_REPO + mkdir -p vagrant + cat << EOF > vagrant/config.rb +# DO NOT EDIT. This file is autogenerated. +\$vm_memory = 6144 +\$vm_cpus = 4 +\$kube_node_instances_with_disks = true +\$kube_node_instances_with_disks_size = "2G" +\$kube_master_instances = 1 +EOF + popd +} + +bringup_cluster() { + #vagrant plugin install vagrant-libvirt # TODO Put this in the nix environment + pushd $KUBESPRAY_REPO + vagrant up --provider=libvirt + vagrant ssh $MASTER_NODE_NAME -c "sudo cat /etc/kubernetes/admin.conf" > $KUBECONFIG_TO_USE + kubectl get nodes # Debug + popd + kubectl apply -f test-registry.yaml +} + +# Runs in a timeout, so we need to pass in $MASTER_NODE_IP and $REGISTRY_PORT +wait_for_ready() { + while ! kubectl get nodes; do + sleep 1 + done + + # Wait for the registry to be accessible + while ! nc -z $1 $2; do + sleep 1 + done +} + +# TODO We should consider if we can do this in ansible. 
+setup_one_node() { + local node_name=$1 + pushd $KUBESPRAY_REPO + vagrant ssh $node_name -c "echo 1024 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" + vagrant ssh $node_name -c "echo \"vm.nr_hugepages = 1024\" | sudo tee -a /etc/sysctl.d/10-kubeadm.conf" + vagrant ssh $node_name -c "sudo systemctl restart kubelet" + + vagrant ssh $node_name -c "echo \"{\\\"insecure-registries\\\" : [\\\"${REGISTRY_ENDPOINT}\\\"]}\" | sudo tee /etc/docker/daemon.json" + vagrant ssh $node_name -c "sudo service docker restart" + + # Make sure everything's back after those restarts... + export -f wait_for_ready + timeout 180s bash -c "wait_for_ready $MASTER_NODE_IP $REGISTRY_PORT" + + kubectl label node $node_name openebs.io/engine=mayastor + popd +} + +# Parallel setup of each node. +setup_all_nodes() { + NODES="$MASTER_NODE_NAME k8s-2 k8s-3" + for node in $NODES; do + setup_one_node $node & + done + for job in $(jobs -p); do + wait $job + done +} + +prepare_kubespray_repo # Don't really need to run this everytime... 
+bringup_cluster +setup_all_nodes diff --git a/mayastor-test/e2e/setup/test-registry.yaml b/mayastor-test/e2e/setup/test-registry.yaml new file mode 100644 index 000000000..c4ab7eae0 --- /dev/null +++ b/mayastor-test/e2e/setup/test-registry.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: test-registry +spec: + selector: + app: test-registry + type: NodePort + ports: + - name: registry + protocol: TCP + port: 5000 + nodePort: 30291 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-registry +spec: + replicas: 1 + selector: + matchLabels: + app: test-registry + template: + metadata: + labels: + app: test-registry + spec: + containers: + - name: test-registry + image: registry:2 + \ No newline at end of file diff --git a/scripts/release.sh b/scripts/release.sh index 71519630c..ac479b4fd 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -2,7 +2,8 @@ # Build and upload mayastor docker images to dockerhub repository. # Use --dry-run to just see what would happen. -# The script assumes that a user is logged on to dockerhub. +# The script assumes that a user is logged on to dockerhub for public images, +# or has insecure registry access setup for CI. set -euo pipefail @@ -18,6 +19,22 @@ get_tag() { echo -n $vers } +help() { + cat < Push the built images to the provided registry. + --skip-build Don't perform nix-build. + --skip-publish-to-dockerhub Don't publish to Dockerhub. + +Examples: + $(basename $0) --private-registry 127.0.0.1:5000 +EOF +} + DOCKER="docker" NIX_BUILD="nix-build" RM="rm" @@ -26,6 +43,9 @@ IMAGES="mayastor mayastor-csi moac" TAG=`get_tag` BRANCH=`git rev-parse --abbrev-ref HEAD` UPLOAD= +SKIP_PUSH_TO_DOCKERHUB= +PRIVATE_REGISTRY= +SKIP_BUILD= # Check if all needed tools are installed curl --version >/dev/null @@ -38,29 +58,60 @@ if [ $? 
-ne 0 ]; then echo "Missing docker - install it and put it to your PATH" exit 1 fi -if [ "$#" -gt 0 ]; then - if [ "$1" == "--dry-run" ]; then - DOCKER="echo $DOCKER" - NIX_BUILD="echo $NIX_BUILD" - RM="echo $RM" - else - echo "Usage: release.sh [--dry-run]" - fi -fi + +# Parse arguments +while [ "$#" -gt 0 ]; do + case $1 in + -d|--dry-run) + DOCKER="echo $DOCKER" + NIX_BUILD="echo $NIX_BUILD" + RM="echo $RM" + shift + ;; + -h|--help) + help + exit 0 + shift + ;; + --private-registry) + shift + PRIVATE_REGISTRY=$1 + shift + ;; + --skip-build) + SKIP_BUILD="yes" + shift + ;; + --skip-publish-to-dockerhub) + SKIP_PUSH_TO_DOCKERHUB="yes" + shift + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done cd $SCRIPTDIR/.. -# Build all images first for name in $IMAGES; do - archive=${name}-image - image=mayadata/$name - if docker_tag_exists $image $TAG; then - echo "Skipping $image:$TAG that already exists" + image="mayadata/${name}" + if [ -z $SKIP_BUILD ]; then + archive=${name}-image + if docker_tag_exists $image $TAG; then + echo "Skipping $image:$TAG that already exists" + else + echo "Building $image:$TAG ..." + $NIX_BUILD --out-link $archive -A images.$archive + $DOCKER load -i $archive + $RM $archive + UPLOAD="$UPLOAD $image" + fi else - echo "Building $image:$TAG ..." - $NIX_BUILD --out-link $archive --no-build-output -A images.$archive - $DOCKER load -i $archive - $RM $archive + # If we're skipping the build, then we just want to upload + # the images we already have locally. + # We should do this for all images. UPLOAD="$UPLOAD $image" fi done @@ -68,21 +119,32 @@ done # Nothing to upload? [ -z "$UPLOAD" ] && exit 0 -# Upload them -for img in $UPLOAD; do - echo "Uploading $img:$TAG to registry ..." 
- $DOCKER push $img:$TAG -done - -# Create aliases -if [ "$BRANCH" == "develop" ]; then +if [ -z $SKIP_PUSH_TO_DOCKERHUB ]; then + # Upload them for img in $UPLOAD; do - $DOCKER tag $img:$TAG $img:develop - $DOCKER push $img:develop + echo "Uploading $img:$TAG to registry ..." + $DOCKER push $img:$TAG done -elif [ "$BRANCH" == "master" ]; then + + # Create aliases + if [ "$BRANCH" == "develop" ]; then + for img in $UPLOAD; do + $DOCKER tag $img:$TAG $img:develop + $DOCKER push $img:develop + done + elif [ "$BRANCH" == "master" ]; then + for img in $UPLOAD; do + $DOCKER tag $img:$TAG $img:latest + $DOCKER push $img:latest + done + fi +fi + +# If a private registry was specified (ie for ci) +# then we push to it here. +if [ ! -z $PRIVATE_REGISTRY ]; then for img in $UPLOAD; do - $DOCKER tag $img:$TAG $img:latest - $DOCKER push $img:latest + $DOCKER tag $img:$TAG ${PRIVATE_REGISTRY}/$img:ci + $DOCKER push ${PRIVATE_REGISTRY}/$img:ci done fi From 2fe1c70f08b14fc14dbcd64df4e2ea28e0d2cbd6 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Wed, 11 Nov 2020 10:56:14 +0000 Subject: [PATCH 46/92] Cancel all rebuild jobs on nexus destruction When destroying a nexus all rebuild jobs should be cancelled before closing the children. This ensures that the close on the children can complete properly as any handles held by the rebuild job will no longer exist. Previously, destroying the nexus resulted in the rebuild jobs being stopped. However, this just issued a stop request but did not wait for the rebuild job to actually stop. When removing the nexus child, set the bdev to None before calling reconfigure. This should prevent multiple calls to remove for the same child. Note: This change attempts to resolve the issues seen on CI where rebuild test cases have been observed to hang. 
--- mayastor/src/bdev/nexus/nexus_bdev.rs | 4 +++- mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs | 7 ++----- mayastor/src/bdev/nexus/nexus_child.rs | 6 +++--- mayastor/tests/nexus_rebuild.rs | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 8345991fc..09c377c64 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -555,8 +555,10 @@ impl Nexus { // gone self.bdev.unshare().await.unwrap(); + // wait for all rebuild jobs to be cancelled before proceeding with the + // destruction of the nexus for child in self.children.iter() { - self.stop_rebuild(&child.name).await.ok(); + self.cancel_child_rebuild_jobs(&child.name).await; } for child in self.children.iter_mut() { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index c0208d206..9cb00e72b 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -171,10 +171,7 @@ impl Nexus { /// Cancels all rebuilds jobs associated with the child. /// Returns a list of rebuilding children whose rebuild job was cancelled. - pub async fn cancel_child_rebuild_jobs( - &mut self, - name: &str, - ) -> Vec { + pub async fn cancel_child_rebuild_jobs(&self, name: &str) -> Vec { let mut src_jobs = self.get_rebuild_job_src(name); let mut terminated_jobs = Vec::new(); let mut rebuilding_children = Vec::new(); @@ -210,7 +207,7 @@ impl Nexus { /// Return rebuild job associated with the src child name. /// Return error if no rebuild job associated with it. 
fn get_rebuild_job_src<'a>( - &mut self, + &self, name: &'a str, ) -> Vec<&'a mut RebuildJob> { let jobs = RebuildJob::lookup_src(&name); diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 7b9b0b7ab..70469c216 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -346,6 +346,9 @@ impl NexusChild { pub(crate) fn remove(&mut self) { info!("Removing child {}", self.name); + // The bdev is being removed, so ensure we don't use it again. + self.bdev = None; + // Remove the child from the I/O path. self.set_state(ChildState::Closed); let nexus_name = self.parent.clone(); @@ -356,9 +359,6 @@ impl NexusChild { } }); - // The bdev is being removed, so ensure we don't use it again. - self.bdev = None; - // Dropping the last descriptor results in the bdev being removed. // This must be performed in this function. let desc = self.desc.take(); diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index a3f5f880c..f2bb040bb 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -122,7 +122,7 @@ fn rebuild_test_add() { .await .expect("rebuild not expected to be present"); - nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); + nexus.destroy().await.unwrap(); }); test_fini(); From 55fd174c4059218c39ae65144e347e482c6ad05c Mon Sep 17 00:00:00 2001 From: Mikhail Tcymbaliuk Date: Wed, 11 Nov 2020 12:14:56 +0100 Subject: [PATCH 47/92] CAS-512 Refactor mount_fs testcases to use NVMe protocol instead of NBD --- Cargo.lock | 1 + mayastor/Cargo.toml | 1 + mayastor/tests/mount_fs.rs | 291 +++++++++++++++------------------ nix/pkgs/mayastor/default.nix | 2 +- nvmeadm/src/nvme_namespaces.rs | 2 +- 5 files changed, 138 insertions(+), 159 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5276629d4..ebf8c8c15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1742,6 +1742,7 @@ dependencies = [ "mbus_api", "nats", "nix 0.16.1", + 
"nvmeadm", "once_cell", "pin-utils", "proc-mounts", diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index e46127b7f..9b65fcf89 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -81,6 +81,7 @@ dns-lookup = "1.0.4" ipnetwork = "0.17.0" bollard = "0.8.0" mbus_api = { path = "../mbus-api" } +nvmeadm = {path = "../nvmeadm", version = "0.1.0"} [dependencies.rpc] path = "../rpc" diff --git a/mayastor/tests/mount_fs.rs b/mayastor/tests/mount_fs.rs index 7d0e1b657..44837d6dd 100644 --- a/mayastor/tests/mount_fs.rs +++ b/mayastor/tests/mount_fs.rs @@ -1,15 +1,16 @@ -use crossbeam::channel::unbounded; +use once_cell::sync::OnceCell; +use std::convert::TryFrom; + +extern crate nvmeadm; use mayastor::{ bdev::{nexus_create, nexus_lookup}, - core::{ - mayastor_env_stop, - MayastorCliArgs, - MayastorEnvironment, - Mthread, - Reactor, - }, + core::MayastorCliArgs, }; + +pub mod common; +use common::compose::MayastorTest; + use rpc::mayastor::ShareProtocolNexus; static DISKNAME1: &str = "/tmp/disk1.img"; @@ -18,175 +19,151 @@ static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; static DISKNAME2: &str = "/tmp/disk2.img"; static BDEVNAME2: &str = "aio:///tmp/disk2.img?blk_size=512"; -pub mod common; +static MAYASTOR: OnceCell = OnceCell::new(); -#[test] -fn mount_fs() { - // test xfs as well as ext4 - async fn mirror_fs_test<'a>(fstype: String) { - create_nexus().await; - let nexus = nexus_lookup("nexus").unwrap(); +macro_rules! 
prepare_storage { + () => { + common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]); + common::truncate_file(DISKNAME1, 64 * 1024); + common::truncate_file(DISKNAME2, 64 * 1024); + }; +} - //TODO: repeat this test for NVMF and ISCSI - let device = common::device_path_from_uri( +fn get_ms() -> &'static MayastorTest<'static> { + let instance = + MAYASTOR.get_or_init(|| MayastorTest::new(MayastorCliArgs::default())); + &instance +} + +async fn create_connected_nvmf_nexus( + ms: &'static MayastorTest<'static>, +) -> (nvmeadm::NvmeTarget, String) { + let uri = ms + .spawn(async { + create_nexus().await; + let nexus = nexus_lookup("nexus").unwrap(); nexus - .share(ShareProtocolNexus::NexusNbd, None) + .share(ShareProtocolNexus::NexusNvmf, None) .await - .unwrap(), - ); - - // create an XFS filesystem on the nexus device - { - let (s, r) = unbounded(); - let mkfs_dev = device.clone(); - Mthread::spawn_unaffinitized(move || { - if !common::mkfs(&mkfs_dev, &fstype) { - s.send(format!( - "Failed to format {} with {}", - mkfs_dev, fstype - )) - .unwrap(); - } else { - s.send("".to_string()).unwrap(); - } - }); - - assert_reactor_poll!(r, ""); - } - - // mount the device, create a file and return the md5 of that file - { - let (s, r) = unbounded(); - Mthread::spawn_unaffinitized(move || { - s.send(match common::mount_and_write_file(&device) { - Ok(_) => "".to_owned(), - Err(err) => err, - }) - }); - - assert_reactor_poll!(r, ""); - } - // destroy the share and the nexus - nexus.unshare_nexus().await.unwrap(); - nexus.destroy().await.unwrap(); + .unwrap() + }) + .await; - // create a split nexus, i.e two nexus devices which each one leg of the - // mirror - create_nexus_splitted().await; + // Create and connect NVMF target. 
+ let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); + let devices = target.connect().unwrap(); - let left = nexus_lookup("left").unwrap(); - let right = nexus_lookup("right").unwrap(); + assert_eq!(devices.len(), 1); + (target, devices[0].path.to_string()) +} - // share both nexuses - // TODO: repeat this test for NVMF and ISCSI, and permutations? - let left_device = common::device_path_from_uri( - left.share(ShareProtocolNexus::NexusNbd, None) - .await - .unwrap(), - ); +async fn mount_test(ms: &'static MayastorTest<'static>, fstype: &str) { + let (target, nvmf_dev) = create_connected_nvmf_nexus(ms).await; - let right_device = common::device_path_from_uri( - right - .share(ShareProtocolNexus::NexusNbd, None) - .await - .unwrap(), - ); - - let (s, r) = unbounded(); - let s1 = s.clone(); - Mthread::spawn_unaffinitized(move || { - s1.send(common::mount_and_get_md5(&left_device)) - }); - let md5_left; - reactor_poll!(r, md5_left); - assert!(md5_left.is_ok()); - - left.unshare_nexus().await.unwrap(); - left.destroy().await.unwrap(); - - let s1 = s.clone(); - // read the md5 of the right side of the mirror - Mthread::spawn_unaffinitized(move || { - s1.send(common::mount_and_get_md5(&right_device)) - }); - - let md5_right; - reactor_poll!(r, md5_right); - assert!(md5_right.is_ok()); - right.unshare_nexus().await.unwrap(); - right.destroy().await.unwrap(); - assert_eq!(md5_left.unwrap(), md5_right.unwrap()); + // Create a filesystem with test file. + assert!(common::mkfs(&nvmf_dev, &fstype)); + let md5sum = match common::mount_and_write_file(&nvmf_dev) { + Ok(r) => r, + Err(e) => panic!("Failed to create test file: {}", e), + }; + + // Disconnect NVMF target, then unshare and destroy nexus. 
+ target.disconnect().unwrap(); + + ms.spawn(async { + let nexus = nexus_lookup("nexus").unwrap(); + nexus.unshare_nexus().await.unwrap(); + nexus.destroy().await.unwrap(); + }) + .await; + + /* Create 2 single-disk nexuses for every existing disk (already) + * populated with test data file, and check overall data consistency + * by accessing each disk separately via its own nexus. + */ + ms.spawn(async { + create_nexus_splitted().await; + }) + .await; + + for n in ["left", "right"].iter() { + let uri = ms + .spawn(async move { + let nexus = nexus_lookup(n).unwrap(); + nexus + .share(ShareProtocolNexus::NexusNvmf, None) + .await + .unwrap() + }) + .await; + + // Create and connect NVMF target. + let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); + let devices = target.connect().unwrap(); + + assert_eq!(devices.len(), 1); + let nvmf_dev = &devices[0].path; + let md5 = common::mount_and_get_md5(&nvmf_dev).unwrap(); + + assert_eq!(md5, md5sum); + + // Cleanup target. + target.disconnect().unwrap(); + ms.spawn(async move { + let nexus = nexus_lookup(n).unwrap(); + nexus.unshare_nexus().await.unwrap(); + nexus.destroy().await.unwrap(); + }) + .await; } +} - test_init!(); +#[tokio::test] +async fn mount_fs_mirror() { + let ms = get_ms(); - common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]); - common::truncate_file(DISKNAME1, 64 * 1024); - common::truncate_file(DISKNAME2, 64 * 1024); + prepare_storage!(); - Reactor::block_on(async { - mirror_fs_test("xfs".into()).await; - mirror_fs_test("ext4".into()).await; - }); + mount_test(ms, "xfs").await; + mount_test(ms, "ext4").await; } -#[test] -fn mount_fs_1() { - test_init!(); - Reactor::block_on(async { - let (s, r) = unbounded::(); - create_nexus().await; - let nexus = nexus_lookup("nexus").unwrap(); +#[tokio::test] +async fn mount_fs_multiple() { + let ms = get_ms(); - //TODO: repeat this test for NVMF and ISCSI - let device = common::device_path_from_uri( - nexus - .share(ShareProtocolNexus::NexusNbd, None) - 
.await - .unwrap(), - ); - - Mthread::spawn_unaffinitized(move || { - for _i in 0 .. 10 { - if let Err(err) = common::mount_umount(&device) { - return s.send(err); - } - } - s.send("".into()) - }); - - assert_reactor_poll!(r, ""); + prepare_storage!(); + let (target, nvmf_dev) = create_connected_nvmf_nexus(ms).await; + + for _i in 0 .. 10 { + common::mount_umount(&nvmf_dev).unwrap(); + } + + target.disconnect().unwrap(); + ms.spawn(async move { + let nexus = nexus_lookup("nexus").unwrap(); + nexus.unshare_nexus().await.unwrap(); nexus.destroy().await.unwrap(); - }); + }) + .await; } -#[test] -fn mount_fs_2() { - test_init!(); - Reactor::block_on(async { - create_nexus().await; - let nexus = nexus_lookup("nexus").unwrap(); +#[tokio::test] +async fn mount_fn_fio() { + let ms = get_ms(); - //TODO: repeat this test for NVMF and ISCSI - let device = common::device_path_from_uri( - nexus - .share(ShareProtocolNexus::NexusNbd, None) - .await - .unwrap(), - ); - let (s, r) = unbounded::(); - - Mthread::spawn_unaffinitized(move || { - s.send(match common::fio_run_verify(&device) { - Ok(_) => "".to_owned(), - Err(err) => err, - }) - }); - assert_reactor_poll!(r, ""); - nexus.destroy().await.unwrap(); - }); + prepare_storage!(); + let (target, nvmf_dev) = create_connected_nvmf_nexus(ms).await; + + common::fio_run_verify(&nvmf_dev).unwrap(); - mayastor_env_stop(0); + target.disconnect().unwrap(); + ms.spawn(async move { + let nexus = nexus_lookup("nexus").unwrap(); + nexus.unshare_nexus().await.unwrap(); + nexus.destroy().await.unwrap(); + }) + .await; } async fn create_nexus() { diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index fc9be8337..ff558e493 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = rec { name = "mayastor"; # cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "07d3yvl43pqw5iwjpb1rd9b34s572m8w4p89nmqd68pc0kmpq4d2"; + cargoSha256 = 
"02dfgdi1h0g4nydwg1760wqx9i7f3g5bpxj0wbq81kvmx44ic6ii"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" diff --git a/nvmeadm/src/nvme_namespaces.rs b/nvmeadm/src/nvme_namespaces.rs index b50f449f7..c6f5450cd 100644 --- a/nvmeadm/src/nvme_namespaces.rs +++ b/nvmeadm/src/nvme_namespaces.rs @@ -10,7 +10,7 @@ use std::{os::unix::fs::FileTypeExt, path::Path}; #[derive(Debug, Default)] pub struct NvmeDevice { /// device path of the device - path: String, + pub path: String, /// the device model defined by the manufacturer model: String, /// serial number of the device From 0bcecc2ecd8cdc50ab6f36f5cc5246d2698295b1 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Fri, 13 Nov 2020 18:33:58 +0000 Subject: [PATCH 48/92] cargo tests: Limit debug logging to mayastor component This reduces the copious logging from the h2 component. Parse the internal logging initialisation string the same way as the RUST_LOG environment variable. --- mayastor/src/logger.rs | 17 +++++++---------- mayastor/tests/common/mod.rs | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index 8840b07fb..d80ec8321 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -1,4 +1,4 @@ -use std::{ffi::CStr, os::raw::c_char, str::FromStr}; +use std::{ffi::CStr, os::raw::c_char}; use tracing_log::format_trace; use tracing_subscriber::{ @@ -78,14 +78,11 @@ pub fn init(level: &str) { .with_timer(CustomTime("%FT%T%.9f%Z")) .with_span_events(FmtSpan::FULL); - if let Ok(filter) = EnvFilter::try_from_default_env() { - let subscriber = builder.with_env_filter(filter).finish(); - tracing::subscriber::set_global_default(subscriber) + let subscriber = if let Ok(filter) = EnvFilter::try_from_default_env() { + builder.with_env_filter(filter).finish() } else { - let max_level = - tracing::Level::from_str(level).unwrap_or(tracing::Level::INFO); - let subscriber = 
builder.with_max_level(max_level).finish(); - tracing::subscriber::set_global_default(subscriber) - } - .expect("failed to set default subscriber"); + builder.with_env_filter(level).finish() + }; + tracing::subscriber::set_global_default(subscriber) + .expect("failed to set default subscriber"); } diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 5855f9697..cf581ad86 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -134,7 +134,7 @@ pub fn mayastor_test_init() { panic!("binary: {} not present in path", binary); } }); - logger::init("DEBUG"); + logger::init("mayastor=DEBUG"); mayastor::CPS_INIT!(); } From 7998b47deb1f936e7092c7c6b5551d62c1dd5a9a Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Thu, 12 Nov 2020 14:35:40 +0000 Subject: [PATCH 49/92] e2e vagrant deployment usability fixes - Enable and start iscsi daemeon service in the nodes - Use ubuntu2004 and load nvme kernel modules - Add creation of pools as part of the deployment --- mayastor-test/e2e/install/deploy/pool.yaml | 9 ++++++ mayastor-test/e2e/install/install_test.go | 33 +++++++++++++++++++++- mayastor-test/e2e/setup/bringup-cluster.sh | 6 ++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 mayastor-test/e2e/install/deploy/pool.yaml diff --git a/mayastor-test/e2e/install/deploy/pool.yaml b/mayastor-test/e2e/install/deploy/pool.yaml new file mode 100644 index 000000000..3cdfd52f6 --- /dev/null +++ b/mayastor-test/e2e/install/deploy/pool.yaml @@ -0,0 +1,9 @@ +apiVersion: "openebs.io/v1alpha1" +kind: MayastorPool +metadata: + name: pool-${NODE_NAME} + #generateName: pool-${NODE_NAME} + namespace: mayastor +spec: + node: ${NODE_NAME} + disks: ["/dev/sda"] diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go index aded431d7..925802267 100644 --- a/mayastor-test/e2e/install/install_test.go +++ b/mayastor-test/e2e/install/install_test.go @@ -3,6 +3,7 @@ package basic_test import ( 
"context" "fmt" + corev1 "k8s.io/api/core/v1" "os/exec" "path" "runtime" @@ -69,7 +70,16 @@ func mayastorReadyPodCount() int { fmt.Println("Failed to get mayastor DaemonSet") return -1 } - return int(mayastorDaemonSet.Status.CurrentNumberScheduled) + return int(mayastorDaemonSet.Status.NumberAvailable) +} + +func moacReadyPodCount() int { + var moacDeployment appsv1.Deployment + if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "moac", Namespace: "mayastor"}, &moacDeployment) != nil { + fmt.Println("Failed to get MOAC deployment") + return -1 + } + return int(moacDeployment.Status.AvailableReplicas) } // Install mayastor on the cluster under test. @@ -77,6 +87,7 @@ func mayastorReadyPodCount() int { // objects, so that we can verfiy the local deploy yamls are correct. func installMayastor() { applyDeployYaml("namespace.yaml") + applyDeployYaml("storage-class.yaml") applyDeployYaml("moac-rbac.yaml") applyDeployYaml("mayastorpoolcrd.yaml") applyDeployYaml("nats-deployment.yaml") @@ -90,6 +101,26 @@ func installMayastor() { "120s", // timeout "1s", // polling interval ).Should(Equal(3)) + + Eventually(moacReadyPodCount(), + "60s", // timeout + "1s", // polling interval + ).Should(Equal(1)) + + // Now create pools on all nodes. 
+ // Note the disk for use on each node has been set in deploy/pool.yaml + nodeList := corev1.NodeList{} + if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { + fmt.Println("Failed to list Nodes, pools not created") + return + } + for _, k8node := range nodeList.Items { + bashcmd := "NODE_NAME=" + k8node.Name + " envsubst < " + "pool.yaml" + " | kubectl apply -f -" + cmd := exec.Command("bash", "-c", bashcmd) + cmd.Dir = getTemplateYamlDir() + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) + } } func TestInstallSuite(t *testing.T) { diff --git a/mayastor-test/e2e/setup/bringup-cluster.sh b/mayastor-test/e2e/setup/bringup-cluster.sh index 9e92d521d..8215b54f9 100755 --- a/mayastor-test/e2e/setup/bringup-cluster.sh +++ b/mayastor-test/e2e/setup/bringup-cluster.sh @@ -27,6 +27,9 @@ prepare_kubespray_repo() { \$vm_cpus = 4 \$kube_node_instances_with_disks = true \$kube_node_instances_with_disks_size = "2G" +\$kube_node_instances_with_disks_number = 1 +\$os = "ubuntu2004" +\$etcd_instances = 1 \$kube_master_instances = 1 EOF popd @@ -64,6 +67,9 @@ setup_one_node() { vagrant ssh $node_name -c "echo \"{\\\"insecure-registries\\\" : [\\\"${REGISTRY_ENDPOINT}\\\"]}\" | sudo tee /etc/docker/daemon.json" vagrant ssh $node_name -c "sudo service docker restart" + vagrant ssh $node_name -c "sudo systemctl enable iscsid" + vagrant ssh $node_name -c "sudo systemctl start iscsid" + vagrant ssh $node_name -c "sudo modprobe nvme-tcp nvmet" # Make sure everything's back after those restarts... 
export -f wait_for_ready From cb36e206a3e502ec4e621ab08c70be4fa7a4d6e2 Mon Sep 17 00:00:00 2001 From: Antonin Kral Date: Mon, 16 Nov 2020 14:35:25 +0100 Subject: [PATCH 50/92] Remove convoluted when{} statements and add guard for `develop` from Jenkinsfile --- Jenkinsfile | 77 +++++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6e1e378f9..84f400418 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,5 +1,13 @@ #!/usr/bin/env groovy +// Will ABORT current job for cases when we don't want to build +if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && + BRANCH_NAME == "develop") { + print "INFO: Branch Indexing, aborting job." + currentBuild.result = 'ABORTED' + return +} + // Update status of a commit in github def updateGithubCommitStatus(commit, msg, state) { step([ @@ -20,10 +28,23 @@ def updateGithubCommitStatus(commit, msg, state) { ]) } +// Searches previous builds to find first non aborted one +def getLastNonAbortedBuild(build) { + if (build == null) { + return null; + } + + if(build.result.toString().equals("ABORTED")) { + return getLastNonAbortedBuild(build.getPreviousBuild()); + } else { + return build; + } +} + // Send out a slack message if branch got broken or has recovered def notifySlackUponStateChange(build) { def cur = build.getResult() - def prev = build.getPreviousBuild().getResult() + def prev = getLastNonAbortedBuild(build.getPreviousBuild())?.getResult() if (cur != prev) { if (cur == 'SUCCESS') { slackSend( @@ -41,10 +62,13 @@ def notifySlackUponStateChange(build) { } } +// Only schedule regular builds on develop branch, so we don't need to guard against it +String cron_schedule = BRANCH_NAME == "develop" ? 
"0 2 * * *" : "" + pipeline { agent none triggers { - cron('0 2 * * *') + cron(cron_schedule) } stages { @@ -52,14 +76,10 @@ pipeline { agent { label 'nixos-mayastor' } when { beforeAgent true - anyOf { - allOf { - branch 'staging' - not { triggeredBy 'TimerTrigger' } - } - allOf { - branch 'trying' - not { triggeredBy 'TimerTrigger' } + not { + anyOf { + branch 'master' + branch 'release/*' } } } @@ -73,21 +93,10 @@ pipeline { stage('test') { when { beforeAgent true - anyOf { - allOf { - branch 'staging' - not { triggeredBy 'TimerTrigger' } - } - allOf { - branch 'trying' - not { triggeredBy 'TimerTrigger' } - } - allOf { - branch 'develop' - anyOf { - triggeredBy 'TimerTrigger' - triggeredBy cause: 'UserIdCause' - } + not { + anyOf { + branch 'master' + branch 'release/*' } } } @@ -153,21 +162,9 @@ pipeline { when { beforeAgent true anyOf { - allOf { - branch 'master' - not { triggeredBy 'TimerTrigger' } - } - allOf { - branch 'release/*' - not { triggeredBy 'TimerTrigger' } - } - allOf { - branch 'develop' - anyOf { - triggeredBy 'TimerTrigger' - triggeredBy cause: 'UserIdCause' - } - } + branch 'master' + branch 'release/*' + branch 'develop' } } steps { From 969f98aa1a3d71a6471d1b628dbfcf0d9a8dc39b Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Mon, 16 Nov 2020 08:42:14 +0000 Subject: [PATCH 51/92] CAS-495 Include more callsite information in trace events: - Created custom formatter for default subscriber to include additional information such as file and line number. - Only generate ansi escape sequences to display LEVEL in colour if stdout is attached to a terminal. 
--- Cargo.lock | 3 + mayastor/Cargo.toml | 7 +- mayastor/src/logger.rs | 238 +++++++++++++++++++++++++++++++--- nix/pkgs/mayastor/default.nix | 2 +- 4 files changed, 227 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebf8c8c15..de58d1160 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1714,10 +1714,12 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" name = "mayastor" version = "0.1.0" dependencies = [ + "ansi_term 0.12.1", "assert_matches", "async-mutex", "async-task", "async-trait", + "atty", "bincode", "bollard", "byte-unit", @@ -1765,6 +1767,7 @@ dependencies = [ "tonic", "tower", "tracing", + "tracing-core", "tracing-futures", "tracing-log", "tracing-subscriber", diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 9b65fcf89..9350fde3d 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -33,9 +33,11 @@ name = "casperf" path = "src/bin/casperf.rs" [dependencies] +ansi_term = "0.12" async-mutex = "1.4.0" async-task = "4.0.2" async-trait = "0.1.36" +atty = "0.2" bincode = "1.2" byte-unit = "3.0.1" bytes = "0.4.12" @@ -71,9 +73,10 @@ nats = "0.8" tonic = "0.1" tower = "0.3" tracing = "0.1" +tracing-core = "0.1" tracing-futures = "0.2.4" -tracing-log = "0.1.1" -tracing-subscriber = "0.2.0" +tracing-log = "0.1" +tracing-subscriber = "0.2" udev = "0.4" url = "2.1" smol = "1.0.0" diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index d80ec8321..d98c3c9c0 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -1,8 +1,16 @@ -use std::{ffi::CStr, os::raw::c_char}; +use std::{ffi::CStr, fmt::Write, os::raw::c_char, path::Path}; -use tracing_log::format_trace; +use ansi_term::{Colour, Style}; + +use tracing_core::{event::Event, Metadata}; +use tracing_log::{format_trace, NormalizeEvent}; use tracing_subscriber::{ - fmt::{format::FmtSpan, time::FormatTime, Subscriber}, + fmt::{ + format::{FmtSpan, FormatEvent, FormatFields}, + FmtContext, + FormattedFields, + }, + 
registry::LookupSpan, EnvFilter, }; @@ -43,27 +51,213 @@ pub extern "C" fn log_impl( return; } - // remove new line characters from the log messages if any - let fmt = + let arg = unsafe { CStr::from_ptr(buf).to_string_lossy().trim_end().to_string() }; let filename = unsafe { CStr::from_ptr(file).to_str().unwrap() }; format_trace( &log::Record::builder() - .args(format_args!("{}", fmt)) - .target(module_path!()) + .args(format_args!("{}", arg)) + .level(from_spdk_level(spdk_level)) + .target("mayastor::spdk") .file(Some(filename)) .line(Some(line)) - .level(from_spdk_level(spdk_level)) .build(), ) .unwrap(); } -struct CustomTime<'a>(&'a str); -impl FormatTime for CustomTime<'_> { - fn format_time(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result { - write!(w, "{}", chrono::Local::now().format(self.0)) +// Custom struct used to format the log/trace LEVEL +struct FormatLevel<'a> { + level: &'a tracing::Level, + ansi: bool, +} + +impl<'a> FormatLevel<'a> { + fn new(level: &'a tracing::Level, ansi: bool) -> Self { + Self { + level, + ansi, + } + } +} + +// Display trace LEVEL. 
+impl std::fmt::Display for FormatLevel<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + const TRACE: &str = "TRACE"; + const DEBUG: &str = "DEBUG"; + const INFO: &str = " INFO"; + const WARN: &str = " WARN"; + const ERROR: &str = "ERROR"; + + if self.ansi { + match *self.level { + tracing::Level::TRACE => { + write!(f, "{}", Colour::Purple.paint(TRACE)) + } + tracing::Level::DEBUG => { + write!(f, "{}", Colour::Blue.paint(DEBUG)) + } + tracing::Level::INFO => { + write!(f, "{}", Colour::Green.paint(INFO)) + } + tracing::Level::WARN => { + write!(f, "{}", Colour::Yellow.paint(WARN)) + } + tracing::Level::ERROR => { + write!(f, "{}", Colour::Red.paint(ERROR)) + } + } + } else { + match *self.level { + tracing::Level::TRACE => f.pad(TRACE), + tracing::Level::DEBUG => f.pad(DEBUG), + tracing::Level::INFO => f.pad(INFO), + tracing::Level::WARN => f.pad(WARN), + tracing::Level::ERROR => f.pad(ERROR), + } + } + } +} + +// Custom struct used to format trace context (span) information +struct CustomContext<'a, S, N> +where + S: tracing_core::subscriber::Subscriber + for<'s> LookupSpan<'s>, + N: for<'w> FormatFields<'w> + 'static, +{ + context: &'a FmtContext<'a, S, N>, + span: Option<&'a tracing_core::span::Id>, + ansi: bool, +} + +impl<'a, S, N: 'a> CustomContext<'a, S, N> +where + S: tracing_core::subscriber::Subscriber + for<'s> LookupSpan<'s>, + N: for<'w> FormatFields<'w> + 'static, +{ + fn new( + context: &'a FmtContext<'a, S, N>, + span: Option<&'a tracing_core::span::Id>, + ansi: bool, + ) -> Self { + Self { + context, + span, + ansi, + } + } +} + +// Display trace context (span) information +impl<'a, S, N> std::fmt::Display for CustomContext<'a, S, N> +where + S: tracing_core::subscriber::Subscriber + for<'s> LookupSpan<'s>, + N: for<'w> FormatFields<'w> + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let bold = if self.ansi { + Style::new().bold() + } else { + Style::new() + }; + + let scope = 
self + .span + .and_then(|ref id| self.context.span(id)) + .or_else(|| self.context.lookup_current()) + .into_iter() + .flat_map(|span| span.from_root().chain(std::iter::once(span))); + + for span in scope { + write!(f, "{}", bold.paint(span.metadata().name()))?; + + let extensions = span.extensions(); + + let fields = &extensions + .get::>() + .expect("unable to find FormattedFields in extensions"); + + if !fields.is_empty() { + write!(f, "{}{}{}", bold.paint("{"), fields, bold.paint("}"))?; + } + + f.write_char(' ')?; + } + + Ok(()) + } +} + +fn basename(path: &str) -> &str { + Path::new(path).file_name().unwrap().to_str().unwrap() +} + +// Custom struct used to format a callsite location (filename and line number) +struct Location<'a> { + meta: &'a Metadata<'a>, +} + +impl<'a> Location<'a> { + fn new(meta: &'a Metadata<'a>) -> Self { + Self { + meta, + } + } +} + +// Display callsite location (filename and line number) from metadata +impl std::fmt::Display for Location<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Some(file) = self.meta.file() { + if let Some(line) = self.meta.line() { + write!(f, "({}:{}) ", basename(file), line)?; + } + } + Ok(()) + } +} + +// Custom struct used to format trace events. +struct CustomFormat { + ansi: bool, +} + +// Format a trace event. 
+impl FormatEvent for CustomFormat +where + S: tracing_core::subscriber::Subscriber + for<'s> LookupSpan<'s>, + N: for<'w> FormatFields<'w> + 'static, +{ + fn format_event( + &self, + context: &FmtContext<'_, S, N>, + writer: &mut dyn std::fmt::Write, + event: &Event<'_>, + ) -> std::fmt::Result { + let normalized = event.normalized_metadata(); + let meta = normalized.as_ref().unwrap_or_else(|| event.metadata()); + + write!( + writer, + "[{} {} {}] ", + chrono::Local::now().format("%FT%T%.9f%Z"), + FormatLevel::new(meta.level(), self.ansi), + meta.target() + )?; + + write!( + writer, + "{}", + CustomContext::new(context, event.parent(), self.ansi) + )?; + + write!(writer, "{}", Location::new(&meta))?; + + context.format_fields(writer, event)?; + + writeln!(writer) } } @@ -74,15 +268,19 @@ impl FormatTime for CustomTime<'_> { /// We might want to suppress certain messages, as some of them are redundant, /// in particular, the NOTICE messages as such, they are mapped to debug. pub fn init(level: &str) { - let builder = Subscriber::builder() - .with_timer(CustomTime("%FT%T%.9f%Z")) - .with_span_events(FmtSpan::FULL); - - let subscriber = if let Ok(filter) = EnvFilter::try_from_default_env() { - builder.with_env_filter(filter).finish() - } else { - builder.with_env_filter(level).finish() + let format = CustomFormat { + ansi: atty::is(atty::Stream::Stdout), + }; + + let builder = tracing_subscriber::fmt::Subscriber::builder() + .with_span_events(FmtSpan::FULL) + .event_format(format); + + let subscriber = match EnvFilter::try_from_default_env() { + Ok(filter) => builder.with_env_filter(filter).finish(), + Err(_) => builder.with_env_filter(level).finish(), }; + tracing::subscriber::set_global_default(subscriber) .expect("failed to set default subscriber"); } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index ff558e493..e66376803 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = 
rec { name = "mayastor"; # cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "02dfgdi1h0g4nydwg1760wqx9i7f3g5bpxj0wbq81kvmx44ic6ii"; + cargoSha256 = "0rrkj111h7h5blj6qx28166hygag3y92zn5isqig03fnib2zx3mi"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" From 7c5cb7f54602fc7be47a819299336dd883ec0f5b Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Tue, 17 Nov 2020 12:01:42 +0000 Subject: [PATCH 52/92] CAS-517 Ensure that "log" messages from third party crates are displayed as trace events. --- mayastor/src/logger.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index d98c3c9c0..3793cee22 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -3,7 +3,7 @@ use std::{ffi::CStr, fmt::Write, os::raw::c_char, path::Path}; use ansi_term::{Colour, Style}; use tracing_core::{event::Event, Metadata}; -use tracing_log::{format_trace, NormalizeEvent}; +use tracing_log::{LogTracer, NormalizeEvent}; use tracing_subscriber::{ fmt::{ format::{FmtSpan, FormatEvent, FormatFields}, @@ -55,7 +55,7 @@ pub extern "C" fn log_impl( unsafe { CStr::from_ptr(buf).to_string_lossy().trim_end().to_string() }; let filename = unsafe { CStr::from_ptr(file).to_str().unwrap() }; - format_trace( + log::logger().log( &log::Record::builder() .args(format_args!("{}", arg)) .level(from_spdk_level(spdk_level)) @@ -63,8 +63,7 @@ pub extern "C" fn log_impl( .file(Some(filename)) .line(Some(line)) .build(), - ) - .unwrap(); + ); } // Custom struct used to format the log/trace LEVEL @@ -132,7 +131,7 @@ where ansi: bool, } -impl<'a, S, N: 'a> CustomContext<'a, S, N> +impl<'a, S, N> CustomContext<'a, S, N> where S: tracing_core::subscriber::Subscriber + for<'s> LookupSpan<'s>, N: for<'w> FormatFields<'w> + 'static, @@ -268,10 +267,17 @@ where /// We might want to suppress certain messages, as some of them are redundant, /// in particular, the NOTICE messages as 
such, they are mapped to debug. pub fn init(level: &str) { + // Set up a "logger" that simply translates any "log" messages it receives + // to trace events. This is for our custom spdk log messages, but also + // for any other third party crates still using the logging facade. + LogTracer::init().expect("failed to initialise LogTracer"); + + // Our own custom format for displaying trace events. let format = CustomFormat { ansi: atty::is(atty::Stream::Stdout), }; + // Create a default subscriber. let builder = tracing_subscriber::fmt::Subscriber::builder() .with_span_events(FmtSpan::FULL) .event_format(format); From 8243cc8da5883ab92731e462c384a2e70c9e7672 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 16 Nov 2020 15:08:36 +0000 Subject: [PATCH 53/92] Move common compose bits into a library... ... so that other components of our stack can make use of it. --- Cargo.lock | 50 ++- Cargo.toml | 3 +- composer/Cargo.toml | 16 + composer/src/lib.rs | 565 +++++++++++++++++++++++++++++++ mayastor/Cargo.toml | 1 + mayastor/tests/common/compose.rs | 565 +------------------------------ mayastor/tests/common/mod.rs | 2 +- 7 files changed, 619 insertions(+), 583 deletions(-) create mode 100644 composer/Cargo.toml create mode 100644 composer/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index de58d1160..b572b1305 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -564,6 +564,19 @@ dependencies = [ "serde_json", ] +[[package]] +name = "composer" +version = "0.1.0" +dependencies = [ + "bollard", + "crossbeam", + "futures", + "ipnetwork", + "rpc", + "tokio", + "tonic", +] + [[package]] name = "concurrent-queue" version = "1.2.2" @@ -1129,9 +1142,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" +checksum = 
"9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", @@ -1144,9 +1157,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", @@ -1154,15 +1167,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-executor" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", @@ -1171,9 +1184,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1798854a4727ff944a7b12aa999f58ce7aa81db80d2dfaaf2ba06f065ddd2b" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = "futures-lite" @@ -1192,9 +1205,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", @@ -1204,15 +1217,15 @@ dependencies = [ 
[[package]] name = "futures-sink" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ "once_cell", ] @@ -1225,9 +1238,9 @@ checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-util" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" +checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ "futures-channel", "futures-core", @@ -1727,6 +1740,7 @@ dependencies = [ "chrono", "clap", "colored_json", + "composer", "crc", "crossbeam", "crossbeam-sync", diff --git a/Cargo.toml b/Cargo.toml index 4383debfa..33070a527 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,5 +15,6 @@ members = [ "rpc", "sysfs", "services", - "mbus-api" + "mbus-api", + "composer", ] diff --git a/composer/Cargo.toml b/composer/Cargo.toml new file mode 100644 index 000000000..c8201af77 --- /dev/null +++ b/composer/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "composer" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { version = "0.2", features = ["full"] } +futures = "0.3.8" +tonic = "0.1" +crossbeam = "0.7.3" +rpc = { path = "../rpc" } +ipnetwork = "0.17.0" +bollard = "0.8.0" diff --git a/composer/src/lib.rs 
b/composer/src/lib.rs new file mode 100644 index 000000000..453fc4277 --- /dev/null +++ b/composer/src/lib.rs @@ -0,0 +1,565 @@ +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddr, TcpStream}, + thread, + time::Duration, +}; + +use bollard::{ + container::{ + Config, + CreateContainerOptions, + ListContainersOptions, + LogsOptions, + NetworkingConfig, + RemoveContainerOptions, + StopContainerOptions, + }, + errors::Error, + network::{CreateNetworkOptions, ListNetworksOptions}, + service::{ + ContainerSummaryInner, + EndpointIpamConfig, + EndpointSettings, + HostConfig, + Ipam, + Mount, + MountTypeEnum, + Network, + }, + Docker, +}; +use futures::TryStreamExt; +use ipnetwork::Ipv4Network; +use tonic::transport::Channel; + +use bollard::models::ContainerInspectResponse; +use rpc::mayastor::{ + bdev_rpc_client::BdevRpcClient, + mayastor_client::MayastorClient, +}; + +#[derive(Clone)] +pub struct RpcHandle { + pub name: String, + pub endpoint: SocketAddr, + pub mayastor: MayastorClient, + pub bdev: BdevRpcClient, +} + +impl RpcHandle { + /// connect to the containers and construct a handle + async fn connect(name: String, endpoint: SocketAddr) -> Self { + loop { + if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) + .is_ok() + { + break; + } else { + thread::sleep(Duration::from_millis(101)); + } + } + + let mayastor = + MayastorClient::connect(format!("http://{}", endpoint.to_string())) + .await + .unwrap(); + let bdev = + BdevRpcClient::connect(format!("http://{}", endpoint.to_string())) + .await + .unwrap(); + + Self { + name, + mayastor, + bdev, + endpoint, + } + } +} + +pub struct Builder { + /// name of the experiment this name will be used as a network and labels + /// this way we can "group" all objects within docker to match this test + /// test. 
It is highly recommend you use a sane name for this as it will + /// help you during debugging + name: String, + /// containers we want to create, note these are mayastor containers + /// only + containers: Vec, + /// the network for the tests used + network: String, + /// delete the container and network when dropped + clean: bool, +} + +impl Default for Builder { + fn default() -> Self { + Builder::new() + } +} + +impl Builder { + /// construct a new builder for `[ComposeTest'] + pub fn new() -> Self { + Self { + name: "".to_string(), + containers: Default::default(), + network: "10.1.0.0".to_string(), + clean: true, + } + } + + /// set the network for this test + pub fn network(mut self, network: &str) -> Builder { + self.network = network.to_owned(); + self + } + + /// the name to be used as labels and network name + pub fn name(mut self, name: &str) -> Builder { + self.name = name.to_owned(); + self + } + + /// add a mayastor container with a name + pub fn add_container(mut self, name: &str) -> Builder { + self.containers.push(name.to_owned()); + self + } + + /// clean on drop? 
+ pub fn with_clean(mut self, enable: bool) -> Builder { + self.clean = enable; + self + } + + /// build the config and start the containers + pub async fn build( + self, + ) -> Result> { + let net: Ipv4Network = self.network.parse()?; + + let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + let srcdir = path.parent().unwrap().to_string_lossy().into(); + let binary = format!("{}/target/debug/mayastor", srcdir); + + let docker = Docker::connect_with_unix_defaults()?; + + let mut cfg = HashMap::new(); + cfg.insert( + "Subnet".to_string(), + format!("{}/{}", net.network().to_string(), net.prefix()), + ); + cfg.insert("Gateway".into(), net.nth(1).unwrap().to_string()); + + let ipam = Ipam { + driver: Some("default".into()), + config: Some(vec![cfg]), + options: None, + }; + + let mut compose = ComposeTest { + name: self.name.clone(), + srcdir, + binary, + docker, + network_id: "".to_string(), + containers: Default::default(), + ipam, + label: format!("io.mayastor.test.{}", self.name), + clean: self.clean, + }; + + compose.network_id = + compose.network_create().await.map_err(|e| e.to_string())?; + + // containers are created where the IPs are ordinal + for (i, name) in self.containers.iter().enumerate() { + compose + .create_container( + name, + &net.nth((i + 2) as u32).unwrap().to_string(), + ) + .await?; + } + + compose.start_all().await?; + Ok(compose) + } +} + +/// +/// Some types to avoid confusion when +/// +/// different networks are referred to, internally as networkId in docker +type NetworkId = String; +/// container name +type ContainerName = String; +/// container ID +type ContainerId = String; + +#[derive(Clone, Debug)] +pub struct ComposeTest { + /// used as the network name + name: String, + /// the source dir the tests are run in + srcdir: String, + /// the binary we are using relative to srcdir + binary: String, + /// handle to the docker daemon + docker: Docker, + /// the network id is used to attach containers to networks + 
network_id: NetworkId, + /// the name of containers and their (IDs, Ipv4) we have created + /// perhaps not an ideal data structure, but we can improve it later + /// if we need to + containers: HashMap, + /// the default network configuration we use for our test cases + ipam: Ipam, + /// set on containers and networks + label: String, + /// automatically clean up the things we have created for this test + clean: bool, +} + +impl Drop for ComposeTest { + /// destroy the containers and network. Notice that we use sync code here + fn drop(&mut self) { + if self.clean { + self.containers.keys().for_each(|c| { + std::process::Command::new("docker") + .args(&["stop", c]) + .output() + .unwrap(); + std::process::Command::new("docker") + .args(&["rm", c]) + .output() + .unwrap(); + }); + + std::process::Command::new("docker") + .args(&["network", "rm", &self.name]) + .output() + .unwrap(); + } + } +} + +impl ComposeTest { + /// Create a new network, with default settings. If a network with the same + /// name already exists it will be reused. 
Note that we do not check the + /// networking IP and/or subnets + async fn network_create(&mut self) -> Result { + let mut net = self.network_list().await?; + + if !net.is_empty() { + let first = net.pop().unwrap(); + self.network_id = first.id.unwrap(); + return Ok(self.network_id.clone()); + } + + let create_opts = CreateNetworkOptions { + name: self.name.as_str(), + check_duplicate: true, + driver: "bridge", + internal: true, + attachable: true, + ingress: false, + ipam: self.ipam.clone(), + enable_ipv6: false, + options: vec![("com.docker.network.bridge.name", "mayabridge0")] + .into_iter() + .collect(), + labels: vec![(self.label.as_str(), "true")].into_iter().collect(), + }; + + self.docker.create_network(create_opts).await.map(|r| { + self.network_id = r.id.unwrap(); + self.network_id.clone() + }) + } + + async fn network_remove(&self) -> Result<(), Error> { + // if the network is not found, its not an error, any other error is + // reported as such. Networks can only be destroyed when all containers + // attached to it are removed. 
To get a list of attached + // containers, use network_list() + if let Err(e) = self.docker.remove_network(&self.name).await { + if !matches!(e, Error::DockerResponseNotFoundError{..}) { + return Err(e); + } + } + + Ok(()) + } + + /// list all the docker networks + pub async fn network_list(&self) -> Result, Error> { + self.docker + .list_networks(Some(ListNetworksOptions { + filters: vec![("name", vec![self.name.as_str()])] + .into_iter() + .collect(), + })) + .await + } + + /// list containers + pub async fn list_containers( + &self, + ) -> Result, Error> { + self.docker + .list_containers(Some(ListContainersOptions { + all: true, + filters: vec![( + "label", + vec![format!("{}=true", self.label).as_str()], + )] + .into_iter() + .collect(), + ..Default::default() + })) + .await + } + + /// remove a container from the configuration + async fn remove_container(&self, name: &str) -> Result<(), Error> { + self.docker + .remove_container( + name, + Some(RemoveContainerOptions { + v: true, + force: true, + link: false, + }), + ) + .await?; + + Ok(()) + } + + /// remove all containers and its network + async fn remove_all(&self) -> Result<(), Error> { + for k in &self.containers { + self.stop(&k.0).await?; + self.remove_container(&k.0).await?; + while let Ok(_c) = self.docker.inspect_container(&k.0, None).await { + tokio::time::delay_for(Duration::from_millis(500)).await; + } + } + self.network_remove().await?; + Ok(()) + } + + /// we need to construct several objects to create a setup that meets our + /// liking: + /// + /// (1) hostconfig: that configures the host side of the container, i.e what + /// features/settings from the host perspective do we want too setup + /// for the container. 
(2) endpoints: this allows us to plugin in the + /// container into our network configuration (3) config: the actual + /// config which includes the above objects + async fn create_container( + &mut self, + name: &str, + ipv4: &str, + ) -> Result<(), Error> { + let host_config = HostConfig { + binds: Some(vec![ + format!("{}:{}", self.srcdir, self.srcdir), + "/nix:/nix:ro".into(), + "/dev/hugepages:/dev/hugepages:rw".into(), + ]), + mounts: Some(vec![ + // DPDK needs to have a /tmp + Mount { + target: Some("/tmp".into()), + typ: Some(MountTypeEnum::TMPFS), + ..Default::default() + }, + // mayastor needs to have a /var/tmp + Mount { + target: Some("/var/tmp".into()), + typ: Some(MountTypeEnum::TMPFS), + ..Default::default() + }, + ]), + cap_add: Some(vec![ + "SYS_ADMIN".to_string(), + "IPC_LOCK".into(), + "SYS_NICE".into(), + ]), + security_opt: Some(vec!["seccomp:unconfined".into()]), + ..Default::default() + }; + + let mut endpoints_config = HashMap::new(); + endpoints_config.insert( + self.name.as_str(), + EndpointSettings { + network_id: Some(self.network_id.to_string()), + ipam_config: Some(EndpointIpamConfig { + ipv4_address: Some(ipv4.into()), + ..Default::default() + }), + ..Default::default() + }, + ); + + let env = format!("MY_POD_IP={}", ipv4); + + let config = Config { + cmd: Some(vec![self.binary.as_str(), "-g", "0.0.0.0"]), + env: Some(vec![&env]), + image: None, // notice we do not have a base image here + hostname: Some(name), + host_config: Some(host_config), + networking_config: Some(NetworkingConfig { + endpoints_config, + }), + working_dir: Some(self.srcdir.as_str()), + volumes: Some( + vec![ + ("/dev/hugepages", HashMap::new()), + ("/nix", HashMap::new()), + (self.srcdir.as_str(), HashMap::new()), + ] + .into_iter() + .collect(), + ), + labels: Some( + vec![(self.label.as_str(), "true")].into_iter().collect(), + ), + ..Default::default() + }; + + let container = self + .docker + .create_container( + Some(CreateContainerOptions { + name, + }), + 
config, + ) + .await + .unwrap(); + + self.containers + .insert(name.to_string(), (container.id, ipv4.parse().unwrap())); + + Ok(()) + } + + /// start the container + async fn start(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker + .start_container::<&str>(id.0.as_str(), None) + .await?; + + Ok(()) + } + + /// stop the container + async fn stop(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + if let Err(e) = self + .docker + .stop_container( + id.0.as_str(), + Some(StopContainerOptions { + t: 3, + }), + ) + .await + { + // where already stopped + if !matches!(e, Error::DockerResponseNotModifiedError{..}) { + return Err(e); + } + } + + Ok(()) + } + + /// get the logs from the container. It would be nice to make it implicit + /// that is, when you make a rpc call, whatever logs where created due to + /// that are returned + pub async fn logs(&self, name: &str) -> Result<(), Error> { + let logs = self + .docker + .logs( + name, + Some(LogsOptions { + follow: false, + stdout: true, + stderr: true, + since: 0, // TODO log lines since last call? + until: 0, + timestamps: false, + tail: "all", + }), + ) + .try_collect::>() + .await?; + + logs.iter().for_each(|l| print!("{}:{}", name, l)); + Ok(()) + } + + /// start all the containers + async fn start_all(&mut self) -> Result<(), Error> { + for k in &self.containers { + self.start(&k.0).await?; + } + + Ok(()) + } + + /// inspect the given container + pub async fn inspect( + &self, + name: &str, + ) -> Result { + self.docker.inspect_container(name, None).await + } + + /// pause the container; unfortunately, when the API returns it does not + /// mean that the container indeed is frozen completely, in the sense + /// that it's not to be assumed that right after a call -- the container + /// stops responding. 
+ pub async fn pause(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker.pause_container(id.0.as_str()).await?; + + Ok(()) + } + + /// un_pause the container + pub async fn thaw(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + self.docker.unpause_container(id.0.as_str()).await + } + + /// return grpc handles to the containers + pub async fn grpc_handles(&self) -> Result, ()> { + let mut handles = Vec::new(); + for v in &self.containers { + handles.push( + RpcHandle::connect( + v.0.clone(), + format!("{}:10124", v.1.1).parse::().unwrap(), + ) + .await, + ); + } + + Ok(handles) + } + + /// explicitly remove all containers + pub async fn down(&self) { + self.remove_all().await.unwrap(); + } +} diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 9350fde3d..e36e9af6a 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -110,3 +110,4 @@ version = "0.7" [dev-dependencies] assert_matches = "1.2" run_script = "*" +composer = { path = "../composer" } diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 343665086..ad6c26bd2 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -1,47 +1,10 @@ -use std::{ - collections::HashMap, - future::Future, - net::{Ipv4Addr, SocketAddr, TcpStream}, - thread, - time::Duration, -}; +pub use composer::*; -use bollard::{ - container::{ - Config, - CreateContainerOptions, - ListContainersOptions, - LogsOptions, - NetworkingConfig, - RemoveContainerOptions, - StopContainerOptions, - }, - errors::Error, - network::{CreateNetworkOptions, ListNetworksOptions}, - service::{ - ContainerSummaryInner, - EndpointIpamConfig, - EndpointSettings, - HostConfig, - Ipam, - Mount, - MountTypeEnum, - Network, - }, - Docker, -}; use crossbeam::crossbeam_channel::bounded; -use futures::TryStreamExt; -use ipnetwork::Ipv4Network; +use std::future::Future; use 
tokio::sync::oneshot::channel; -use tonic::transport::Channel; use crate::common::mayastor_test_init; -use ::rpc::mayastor::{ - bdev_rpc_client::BdevRpcClient, - mayastor_client::MayastorClient, -}; -use bollard::models::ContainerInspectResponse; use mayastor::core::{ mayastor_env_stop, MayastorCliArgs, @@ -50,530 +13,6 @@ use mayastor::core::{ Reactors, }; -#[derive(Clone)] -pub struct RpcHandle { - pub name: String, - pub endpoint: SocketAddr, - pub mayastor: MayastorClient, - pub bdev: BdevRpcClient, -} - -impl RpcHandle { - /// connect to the containers and construct a handle - async fn connect(name: String, endpoint: SocketAddr) -> Self { - loop { - if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) - .is_ok() - { - break; - } else { - thread::sleep(Duration::from_millis(101)); - } - } - - let mayastor = - MayastorClient::connect(format!("http://{}", endpoint.to_string())) - .await - .unwrap(); - let bdev = - BdevRpcClient::connect(format!("http://{}", endpoint.to_string())) - .await - .unwrap(); - - Self { - name, - mayastor, - bdev, - endpoint, - } - } -} - -pub struct Builder { - /// name of the experiment this name will be used as a network and labels - /// this way we can "group" all objects within docker to match this test - /// test. 
It is highly recommend you use a sane name for this as it will - /// help you during debugging - name: String, - /// containers we want to create, note these are mayastor containers - /// only - containers: Vec, - /// the network for the tests used - network: String, - /// delete the container and network when dropped - clean: bool, -} - -impl Default for Builder { - fn default() -> Self { - Builder::new() - } -} - -impl Builder { - /// construct a new builder for `[ComposeTest'] - pub fn new() -> Self { - Self { - name: "".to_string(), - containers: Default::default(), - network: "10.1.0.0".to_string(), - clean: true, - } - } - - /// set the network for this test - pub fn network(mut self, network: &str) -> Builder { - self.network = network.to_owned(); - self - } - - /// the name to be used as labels and network name - pub fn name(mut self, name: &str) -> Builder { - self.name = name.to_owned(); - self - } - - /// add a mayastor container with a name - pub fn add_container(mut self, name: &str) -> Builder { - self.containers.push(name.to_owned()); - self - } - - /// clean on drop? 
- pub fn with_clean(mut self, enable: bool) -> Builder { - self.clean = enable; - self - } - - /// build the config and start the containers - pub async fn build( - self, - ) -> Result> { - let net: Ipv4Network = self.network.parse()?; - - let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); - let srcdir = path.parent().unwrap().to_string_lossy().into(); - let binary = format!("{}/target/debug/mayastor", srcdir); - - let docker = Docker::connect_with_unix_defaults()?; - - let mut cfg = HashMap::new(); - cfg.insert( - "Subnet".to_string(), - format!("{}/{}", net.network().to_string(), net.prefix()), - ); - cfg.insert("Gateway".into(), net.nth(1).unwrap().to_string()); - - let ipam = Ipam { - driver: Some("default".into()), - config: Some(vec![cfg]), - options: None, - }; - - let mut compose = ComposeTest { - name: self.name.clone(), - srcdir, - binary, - docker, - network_id: "".to_string(), - containers: Default::default(), - ipam, - label: format!("io.mayastor.test.{}", self.name), - clean: self.clean, - }; - - compose.network_id = - compose.network_create().await.map_err(|e| e.to_string())?; - - // containers are created where the IPs are ordinal - for (i, name) in self.containers.iter().enumerate() { - compose - .create_container( - name, - &net.nth((i + 2) as u32).unwrap().to_string(), - ) - .await?; - } - - compose.start_all().await?; - Ok(compose) - } -} - -/// -/// Some types to avoid confusion when -/// -/// different networks are referred to, internally as networkId in docker -type NetworkId = String; -/// container name -type ContainerName = String; -/// container ID -type ContainerId = String; - -#[derive(Clone, Debug)] -pub struct ComposeTest { - /// used as the network name - name: String, - /// the source dir the tests are run in - srcdir: String, - /// the binary we are using relative to srcdir - binary: String, - /// handle to the docker daemon - docker: Docker, - /// the network id is used to attach containers to networks - 
network_id: NetworkId, - /// the name of containers and their (IDs, Ipv4) we have created - /// perhaps not an ideal data structure, but we can improve it later - /// if we need to - containers: HashMap, - /// the default network configuration we use for our test cases - ipam: Ipam, - /// set on containers and networks - label: String, - /// automatically clean up the things we have created for this test - clean: bool, -} - -impl Drop for ComposeTest { - /// destroy the containers and network. Notice that we use sync code here - fn drop(&mut self) { - if self.clean { - self.containers.keys().for_each(|c| { - std::process::Command::new("docker") - .args(&["stop", c]) - .output() - .unwrap(); - std::process::Command::new("docker") - .args(&["rm", c]) - .output() - .unwrap(); - }); - - std::process::Command::new("docker") - .args(&["network", "rm", &self.name]) - .output() - .unwrap(); - } - } -} - -impl ComposeTest { - /// Create a new network, with default settings. If a network with the same - /// name already exists it will be reused. 
Note that we do not check the - /// networking IP and/or subnets - async fn network_create(&mut self) -> Result { - let mut net = self.network_list().await?; - - if !net.is_empty() { - let first = net.pop().unwrap(); - self.network_id = first.id.unwrap(); - return Ok(self.network_id.clone()); - } - - let create_opts = CreateNetworkOptions { - name: self.name.as_str(), - check_duplicate: true, - driver: "bridge", - internal: true, - attachable: true, - ingress: false, - ipam: self.ipam.clone(), - enable_ipv6: false, - options: vec![("com.docker.network.bridge.name", "mayabridge0")] - .into_iter() - .collect(), - labels: vec![(self.label.as_str(), "true")].into_iter().collect(), - }; - - self.docker.create_network(create_opts).await.map(|r| { - self.network_id = r.id.unwrap(); - self.network_id.clone() - }) - } - - async fn network_remove(&self) -> Result<(), Error> { - // if the network is not found, its not an error, any other error is - // reported as such. Networks can only be destroyed when all containers - // attached to it are removed. 
To get a list of attached - // containers, use network_list() - if let Err(e) = self.docker.remove_network(&self.name).await { - if !matches!(e, Error::DockerResponseNotFoundError{..}) { - return Err(e); - } - } - - Ok(()) - } - - /// list all the docker networks - pub async fn network_list(&self) -> Result, Error> { - self.docker - .list_networks(Some(ListNetworksOptions { - filters: vec![("name", vec![self.name.as_str()])] - .into_iter() - .collect(), - })) - .await - } - - /// list containers - pub async fn list_containers( - &self, - ) -> Result, Error> { - self.docker - .list_containers(Some(ListContainersOptions { - all: true, - filters: vec![( - "label", - vec![format!("{}=true", self.label).as_str()], - )] - .into_iter() - .collect(), - ..Default::default() - })) - .await - } - - /// remove a container from the configuration - async fn remove_container(&self, name: &str) -> Result<(), Error> { - self.docker - .remove_container( - name, - Some(RemoveContainerOptions { - v: true, - force: true, - link: false, - }), - ) - .await?; - - Ok(()) - } - - /// remove all containers and its network - async fn remove_all(&self) -> Result<(), Error> { - for k in &self.containers { - self.stop(&k.0).await?; - self.remove_container(&k.0).await?; - while let Ok(_c) = self.docker.inspect_container(&k.0, None).await { - tokio::time::delay_for(Duration::from_millis(500)).await; - } - } - self.network_remove().await?; - Ok(()) - } - - /// we need to construct several objects to create a setup that meets our - /// liking: - /// - /// (1) hostconfig: that configures the host side of the container, i.e what - /// features/settings from the host perspective do we want too setup - /// for the container. 
(2) endpoints: this allows us to plugin in the - /// container into our network configuration (3) config: the actual - /// config which includes the above objects - async fn create_container( - &mut self, - name: &str, - ipv4: &str, - ) -> Result<(), Error> { - let host_config = HostConfig { - binds: Some(vec![ - format!("{}:{}", self.srcdir, self.srcdir), - "/nix:/nix:ro".into(), - "/dev/hugepages:/dev/hugepages:rw".into(), - ]), - mounts: Some(vec![ - // DPDK needs to have a /tmp - Mount { - target: Some("/tmp".into()), - typ: Some(MountTypeEnum::TMPFS), - ..Default::default() - }, - // mayastor needs to have a /var/tmp - Mount { - target: Some("/var/tmp".into()), - typ: Some(MountTypeEnum::TMPFS), - ..Default::default() - }, - ]), - cap_add: Some(vec![ - "SYS_ADMIN".to_string(), - "IPC_LOCK".into(), - "SYS_NICE".into(), - ]), - security_opt: Some(vec!["seccomp:unconfined".into()]), - ..Default::default() - }; - - let mut endpoints_config = HashMap::new(); - endpoints_config.insert( - self.name.as_str(), - EndpointSettings { - network_id: Some(self.network_id.to_string()), - ipam_config: Some(EndpointIpamConfig { - ipv4_address: Some(ipv4.into()), - ..Default::default() - }), - ..Default::default() - }, - ); - - let env = format!("MY_POD_IP={}", ipv4); - - let config = Config { - cmd: Some(vec![self.binary.as_str(), "-g", "0.0.0.0"]), - env: Some(vec![&env]), - image: None, // notice we do not have a base image here - hostname: Some(name), - host_config: Some(host_config), - networking_config: Some(NetworkingConfig { - endpoints_config, - }), - working_dir: Some(self.srcdir.as_str()), - volumes: Some( - vec![ - ("/dev/hugepages", HashMap::new()), - ("/nix", HashMap::new()), - (self.srcdir.as_str(), HashMap::new()), - ] - .into_iter() - .collect(), - ), - labels: Some( - vec![(self.label.as_str(), "true")].into_iter().collect(), - ), - ..Default::default() - }; - - let container = self - .docker - .create_container( - Some(CreateContainerOptions { - name, - }), - 
config, - ) - .await - .unwrap(); - - self.containers - .insert(name.to_string(), (container.id, ipv4.parse().unwrap())); - - Ok(()) - } - - /// start the container - async fn start(&self, name: &str) -> Result<(), Error> { - let id = self.containers.get(name).unwrap(); - self.docker - .start_container::<&str>(id.0.as_str(), None) - .await?; - - Ok(()) - } - - /// stop the container - async fn stop(&self, name: &str) -> Result<(), Error> { - let id = self.containers.get(name).unwrap(); - if let Err(e) = self - .docker - .stop_container( - id.0.as_str(), - Some(StopContainerOptions { - t: 3, - }), - ) - .await - { - // where already stopped - if !matches!(e, Error::DockerResponseNotModifiedError{..}) { - return Err(e); - } - } - - Ok(()) - } - - /// get the logs from the container. It would be nice to make it implicit - /// that is, when you make a rpc call, whatever logs where created due to - /// that are returned - pub async fn logs(&self, name: &str) -> Result<(), Error> { - let logs = self - .docker - .logs( - name, - Some(LogsOptions { - follow: false, - stdout: true, - stderr: true, - since: 0, // TODO log lines since last call? - until: 0, - timestamps: false, - tail: "all", - }), - ) - .try_collect::>() - .await?; - - logs.iter().for_each(|l| print!("{}:{}", name, l)); - Ok(()) - } - - /// start all the containers - async fn start_all(&mut self) -> Result<(), Error> { - for k in &self.containers { - self.start(&k.0).await?; - } - - Ok(()) - } - - /// inspect the given container - pub async fn inspect( - &self, - name: &str, - ) -> Result { - self.docker.inspect_container(name, None).await - } - - /// pause the container; unfortunately, when the API returns it does not - /// mean that the container indeed is frozen completely, in the sense - /// that it's not to be assumed that right after a call -- the container - /// stops responding. 
- pub async fn pause(&self, name: &str) -> Result<(), Error> { - let id = self.containers.get(name).unwrap(); - self.docker.pause_container(id.0.as_str()).await?; - - Ok(()) - } - - /// un_pause the container - pub async fn thaw(&self, name: &str) -> Result<(), Error> { - let id = self.containers.get(name).unwrap(); - self.docker.unpause_container(id.0.as_str()).await - } - - /// return grpc handles to the containers - pub async fn grpc_handles(&self) -> Result, ()> { - let mut handles = Vec::new(); - for v in &self.containers { - handles.push( - RpcHandle::connect( - v.0.clone(), - format!("{}:10124", v.1.1).parse::().unwrap(), - ) - .await, - ); - } - - Ok(handles) - } - - pub async fn down(&self) { - self.remove_all().await.unwrap(); - } -} - /// Mayastor test structure that simplifies sending futures. Mayastor has /// its own reactor, which is not tokio based, so we need to handle properly #[derive(Debug)] diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index cf581ad86..c63b5f817 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -24,7 +24,7 @@ pub mod compose; pub mod error_bdev; pub mod ms_exec; -pub use compose::{ComposeTest, MayastorTest, RpcHandle}; +pub use compose::MayastorTest; /// call F cnt times, and sleep for a duration between each invocation pub fn retry(mut cnt: u32, timeout: Duration, mut f: F) -> T From 8dc59fae4ba879871caa91d9b16f1c4d1d25d291 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 16 Nov 2020 21:36:45 +0000 Subject: [PATCH 54/92] Extend composer to allow for containers... ... with different binaries, eg: nats-server add port mapping so we map ports to the host add init to the containers to facilitate the tear down as it seems the bollard stop timeout isn't working. 
add simple test to test the library set container logs to debug but with h2 set to info :) (run with --nocapture to get the logs) --- composer/Cargo.toml | 3 + composer/src/lib.rs | 280 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 263 insertions(+), 20 deletions(-) diff --git a/composer/Cargo.toml b/composer/Cargo.toml index c8201af77..315795f46 100644 --- a/composer/Cargo.toml +++ b/composer/Cargo.toml @@ -14,3 +14,6 @@ crossbeam = "0.7.3" rpc = { path = "../rpc" } ipnetwork = "0.17.0" bollard = "0.8.0" + +[dev-dependencies] +tokio = { version = "0.2", features = ["full"] } diff --git a/composer/src/lib.rs b/composer/src/lib.rs index 453fc4277..d7cd137df 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -26,6 +26,7 @@ use bollard::{ Mount, MountTypeEnum, Network, + PortMap, }, Docker, }; @@ -49,7 +50,11 @@ pub struct RpcHandle { impl RpcHandle { /// connect to the containers and construct a handle - async fn connect(name: String, endpoint: SocketAddr) -> Self { + async fn connect( + name: String, + endpoint: SocketAddr, + ) -> Result { + let mut attempts = 60; loop { if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) .is_ok() @@ -58,6 +63,13 @@ impl RpcHandle { } else { thread::sleep(Duration::from_millis(101)); } + attempts -= 1; + if attempts == 0 { + return Err(format!( + "Failed to connect to {}/{}", + name, endpoint + )); + } } let mayastor = @@ -69,15 +81,142 @@ impl RpcHandle { .await .unwrap(); - Self { + Ok(Self { name, mayastor, bdev, endpoint, + }) + } +} + +/// Path to local binary and arguments +#[derive(Default, Clone)] +pub struct Binary { + path: String, + arguments: Vec, +} + +impl Binary { + /// Setup local binary from target debug and arguments + pub fn from_dbg(name: &str) -> Self { + let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + let srcdir = path.parent().unwrap().to_string_lossy(); + + Self::new(format!("{}/target/debug/{}", srcdir, name), vec![]) + } + /// Setup nix 
shell binary from path and arguments + pub fn from_nix(name: &str) -> Self { + Self::new(Self::which(name).expect("binary should exist"), vec![]) + } + /// Add single argument + /// Only one argument can be passed per use. So instead of: + /// + /// # Self::from_dbg("hello") + /// .with_arg("-n nats") + /// # ; + /// + /// usage would be: + /// + /// # Self::from_dbg("hello") + /// .with_arg("-n") + /// .with_arg("nats") + /// # ; + pub fn with_arg(mut self, arg: &str) -> Self { + self.arguments.push(arg.into()); + self + } + /// Add multiple arguments via a vector + pub fn with_args>(mut self, mut args: Vec) -> Self { + self.arguments.extend(args.drain(..).map(|s| s.into())); + self + } + + fn which(name: &str) -> std::io::Result { + let output = std::process::Command::new("which").arg(name).output()?; + Ok(String::from_utf8_lossy(&output.stdout).trim().into()) + } + fn new(path: String, args: Vec) -> Self { + Self { + path, + arguments: args, } } } +impl Into> for Binary { + fn into(self) -> Vec { + let mut v = vec![self.path.clone()]; + v.extend(self.arguments); + v + } +} + +/// Specs of the allowed containers include only the binary path +/// (relative to src) and the required arguments +#[derive(Default, Clone)] +pub struct ContainerSpec { + /// Name of the container + name: ContainerName, + /// Binary configuration + binary: Binary, + /// Port mapping to host ports + port_map: Option, + /// Use Init container + init: Option, + /// Key-Map of environment variables + /// Starts with RUST_LOG=debug,h2=info + env: HashMap, +} + +impl Into> for &ContainerSpec { + fn into(self) -> Vec { + self.binary.clone().into() + } +} + +impl ContainerSpec { + /// Create new ContainerSpec from name and binary + pub fn new(name: &str, binary: Binary) -> Self { + let mut env = HashMap::new(); + env.insert("RUST_LOG".to_string(), "debug,h2=info".to_string()); + Self { + name: name.into(), + binary, + init: Some(true), + env, + ..Default::default() + } + } + /// Add port mapping 
from container to host + pub fn with_portmap(mut self, from: &str, to: &str) -> Self { + let from = format!("{}/tcp", from); + let mut port_map = bollard::service::PortMap::new(); + let binding = bollard::service::PortBinding { + host_ip: None, + host_port: Some(to.into()), + }; + port_map.insert(from, Some(vec![binding])); + self.port_map = Some(port_map); + self + } + /// Add environment key-val, eg for setting the RUST_LOG + /// If a key already exists, the value is replaced + pub fn with_env(mut self, key: &str, val: &str) -> Self { + if let Some(old) = self.env.insert(key.into(), val.into()) { + println!("Replaced key {} val {} with val {}", key, old, val); + } + self + } + fn env_to_vec(&self) -> Vec { + let mut vec = vec![]; + self.env.iter().for_each(|(k, v)| { + vec.push(format!("{}={}", k, v)); + }); + vec + } +} + pub struct Builder { /// name of the experiment this name will be used as a network and labels /// this way we can "group" all objects within docker to match this test @@ -86,7 +225,7 @@ pub struct Builder { name: String, /// containers we want to create, note these are mayastor containers /// only - containers: Vec, + containers: Vec, /// the network for the tests used network: String, /// delete the container and network when dropped @@ -124,10 +263,22 @@ impl Builder { /// add a mayastor container with a name pub fn add_container(mut self, name: &str) -> Builder { - self.containers.push(name.to_owned()); + self.containers + .push(ContainerSpec::new(name, Binary::from_dbg("mayastor"))); + self + } + + /// add a generic container which runs a local binary + pub fn add_container_spec(mut self, spec: ContainerSpec) -> Builder { + self.containers.push(spec); self } + /// add a generic container which runs a local binary + pub fn add_container_bin(self, name: &str, bin: Binary) -> Builder { + self.add_container_spec(ContainerSpec::new(name, bin)) + } + /// clean on drop? 
pub fn with_clean(mut self, enable: bool) -> Builder { self.clean = enable; @@ -137,13 +288,20 @@ impl Builder { /// build the config and start the containers pub async fn build( self, + ) -> Result> { + let mut compose = self.build_only().await?; + compose.start_all().await?; + Ok(compose) + } + + /// build the config but don't start the containers + pub async fn build_only( + self, ) -> Result> { let net: Ipv4Network = self.network.parse()?; let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); let srcdir = path.parent().unwrap().to_string_lossy().into(); - let binary = format!("{}/target/debug/mayastor", srcdir); - let docker = Docker::connect_with_unix_defaults()?; let mut cfg = HashMap::new(); @@ -162,7 +320,6 @@ impl Builder { let mut compose = ComposeTest { name: self.name.clone(), srcdir, - binary, docker, network_id: "".to_string(), containers: Default::default(), @@ -175,16 +332,15 @@ impl Builder { compose.network_create().await.map_err(|e| e.to_string())?; // containers are created where the IPs are ordinal - for (i, name) in self.containers.iter().enumerate() { + for (i, spec) in self.containers.iter().enumerate() { compose .create_container( - name, + spec, &net.nth((i + 2) as u32).unwrap().to_string(), ) .await?; } - compose.start_all().await?; Ok(compose) } } @@ -205,8 +361,6 @@ pub struct ComposeTest { name: String, /// the source dir the tests are run in srcdir: String, - /// the binary we are using relative to srcdir - binary: String, /// handle to the docker daemon docker: Docker, /// the network id is used to attach containers to networks @@ -263,7 +417,7 @@ impl ComposeTest { name: self.name.as_str(), check_duplicate: true, driver: "bridge", - internal: true, + internal: false, attachable: true, ingress: false, ipam: self.ipam.clone(), @@ -362,7 +516,7 @@ impl ComposeTest { /// config which includes the above objects async fn create_container( &mut self, - name: &str, + spec: &ContainerSpec, ipv4: &str, ) -> Result<(), Error> { 
let host_config = HostConfig { @@ -391,6 +545,8 @@ impl ComposeTest { "SYS_NICE".into(), ]), security_opt: Some(vec!["seccomp:unconfined".into()]), + init: spec.init, + port_bindings: spec.port_map.clone(), ..Default::default() }; @@ -407,11 +563,23 @@ impl ComposeTest { }, ); - let env = format!("MY_POD_IP={}", ipv4); + let mut env = spec.env_to_vec(); + env.push(format!("MY_POD_IP={}", ipv4)); + + let cmd: Vec = spec.into(); + let name = spec.name.as_str(); + + // figure out why ports to expose based on the port mapping + let mut exposed_ports = HashMap::new(); + if let Some(map) = spec.port_map.as_ref() { + map.iter().for_each(|binding| { + exposed_ports.insert(binding.0.as_str(), HashMap::new()); + }) + } let config = Config { - cmd: Some(vec![self.binary.as_str(), "-g", "0.0.0.0"]), - env: Some(vec![&env]), + cmd: Some(cmd.iter().map(|s| s.as_str()).collect()), + env: Some(env.iter().map(|s| s.as_str()).collect()), image: None, // notice we do not have a base image here hostname: Some(name), host_config: Some(host_config), @@ -431,6 +599,7 @@ impl ComposeTest { labels: Some( vec![(self.label.as_str(), "true")].into_iter().collect(), ), + exposed_ports: Some(exposed_ports), ..Default::default() }; @@ -452,7 +621,7 @@ impl ComposeTest { } /// start the container - async fn start(&self, name: &str) -> Result<(), Error> { + pub async fn start(&self, name: &str) -> Result<(), Error> { let id = self.containers.get(name).unwrap(); self.docker .start_container::<&str>(id.0.as_str(), None) @@ -508,6 +677,16 @@ impl ComposeTest { Ok(()) } + /// get the logs from all of the containers. 
It would be nice to make it + /// implicit that is, when you make a rpc call, whatever logs where + /// created due to that are returned + pub async fn logs_all(&self) -> Result<(), Error> { + for container in &self.containers { + let _ = self.logs(&container.0).await; + } + Ok(()) + } + /// start all the containers async fn start_all(&mut self) -> Result<(), Error> { for k in &self.containers { @@ -517,6 +696,17 @@ impl ComposeTest { Ok(()) } + /// start the containers + pub async fn start_containers( + &self, + containers: Vec<&str>, + ) -> Result<(), Error> { + for k in containers { + self.start(k).await?; + } + Ok(()) + } + /// inspect the given container pub async fn inspect( &self, @@ -543,7 +733,7 @@ impl ComposeTest { } /// return grpc handles to the containers - pub async fn grpc_handles(&self) -> Result, ()> { + pub async fn grpc_handles(&self) -> Result, String> { let mut handles = Vec::new(); for v in &self.containers { handles.push( @@ -551,15 +741,65 @@ impl ComposeTest { v.0.clone(), format!("{}:10124", v.1.1).parse::().unwrap(), ) - .await, + .await?, ); } Ok(handles) } + /// return grpc handle to the container + pub async fn grpc_handle(&self, name: &str) -> Result { + match self.containers.iter().find(|&c| c.0 == name) { + Some(container) => Ok(RpcHandle::connect( + container.0.clone(), + format!("{}:10124", container.1.1) + .parse::() + .unwrap(), + ) + .await?), + None => Err(format!("Container {} not found!", name)), + } + } + /// explicitly remove all containers pub async fn down(&self) { self.remove_all().await.unwrap(); } } + +#[cfg(test)] +mod tests { + use super::*; + use rpc::mayastor::Null; + + #[tokio::test] + async fn compose() { + let test = Builder::new() + .name("composer") + .network("10.1.0.0/16") + .add_container_spec( + ContainerSpec::new( + "nats", + Binary::from_nix("nats-server").with_arg("-DV"), + ) + .with_portmap("4222", "4222"), + ) + .add_container("mayastor") + .add_container_bin( + "mayastor2", + 
Binary::from_dbg("mayastor") + .with_args(vec!["-n", "nats.composer"]), + ) + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdl = test.grpc_handle("mayastor").await.unwrap(); + hdl.mayastor.list_nexus(Null {}).await.expect("list nexus"); + + // run with --nocapture to get the logs + test.logs_all().await.unwrap(); + } +} From 9a8a9a5540c49a86f097b3585cc821f6d4968145 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Wed, 18 Nov 2020 12:38:07 +0000 Subject: [PATCH 55/92] Add simple versioning to the message bus... ... channels and message id's. add shared state to the services library. switch from log to the tracing crate. --- Cargo.lock | 44 +++++- csi/moac/mbus.js | 8 +- csi/moac/nats.js | 6 +- csi/moac/test/nats_test.js | 18 +-- mayastor-test/test_nats.js | 12 +- mayastor/src/core/env.rs | 17 ++- mayastor/src/subsys/mbus/registration.rs | 2 +- mbus-api/Cargo.toml | 3 + mbus-api/examples/client/main.rs | 15 +- mbus-api/examples/server/main.rs | 7 +- mbus-api/src/lib.rs | 172 +++++++++------------ mbus-api/src/mbus_nats.rs | 2 +- mbus-api/src/receive.rs | 23 ++- mbus-api/src/send.rs | 8 +- mbus-api/src/v0.rs | 185 +++++++++++++++++++++++ services/Cargo.toml | 13 +- services/common/src/lib.rs | 116 ++++++++++---- services/examples/kiiss-client/main.rs | 19 ++- services/examples/service/main.rs | 5 +- services/kiiss/src/server.rs | 34 +++-- 20 files changed, 494 insertions(+), 215 deletions(-) create mode 100644 mbus-api/src/v0.rs diff --git a/Cargo.lock b/Cargo.lock index b572b1305..666cce3f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1008,7 +1008,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", - "humantime", + "humantime 1.3.0", "log", "regex", "termcolor", @@ -1442,6 +1442,12 @@ dependencies = [ "quick-error", ] +[[package]] +name = "humantime" +version = "2.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" + [[package]] name = "hyper" version = "0.13.9" @@ -1811,7 +1817,10 @@ dependencies = [ "smol", "snafu", "structopt", + "strum", + "strum_macros", "tokio", + "tracing", ] [[package]] @@ -2840,19 +2849,24 @@ name = "services" version = "0.1.0" dependencies = [ "async-trait", + "composer", "dyn-clonable", - "env_logger", "futures", + "humantime 2.0.1", "lazy_static", - "log", "mbus_api", "nats", + "rpc", "serde", "serde_json", "smol", "snafu", + "state", "structopt", "tokio", + "tracing", + "tracing-futures", + "tracing-subscriber", ] [[package]] @@ -2998,6 +3012,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "state" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" + [[package]] name = "strsim" version = "0.7.0" @@ -3040,6 +3060,24 @@ dependencies = [ "syn 1.0.48", ] +[[package]] +name = "strum" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" + +[[package]] +name = "strum_macros" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" +dependencies = [ + "heck", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] + [[package]] name = "subtle" version = "2.3.0" diff --git a/csi/moac/mbus.js b/csi/moac/mbus.js index a00e2a0f1..ac700fea0 100755 --- a/csi/moac/mbus.js +++ b/csi/moac/mbus.js @@ -51,8 +51,8 @@ const opts = yargs const nc = nats.connect(opts.s); nc.on('connect', () => { if (opts._[0] === 'register') { - nc.publish('registry', JSON.stringify({ - 
id: "register", + nc.publish('v0/registry', JSON.stringify({ + id: "v0/register", sender: "moac", data: { id: opts.node, @@ -60,8 +60,8 @@ nc.on('connect', () => { } })); } else if (opts._[0] === 'deregister') { - nc.publish('registry', JSON.stringify({ - id: "deregister", + nc.publish('v0/registry', JSON.stringify({ + id: "v0/deregister", sender: "moac", data: { id: opts.node diff --git a/csi/moac/nats.js b/csi/moac/nats.js index 154b1b240..df667178d 100644 --- a/csi/moac/nats.js +++ b/csi/moac/nats.js @@ -117,7 +117,7 @@ class MessageBus { } _subscribe () { - this.nc.subscribe('registry', (err, msg) => { + this.nc.subscribe('v0/registry', (err, msg) => { if (err) { log.error(`Error receiving a registry message: ${err}`); return; @@ -127,9 +127,9 @@ class MessageBus { return; } - if (payload.id == "register") { + if (payload.id == "v0/register") { this._registrationReceived(payload.data); - } else if (payload.id == "deregister") { + } else if (payload.id == "v0/deregister") { this._deregistrationReceived(payload.data); } else { const id = payload.id; diff --git a/csi/moac/test/nats_test.js b/csi/moac/test/nats_test.js index cbce5f69f..4e19394e1 100644 --- a/csi/moac/test/nats_test.js +++ b/csi/moac/test/nats_test.js @@ -90,8 +90,8 @@ module.exports = function () { }); it('should register a node', async () => { - nc.publish('registry', JSON.stringify({ - id: 'register', + nc.publish('v0/registry', JSON.stringify({ + id: 'v0/register', data: { id: NODE_NAME, grpcEndpoint: GRPC_ENDPOINT } })); await waitUntil(async () => { @@ -103,8 +103,8 @@ module.exports = function () { }); it('should ignore register request with missing node name', async () => { - nc.publish('registry', JSON.stringify({ - id: 'register', + nc.publish('v0/registry', JSON.stringify({ + id: 'v0/register', data: { grpcEndpoint: GRPC_ENDPOINT } })); // small delay to wait for a possible crash of moac @@ -112,8 +112,8 @@ module.exports = function () { }); it('should ignore register request with missing 
grpc endpoint', async () => { - nc.publish('registry', JSON.stringify({ - id: 'register', + nc.publish('v0/registry', JSON.stringify({ + id: 'v0/register', data: { id: NODE_NAME } })); // small delay to wait for a possible crash of moac @@ -121,14 +121,14 @@ module.exports = function () { }); it('should not crash upon a request with invalid JSON', async () => { - nc.publish('register', '{"id": "NODE", "grpcEndpoint": "something"'); + nc.publish('v0/register', '{"id": "NODE", "grpcEndpoint": "something"'); // small delay to wait for a possible crash of moac await sleep(10); }); it('should deregister a node', async () => { - nc.publish('registry', JSON.stringify({ - id: 'deregister', + nc.publish('v0/registry', JSON.stringify({ + id: 'v0/deregister', data: { id: NODE_NAME } })); await waitUntil(async () => { diff --git a/mayastor-test/test_nats.js b/mayastor-test/test_nats.js index 89f73e0dc..f56d0d8f7 100644 --- a/mayastor-test/test_nats.js +++ b/mayastor-test/test_nats.js @@ -53,7 +53,7 @@ function stopNats (done) { } function assertRegisterMessage (msg) { - assert(JSON.parse(msg).id == "register" ); + assert(JSON.parse(msg).id == "v0/register" ); const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id', 'grpcEndpoint']); assert.strictEqual(args.id, NODE_NAME); @@ -91,7 +91,7 @@ describe('nats', function () { MAYASTOR_HB_INTERVAL: HB_INTERVAL }); // wait for the register message - const sid = client.subscribe('registry', (msg) => { + const sid = client.subscribe('v0/registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -100,7 +100,7 @@ describe('nats', function () { }); it('should keep sending registration messages', (done) => { - const sid = client.subscribe('registry', (msg) => { + const sid = client.subscribe('v0/registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -111,7 +111,7 @@ describe('nats', function () { // simulate outage of NATS server for a duration of two heartbeats stopNats(() 
=> { setTimeout(() => { - const sid = client.subscribe('registry', (msg) => { + const sid = client.subscribe('v0/registry', (msg) => { client.unsubscribe(sid); assertRegisterMessage(msg); done(); @@ -124,9 +124,9 @@ describe('nats', function () { }); it('should send a deregistration message when mayastor is shut down', (done) => { - const sid = client.subscribe('registry', (msg) => { + const sid = client.subscribe('v0/registry', (msg) => { client.unsubscribe(sid); - assert(JSON.parse(msg).id == "deregister" ); + assert(JSON.parse(msg).id == "v0/deregister" ); const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id']); assert.strictEqual(args.id, NODE_NAME); diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index a2addd808..d4e25aacf 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -14,7 +14,10 @@ use std::{ use byte_unit::{Byte, ByteUnit}; use futures::{channel::oneshot, future}; -use mbus_api::{ConfigGetCurrent, Message, ReplyConfig}; +use mbus_api::{ + v0::{Config::MayastorConfig, ConfigGetCurrent, ReplyConfig}, + Message, +}; use once_cell::sync::{Lazy, OnceCell}; use snafu::Snafu; use structopt::StructOpt; @@ -590,17 +593,15 @@ impl MayastorEnvironment { } #[allow(dead_code)] - async fn get_service_config(&self) -> ReplyConfig { + async fn get_service_config(&self) -> Result { if self.mbus_endpoint.is_some() { - ConfigGetCurrent { - kind: mbus_api::Config::MayastorConfig, + Ok(ConfigGetCurrent { + kind: MayastorConfig, } .request() - .await - // we need the library to be able to retry - .unwrap() + .await?) } else { - Default::default() + Ok(Default::default()) } } diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs index 538388927..2f9dc6f47 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -8,7 +8,7 @@ //! containing the node name and the grpc endpoint. 
use futures::{select, FutureExt, StreamExt}; -use mbus_api::*; +use mbus_api::{v0::*, *}; use once_cell::sync::OnceCell; use snafu::Snafu; use std::{env, time::Duration}; diff --git a/mbus-api/Cargo.toml b/mbus-api/Cargo.toml index dd67505d6..106804bb6 100644 --- a/mbus-api/Cargo.toml +++ b/mbus-api/Cargo.toml @@ -16,6 +16,9 @@ dyn-clonable = "0.9.0" smol = "1.0.0" once_cell = "1.4.1" snafu = "0.6" +strum = "0.19" +strum_macros = "0.19" +tracing = "0.1" [dependencies.serde] features = ["derive"] diff --git a/mbus-api/examples/client/main.rs b/mbus-api/examples/client/main.rs index f3f74c6fc..488ac3d20 100644 --- a/mbus-api/examples/client/main.rs +++ b/mbus-api/examples/client/main.rs @@ -14,7 +14,7 @@ struct CliArgs { url: String, /// Channel to send to - #[structopt(long, short, default_value = "default")] + #[structopt(long, short, default_value = "v0/default")] channel: Channel, /// With server in this binary @@ -65,6 +65,8 @@ async fn main() { .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); let cli_args = CliArgs::from_args(); + log::info!("Using args: {:?}", cli_args); + message_bus_init(cli_args.url).await; if cli_args.server { @@ -82,9 +84,12 @@ async fn main() { info!("Received reply: {:?}", reply); // We can also use the following api to specify a different channel and bus - let reply = - DummyRequest::Request(&DummyRequest {}, Channel::Default, bus()) - .await - .unwrap(); + let reply = DummyRequest::Request( + &DummyRequest {}, + Channel::v0(v0::ChannelVs::Default), + bus(), + ) + .await + .unwrap(); info!("Received reply: {:?}", reply); } diff --git a/mbus-api/examples/server/main.rs b/mbus-api/examples/server/main.rs index 802eda80d..27d14d47e 100644 --- a/mbus-api/examples/server/main.rs +++ b/mbus-api/examples/server/main.rs @@ -13,7 +13,7 @@ struct CliArgs { url: String, /// Channel to listen on - #[structopt(long, short, default_value = "default")] + #[structopt(long, short, default_value = "v0/default")] channel: Channel, /// Receiver 
version @@ -68,6 +68,9 @@ async fn main() { .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); let cli_args = CliArgs::from_args(); + log::info!("Using args: {:?}", cli_args); + log::info!("CH: {}", Channel::v0(v0::ChannelVs::Default).to_string()); + message_bus_init(cli_args.url).await; let mut sub = bus().subscribe(cli_args.channel).await.unwrap(); @@ -117,7 +120,7 @@ async fn receive_v3(sub: &mut nats::asynk::Subscription, count: u64) { message.try_into().unwrap(); message // same function can receive an error - .reply(Err(Error::WithMessage { + .reply(Err(BusError::WithMessage { message: format!("Fake Error {}", count), })) .await diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs index bc4409002..81eaed7b6 100644 --- a/mbus-api/src/lib.rs +++ b/mbus-api/src/lib.rs @@ -8,6 +8,8 @@ mod mbus_nats; pub mod receive; /// send messages traits pub mod send; +/// Version 0 of the messages +pub mod v0; use async_trait::async_trait; use dyn_clonable::clonable; @@ -19,138 +21,102 @@ use smol::io; use snafu::Snafu; use std::{fmt::Debug, marker::PhantomData, str::FromStr, time::Duration}; +/// Common error type for send/receive +pub type Error = io::Error; + /// Available Message Bus channels #[derive(Clone, Debug)] +#[allow(non_camel_case_types)] pub enum Channel { - /// Default - Default, - /// Registration of mayastor instances with the control plane - Registry, - /// Keep it In Sync Service - Kiiss, - /// Reply to requested Channel - Reply(String), + /// Version 0 of the Channels + v0(v0::ChannelVs), } impl FromStr for Channel { - type Err = String; + type Err = strum::ParseError; fn from_str(source: &str) -> Result { - match source { - "default" => Ok(Self::Default), - "registry" => Ok(Self::Registry), - "kiiss" => Ok(Self::Kiiss), - _ => Err(format!("Could not parse the channel: {}", source)), + match &source[0 ..= 2] { + "v0/" => { + let c: v0::ChannelVs = source[3 ..].parse()?; + Ok(Self::v0(c)) + } + _ => Err(strum::ParseError::VariantNotFound), } } } - 
-impl Default for Channel { - fn default() -> Self { - Channel::Default +impl ToString for Channel { + fn to_string(&self) -> String { + match self { + Self::v0(channel) => format!("v0/{}", channel.to_string()), + } } } -impl std::fmt::Display for Channel { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Channel::Default => write!(f, "default"), - Channel::Registry => write!(f, "registry"), - Channel::Kiiss => write!(f, "kiiss"), - Channel::Reply(ch) => write!(f, "{}", ch), - } +impl Default for Channel { + fn default() -> Self { + Channel::v0(v0::ChannelVs::Default) } } /// Message id which uniquely identifies every type of unsolicited message /// The solicited (replies) message do not currently carry an id as they /// are sent to a specific requested channel -#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -#[serde(rename_all = "camelCase")] +#[derive(Debug, PartialEq, Clone)] +#[allow(non_camel_case_types)] pub enum MessageId { - /// Default - Default, - /// Update Config - ConfigUpdate, - /// Request current Config - ConfigGetCurrent, - /// Register mayastor - Register, - /// Deregister mayastor - Deregister, + /// Version 0 + v0(v0::MessageIdVs), } -/// Sender identification (eg which mayastor instance sent the message) -pub type SenderId = String; - -/// Mayastor configurations -/// Currently, we have the global mayastor config and the child states config -#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] -pub enum Config { - /// Mayastor global config - MayastorConfig, - /// Mayastor child states config - ChildStatesConfig, +impl Serialize for MessageId { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(self.to_string().as_str()) + } } -impl Default for Config { - fn default() -> Self { - Config::MayastorConfig +impl<'de> Deserialize<'de> for MessageId { + fn deserialize(deserializer: D) -> Result + where + D: 
serde::Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + match string.parse() { + Ok(id) => Ok(id), + Err(error) => { + let error = + format!("Failed to parse into MessageId, error: {}", error); + Err(serde::de::Error::custom(error)) + } + } } } -/// Config Messages - -/// Update mayastor configuration -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct ConfigUpdate { - /// type of config being updated - pub kind: Config, - /// actual config data - pub data: Vec, -} -bus_impl_message_all!(ConfigUpdate, ConfigUpdate, (), Kiiss); +impl FromStr for MessageId { + type Err = strum::ParseError; -/// Request message configuration used by mayastor to request configuration -/// from a control plane service -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct ConfigGetCurrent { - /// type of config requested - pub kind: Config, -} -/// Reply message configuration returned by a controle plane service to mayastor -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct ReplyConfig { - /// config data - pub config: Vec, + fn from_str(source: &str) -> Result { + match &source[0 ..= 2] { + "v0/" => { + let id: v0::MessageIdVs = source[3 ..].parse()?; + Ok(Self::v0(id)) + } + _ => Err(strum::ParseError::VariantNotFound), + } + } } -bus_impl_message_all!( - ConfigGetCurrent, - ConfigGetCurrent, - ReplyConfig, - Kiiss, - GetConfig -); - -/// Registration - -/// Register message payload -#[derive(Serialize, Deserialize, Default, Debug, Clone)] -pub struct Register { - /// id of the mayastor instance - pub id: String, - /// grpc_endpoint of the mayastor instance - #[serde(rename = "grpcEndpoint")] - pub grpc_endpoint: String, +impl ToString for MessageId { + fn to_string(&self) -> String { + match self { + Self::v0(id) => format!("v0/{}", id.to_string()), + } + } } -bus_impl_message_all!(Register, Register, (), Registry); -/// Deregister message payload -#[derive(Serialize, Deserialize, Default, Debug, Clone)] 
-pub struct Deregister { - /// id of the mayastor instance - pub id: String, -} -bus_impl_message_all!(Deregister, Deregister, (), Registry); +/// Sender identification (eg which mayastor instance sent the message) +pub type SenderId = String; /// This trait defines all Bus Messages which must: /// 1 - be uniquely identifiable via MessageId @@ -198,7 +164,7 @@ struct SendPayload { /// for any other operation #[derive(Serialize, Deserialize, Debug, Snafu)] #[allow(missing_docs)] -pub enum Error { +pub enum BusError { #[snafu(display("Generic Failure, message={}", message))] WithMessage { message: String }, #[snafu(display("Ill formed request when deserializing the request"))] @@ -208,7 +174,7 @@ pub enum Error { /// Payload returned to the sender /// Includes an error as the operations may be fallible #[derive(Serialize, Deserialize)] -pub struct ReplyPayload(pub Result); +pub struct ReplyPayload(pub Result); // todo: implement thin wrappers on these /// MessageBus raw Message diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs index f9fca7066..43a382b97 100644 --- a/mbus-api/src/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -1,8 +1,8 @@ use super::*; -use log::{info, warn}; use nats::asynk::Connection; use once_cell::sync::OnceCell; use smol::io; +use tracing::{info, warn}; static NATS_MSG_BUS: OnceCell = OnceCell::new(); /// Initialise the Nats Message Bus with the current tokio runtime diff --git a/mbus-api/src/receive.rs b/mbus-api/src/receive.rs index 1507fd30d..e01cbaf8c 100644 --- a/mbus-api/src/receive.rs +++ b/mbus-api/src/receive.rs @@ -53,8 +53,8 @@ where let request: SendPayload = serde_json::from_slice(&bus_message.data)?; if request.id == request.data.id() { - log::info!( - "We have a message from '{}': {:?}", + log::trace!( + "Received message from '{}': {:?}", request.sender, request.data ); @@ -78,10 +78,23 @@ pub struct ReceivedRawMessage<'a> { bus_msg: &'a BusMessage, } +impl std::fmt::Display for ReceivedRawMessage<'_> { + fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "channel: {}, msg_id: {:?}, reply_id: {:?}, data: {:?}", + self.bus_msg.subject, + self.id(), + self.bus_msg.reply, + std::str::from_utf8(&self.bus_msg.data) + ) + } +} + impl<'a> ReceivedRawMessage<'a> { /// Get a copy of the actual payload data which was sent /// May fail if the raw data cannot be deserialized into `S` - pub fn inner>(&self) -> io::Result { + pub fn inner + Message>(&self) -> io::Result { let request: SendPayload = serde_json::from_slice(&self.bus_msg.data)?; Ok(request.data) @@ -154,8 +167,8 @@ impl From for ReplyPayload { } } -impl From> for ReplyPayload { - fn from(val: Result) -> Self { +impl From> for ReplyPayload { + fn from(val: Result) -> Self { ReplyPayload(val) } } diff --git a/mbus-api/src/send.rs b/mbus-api/src/send.rs index af01c8168..bbb8c3bbb 100644 --- a/mbus-api/src/send.rs +++ b/mbus-api/src/send.rs @@ -139,12 +139,8 @@ macro_rules! bus_impl_message { impl Message for $S { type Reply = $R; - fn id(&self) -> MessageId { - MessageId::$I - } - fn channel(&self) -> Channel { - Channel::$C - } + impl_channel_id!($I, $C); + async fn publish(&self) -> smol::io::Result<()> { $T::Publish(self, self.channel(), bus()).await } diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs new file mode 100644 index 000000000..893db4e16 --- /dev/null +++ b/mbus-api/src/v0.rs @@ -0,0 +1,185 @@ +use super::*; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use strum_macros::EnumString; + +/// Versioned Channels +#[derive(Clone, Debug, EnumString, strum_macros::ToString)] +#[strum(serialize_all = "snake_case")] +pub enum ChannelVs { + /// Default + Default, + /// Registration of mayastor instances with the control plane + Registry, + /// Node Service which exposes the registered mayastor instances + Node, + /// Keep it In Sync Service + Kiiss, +} +impl Default for ChannelVs { + fn default() -> Self { + ChannelVs::Default + } +} + +impl From for Channel { + fn 
from(channel: ChannelVs) -> Self { + Channel::v0(channel) + } +} + +/// Versioned Message Id's +#[derive( + Debug, PartialEq, Clone, strum_macros::ToString, strum_macros::EnumString, +)] +#[strum(serialize_all = "camelCase")] +pub enum MessageIdVs { + /// Default + Default, + /// Update Config + ConfigUpdate, + /// Request current Config + ConfigGetCurrent, + /// Register mayastor + Register, + /// Deregister mayastor + Deregister, + /// Node Service + /// Get all node information + GetNodes, +} + +// Only V0 should export this macro +// This allows the example code to use the v0 default +// Otherwise they have to impl whatever version they require +#[macro_export] +/// Use version 0 of the Message and Channel +macro_rules! impl_channel_id { + ($I:ident, $C:ident) => { + fn id(&self) -> MessageId { + MessageId::v0(v0::MessageIdVs::$I) + } + fn channel(&self) -> Channel { + Channel::v0(v0::ChannelVs::$C) + } + }; +} + +/// Mayastor configurations +/// Currently, we have the global mayastor config and the child states config +#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] +pub enum Config { + /// Mayastor global config + MayastorConfig, + /// Mayastor child states config + ChildStatesConfig, +} +impl Default for Config { + fn default() -> Self { + Config::MayastorConfig + } +} + +/// Config Messages + +/// Update mayastor configuration +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct ConfigUpdate { + /// type of config being updated + pub kind: Config, + /// actual config data + pub data: Vec, +} +bus_impl_message_all!(ConfigUpdate, ConfigUpdate, (), Kiiss); + +/// Request message configuration used by mayastor to request configuration +/// from a control plane service +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct ConfigGetCurrent { + /// type of config requested + pub kind: Config, +} +/// Reply message configuration returned by a controle plane service to mayastor +#[derive(Serialize, Deserialize, 
Debug, Default, Clone)] +pub struct ReplyConfig { + /// config data + pub config: Vec, +} +bus_impl_message_all!( + ConfigGetCurrent, + ConfigGetCurrent, + ReplyConfig, + Kiiss, + GetConfig +); + +/// Registration + +/// Register message payload +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Register { + /// id of the mayastor instance + pub id: String, + /// grpc_endpoint of the mayastor instance + pub grpc_endpoint: String, +} +bus_impl_message_all!(Register, Register, (), Registry); + +/// Deregister message payload +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct Deregister { + /// id of the mayastor instance + pub id: String, +} +bus_impl_message_all!(Deregister, Deregister, (), Registry); + +/// Node Service + +/// Get all the nodes +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct GetNodes {} + +/// State of the Node +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + EnumString, + strum_macros::ToString, + Eq, + PartialEq, +)] +#[strum(serialize_all = "camelCase")] +pub enum NodeState { + /// Node is deemed online if it has not missed the + /// registration keep alive deadline + Online, + /// Node is deemed offline if has missed the + /// registration keep alive deadline + Offline, +} + +impl Default for NodeState { + fn default() -> Self { + Self::Offline + } +} + +/// Node information +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Node { + /// id of the mayastor instance + pub id: String, + /// grpc_endpoint of the mayastor instance + pub grpc_endpoint: String, + /// deemed state of the node + pub state: NodeState, +} + +/// All the nodes +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct Nodes(pub Vec); +bus_impl_message_all!(GetNodes, GetNodes, Nodes, Node); diff --git a/services/Cargo.toml b/services/Cargo.toml index bcd6ded66..1c0ff89f7 
100644 --- a/services/Cargo.toml +++ b/services/Cargo.toml @@ -16,17 +16,24 @@ path = "common/src/lib.rs" mbus_api = { path = "../mbus-api" } nats = "0.8" structopt = "0.3.15" -log = "0.4.11" tokio = { version = "0.2", features = ["full"] } futures = "0.3.6" -env_logger = "0.7" serde_json = "1.0" async-trait = "0.1.36" dyn-clonable = "0.9.0" smol = "1.0.0" snafu = "0.6" lazy_static = "1.4.0" +humantime = "2.0.1" +state = "0.4.2" +tracing = "0.1" +tracing-subscriber = "0.2" +tracing-futures = "0.2.4" + +[dev-dependencies] +composer = { path = "../composer" } +rpc = { path = "../rpc" } [dependencies.serde] features = ["derive"] -version = "1.0" \ No newline at end of file +version = "1.0" diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index d6cbac12f..e0969a084 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -8,25 +8,26 @@ use async_trait::async_trait; use dyn_clonable::clonable; use futures::{future::join_all, stream::StreamExt}; use mbus_api::*; -use smol::io; use snafu::{OptionExt, ResultExt, Snafu}; -use std::collections::HashMap; +use state::Container; +use std::{collections::HashMap, convert::Into, ops::Deref}; +use tracing::{debug, error}; #[derive(Debug, Snafu)] #[allow(missing_docs)] pub enum ServiceError { - #[snafu(display("Channel {} has been closed.", channel))] + #[snafu(display("Channel {} has been closed.", channel.to_string()))] GetMessage { channel: Channel, }, - #[snafu(display("Failed to subscribe on Channel {}", channel))] + #[snafu(display("Failed to subscribe on Channel {}", channel.to_string()))] Subscribe { channel: Channel, - source: io::Error, + source: Error, }, GetMessageId { channel: Channel, - source: io::Error, + source: Error, }, FindSubscription { channel: Channel, @@ -35,32 +36,43 @@ pub enum ServiceError { HandleMessage { channel: Channel, id: MessageId, - source: io::Error, + source: Error, }, } /// Runnable service with N subscriptions which listen on a given /// message bus 
channel on a specific ID -#[derive(Default)] pub struct Service { server: String, channel: Channel, subscriptions: HashMap>>, + shared_state: std::sync::Arc, +} + +impl Default for Service { + fn default() -> Self { + Self { + server: "".to_string(), + channel: Default::default(), + subscriptions: Default::default(), + shared_state: std::sync::Arc::new(Container::new()), + } + } } /// Service Arguments for the service handler callback pub struct Arguments<'a> { /// Service context, like access to the message bus - pub context: Context<'a>, + pub context: &'a Context<'a>, /// Access to the actual message bus request pub request: Request<'a>, } impl<'a> Arguments<'a> { /// Returns a new Service Argument to be use by a Service Handler - pub fn new(bus: &'a DynBus, msg: &'a BusMessage) -> Self { + pub fn new(context: &'a Context, msg: &'a BusMessage) -> Self { Self { - context: bus.into(), + context, request: msg.into(), } } @@ -71,21 +83,27 @@ impl<'a> Arguments<'a> { #[derive(Clone)] pub struct Context<'a> { bus: &'a DynBus, + state: &'a Container, } -impl<'a> From<&'a DynBus> for Context<'a> { - fn from(bus: &'a DynBus) -> Self { +impl<'a> Context<'a> { + /// create a new context + pub fn new(bus: &'a DynBus, state: &'a Container) -> Self { Self { bus, + state, } } -} - -impl<'a> Context<'a> { /// get the message bus from the context pub fn get_bus_as_ref(&self) -> &'a DynBus { self.bus } + /// get the shared state of type `T` from the context + pub fn get_state(&self) -> &T { + self.state + .try_get() + .expect("Requested data type not shared via with_shared_data!") + } } /// Service Request received via the message bus @@ -97,24 +115,52 @@ pub type Request<'a> = ReceivedRawMessage<'a>; /// which processes the messages and a filter to match message types pub trait ServiceSubscriber: Clone + Send + Sync { /// async handler which processes the messages - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error>; + async fn handler(&self, args: 
Arguments<'_>) -> Result<(), Error>; /// filter which identifies which messages may be routed to the handler fn filter(&self) -> Vec; } impl Service { /// Setup default service connecting to `server` on subject `channel` - pub fn builder(server: String, channel: Channel) -> Self { + pub fn builder(server: String, channel: impl Into) -> Self { Self { server, - channel, + channel: channel.into(), ..Default::default() } } - /// Setup default `channel` - pub fn with_channel(mut self, channel: Channel) -> Self { - self.channel = channel; + /// Setup default `channel` where `with_subscription` will listen on + pub fn with_channel(mut self, channel: impl Into) -> Self { + self.channel = channel.into(); + self + } + + /// Add a new service-wide shared state which can be retried in the handlers + /// (more than one type of data can be added). + /// The type must be `Send + Sync + 'static`. + /// + /// Example: + /// # async fn main() { + /// # Service::builder(cli_args.url, Channel::Registry) + /// .with_shared_state(NodeStore::default()) + /// .with_shared_state(More {}) + /// .with_subscription(ServiceHandler::::default()) + /// # .run().await; + /// + /// # async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + /// let store: &NodeStore = args.context.get_state(); + /// let more: &More = args.context.get_state(); + /// # Ok(()) + /// # } + pub fn with_shared_state(self, state: T) -> Self { + let type_name = std::any::type_name::(); + if !self.shared_state.set(state) { + panic!(format!( + "Shared state for type '{}' has already been set!", + type_name + )); + } self } @@ -151,6 +197,7 @@ impl Service { bus: DynBus, channel: Channel, subscriptions: &[Box], + state: std::sync::Arc, ) -> Result<(), ServiceError> { let mut handle = bus.subscribe(channel.clone()).await.context(Subscribe { @@ -161,12 +208,15 @@ impl Service { let message = handle.next().await.context(GetMessage { channel: channel.clone(), })?; - let args = Arguments::new(&bus, &message); + + let 
context = Context::new(&bus, state.deref()); + let args = Arguments::new(&context, &message); + debug!("Processing message: {{ {} }}", args.request); if let Err(error) = Self::process_message(args, &subscriptions).await { - log::error!("Error processing message: {}", error); + error!("Error processing message: {}", error); } } } @@ -201,7 +251,7 @@ impl Service { if let Err(error) = result.as_ref() { // todo: should an error be returned to the sender? - log::error!( + error!( "Error handling message id {:?}: {:?}", subscription.filter(), error @@ -228,10 +278,16 @@ impl Service { let bus = bus.clone(); let channel = subscriptions.0.clone(); let subscriptions = subscriptions.1.clone(); + let state = self.shared_state.clone(); let handle = tokio::spawn(async move { - Self::run_channel(bus, channel.parse().unwrap(), &subscriptions) - .await + Self::run_channel( + bus, + channel.parse().unwrap(), + &subscriptions, + state, + ) + .await }); threads.push(handle); @@ -241,11 +297,9 @@ impl Service { .await .iter() .for_each(|result| match result { - Err(error) => { - log::error!("Failed to wait for thread: {:?}", error) - } + Err(error) => error!("Failed to wait for thread: {:?}", error), Ok(Err(error)) => { - log::error!("Error running channel thread: {:?}", error) + error!("Error running channel thread: {:?}", error) } _ => {} }); diff --git a/services/examples/kiiss-client/main.rs b/services/examples/kiiss-client/main.rs index 98190fd65..a5c271276 100644 --- a/services/examples/kiiss-client/main.rs +++ b/services/examples/kiiss-client/main.rs @@ -1,6 +1,6 @@ -use log::info; -use mbus_api::*; +use mbus_api::{v0::*, *}; use structopt::StructOpt; +use tracing::info; #[derive(Debug, StructOpt)] struct CliArgs { @@ -11,12 +11,17 @@ struct CliArgs { url: String, } +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + 
tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + #[tokio::main] async fn main() { - env_logger::init_from_env( - env_logger::Env::default() - .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), - ); + init_tracing(); client().await; } @@ -37,7 +42,7 @@ async fn client() { &ConfigGetCurrent { kind: Config::MayastorConfig, }, - Channel::Kiiss, + Channel::v0(v0::ChannelVs::Kiiss), bus(), ) .await diff --git a/services/examples/service/main.rs b/services/examples/service/main.rs index 1eaa92222..ca25a60eb 100644 --- a/services/examples/service/main.rs +++ b/services/examples/service/main.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use common::*; use mbus_api::*; use serde::{Deserialize, Serialize}; -use smol::io; use std::{convert::TryInto, marker::PhantomData}; use structopt::StructOpt; @@ -36,7 +35,7 @@ bus_impl_message_all!(GetSvcName, Default, SvcName, Default); #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let msg: ReceivedMessage = args.request.try_into()?; @@ -73,7 +72,7 @@ async fn client() { async fn server() { let cli_args = CliArgs::from_args(); - Service::builder(cli_args.url, Channel::Default) + Service::builder(cli_args.url, v0::ChannelVs::Default) .with_subscription(ServiceHandler::::default()) .run() .await; diff --git a/services/kiiss/src/server.rs b/services/kiiss/src/server.rs index 6b6c77b88..91ef1a9b1 100644 --- a/services/kiiss/src/server.rs +++ b/services/kiiss/src/server.rs @@ -3,12 +3,11 @@ extern crate lazy_static; use async_trait::async_trait; use common::*; -use log::info; -use mbus_api::*; -use smol::io; +use mbus_api::{v0::*, *}; use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; use structopt::StructOpt; use tokio::sync::Mutex; +use tracing::info; #[derive(Debug, StructOpt)] struct CliArgs { @@ -37,7 +36,7 @@ lazy_static! 
{ #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let data: ConfigUpdate = args.request.inner()?; info!("Received: {:?}", data); @@ -66,7 +65,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let data: ConfigGetCurrent = args.request.inner()?; info!("Received: {:?}", data); @@ -85,14 +84,14 @@ impl ServiceSubscriber for ServiceHandler { .await } None => { - msg.reply(Err(Error::WithMessage { + msg.reply(Err(BusError::WithMessage { message: "Config is missing".into(), })) .await } }, None => { - msg.reply(Err(Error::WithMessage { + msg.reply(Err(BusError::WithMessage { message: "Config is missing".into(), })) .await @@ -106,7 +105,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let _: ReceivedMessage = args.request.try_into()?; Ok(()) } @@ -117,7 +116,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), io::Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let _: ReceivedMessage = args.request.try_into()?; Ok(()) } @@ -126,12 +125,17 @@ impl ServiceSubscriber for ServiceHandler { } } +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + #[tokio::main] async fn main() { - 
env_logger::init_from_env( - env_logger::Env::default() - .filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), - ); + init_tracing(); let cli_args = CliArgs::from_args(); info!("Using options: {:?}", &cli_args); @@ -140,10 +144,10 @@ async fn main() { } async fn server(cli_args: CliArgs) { - Service::builder(cli_args.url, Channel::Kiiss) + Service::builder(cli_args.url, ChannelVs::Kiiss) .with_subscription(ServiceHandler::::default()) .with_subscription(ServiceHandler::::default()) - .with_channel(Channel::Registry) + .with_channel(ChannelVs::Registry) .with_subscription(ServiceHandler::::default()) .with_subscription(ServiceHandler::::default()) .run() From 665cf3b6956cb6e9d6beb990da70cf62fb47e6fb Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 6 Nov 2020 14:58:04 +0000 Subject: [PATCH 56/92] Add initial node service... which listens on the registration channel for mayastor instances and keeps a record of them. It starts a watchdog with a set timeout for each instance and deems a node as offline if the deadline is not met. Reports this information via the message bus, which is to be consumed by the rest service. Added compose test. 
--- services/Cargo.toml | 4 + services/examples/node-client/main.rs | 36 +++ services/node/src/server.rs | 309 ++++++++++++++++++++++++++ 3 files changed, 349 insertions(+) create mode 100644 services/examples/node-client/main.rs create mode 100644 services/node/src/server.rs diff --git a/services/Cargo.toml b/services/Cargo.toml index 1c0ff89f7..0c0e04c61 100644 --- a/services/Cargo.toml +++ b/services/Cargo.toml @@ -8,6 +8,10 @@ edition = "2018" name = "kiiss" path = "kiiss/src/server.rs" +[[bin]] +name = "node" +path = "node/src/server.rs" + [lib] name = "common" path = "common/src/lib.rs" diff --git a/services/examples/node-client/main.rs b/services/examples/node-client/main.rs new file mode 100644 index 000000000..403a96729 --- /dev/null +++ b/services/examples/node-client/main.rs @@ -0,0 +1,36 @@ +use mbus_api::{v0::*, *}; +use structopt::StructOpt; +use tracing::info; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + url: String, +} + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +#[tokio::main] +async fn main() { + init_tracing(); + + client().await; +} + +async fn client() { + let cli_args = CliArgs::from_args(); + mbus_api::message_bus_init(cli_args.url).await; + + let nodes = GetNodes {}.request().await.unwrap(); + + info!("Received Nodes: {:?}", nodes); +} diff --git a/services/node/src/server.rs b/services/node/src/server.rs new file mode 100644 index 000000000..1e131faee --- /dev/null +++ b/services/node/src/server.rs @@ -0,0 +1,309 @@ +use async_trait::async_trait; +use common::*; +use mbus_api::{v0::*, *}; +use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; +use 
structopt::StructOpt; +use tokio::sync::Mutex; +use tracing::{error, info}; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + nats: String, + /// Deadline for the mayastor instance keep alive registration + /// Default: 20s + #[structopt(long, short, default_value = "20s")] + deadline: humantime::Duration, +} + +/// Needed so we can implement the ServiceSubscriber trait for +/// the message types external to the crate +#[derive(Clone, Default)] +struct ServiceHandler { + data: PhantomData, +} + +/// Watchdog with which must be pet within the deadline, otherwise +/// it triggers the `on_timeout` future +#[derive(Clone)] +struct Watchdog { + deadline: std::time::Duration, + pet_chan: tokio::sync::mpsc::Sender<()>, +} + +impl Watchdog { + /// new empty watchdog with a timeout + pub fn new(deadline: std::time::Duration) -> Self { + Self { + deadline, + pet_chan: tokio::sync::mpsc::channel(1).0, + } + } + + /// arm watchdog with self timeout and execute error callback if + /// the deadline is not met + pub fn arm(&mut self, on_timeout: T) + where + T: std::future::Future + Send + 'static, + T::Output: Send + 'static, + { + let deadline = self.deadline; + let (s, mut r) = tokio::sync::mpsc::channel(1); + self.pet_chan = s; + tokio::spawn(async move { + let result = tokio::time::timeout(deadline, r.recv()).await; + if result.is_err() { + on_timeout.await; + } + }); + } + + /// meet the deadline + #[allow(dead_code)] + pub async fn pet( + &mut self, + ) -> Result<(), tokio::sync::mpsc::error::SendError<()>> { + self.pet_chan.send(()).await + } +} + +/// In memory database of all nodes which we know of and their state +#[derive(Default, Clone)] +struct NodeStore { + inner: std::sync::Arc, +} +struct NodeStoreInner { + state: Mutex>, + deadline: std::time::Duration, +} +impl Default for 
NodeStoreInner { + fn default() -> Self { + Self { + deadline: CliArgs::from_args().deadline.into(), + state: Default::default(), + } + } +} + +impl NodeStore { + /// Register a new node through the register information + async fn register(&self, registration: Register) { + let mut state = self.inner.state.lock().await; + + let mut watchdog = Watchdog::new(self.inner.deadline); + let id = registration.id.clone(); + let store = self.clone(); + let deadline = self.inner.deadline; + watchdog.arm(async move { + error!( + "Node id {} missed the registration deadline of {:?}!", + id, deadline + ); + store.offline(id).await; + }); + + let id = registration.id.clone(); + let node = Node { + id: registration.id, + grpc_endpoint: registration.grpc_endpoint, + state: NodeState::Online, + }; + state.insert(id, (node, watchdog)); + } + /// Deregister a node through the deregister information + async fn deregister(&self, node: Deregister) { + let mut state = self.inner.state.lock().await; + state.remove(&node.id); + } + /// Offline node through its id + async fn offline(&self, id: String) { + let mut state = self.inner.state.lock().await; + if let Some(n) = state.get_mut(&id) { + n.0.state = NodeState::Offline; + } + } + /// Get the list of nodes which we know of + async fn get_nodes(&self) -> Vec { + let nodes = self.inner.state.lock().await; + nodes + .values() + .cloned() + .collect::>() + .into_iter() + .map(|(n, _)| n) + .collect() + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + let store: &NodeStore = args.context.get_state(); + store.register(args.request.inner()?).await; + Ok(()) + } + fn filter(&self) -> Vec { + vec![Register::default().id()] + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + let store: &NodeStore = args.context.get_state(); + 
store.deregister(args.request.inner()?).await; + Ok(()) + } + fn filter(&self) -> Vec { + vec![Deregister::default().id()] + } +} + +#[async_trait] +impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + let request: ReceivedMessage = + args.request.try_into()?; + + let store: &NodeStore = args.context.get_state(); + let nodes = store.get_nodes().await; + request.reply(Nodes(nodes)).await + } + fn filter(&self) -> Vec { + vec![GetNodes::default().id()] + } +} + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +#[tokio::main] +async fn main() { + init_tracing(); + + let cli_args = CliArgs::from_args(); + info!("Using options: {:?}", &cli_args); + + server(cli_args).await; +} + +async fn server(cli_args: CliArgs) { + Service::builder(cli_args.nats, ChannelVs::Registry) + .with_shared_state(NodeStore::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_channel(ChannelVs::Node) + .with_subscription(ServiceHandler::::default()) + .run() + .await; +} + +#[cfg(test)] +mod tests { + use super::*; + use composer::*; + use rpc::mayastor::Null; + + async fn bus_init() -> Result<(), Box> { + tokio::time::timeout(std::time::Duration::from_secs(2), async { + mbus_api::message_bus_init("10.1.0.2".into()).await + }) + .await?; + Ok(()) + } + async fn wait_for_node() -> Result<(), Box> { + let _ = GetNodes {}.request().await?; + Ok(()) + } + fn init_tracing() { + if let Ok(filter) = + tracing_subscriber::EnvFilter::try_from_default_env() + { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } + } + // to avoid waiting for timeouts + async fn orderly_start( + test: &ComposeTest, + ) -> 
Result<(), Box> { + test.start_containers(vec!["nats", "node"]).await?; + + bus_init().await?; + wait_for_node().await?; + + test.start("mayastor").await?; + + let mut hdl = test.grpc_handle("mayastor").await?; + hdl.mayastor.list_nexus(Null {}).await?; + Ok(()) + } + + #[tokio::test] + async fn node() -> Result<(), Box> { + init_tracing(); + let nats_arg = vec!["-n", "nats.node"]; + let maya_name = "node-test-name"; + let test = Builder::new() + .name("node") + .network("10.1.0.0/16") + .add_container_bin( + "nats", + Binary::from_nix("nats-server").with_arg("-DV"), + ) + .add_container_bin( + "node", + Binary::from_dbg("node") + .with_args(nats_arg.clone()) + .with_args(vec!["-d", "2sec"]), + ) + .add_container_bin( + "mayastor", + Binary::from_dbg("mayastor") + .with_args(nats_arg.clone()) + .with_args(vec!["-N", maya_name]), + ) + .with_clean(true) + .build_only() + .await?; + + orderly_start(&test).await?; + + let nodes = GetNodes {}.request().await?; + tracing::info!("Nodes: {:?}", nodes); + assert_eq!(nodes.0.len(), 1); + assert_eq!( + nodes.0.first().unwrap(), + &Node { + id: maya_name.to_string(), + grpc_endpoint: "0.0.0.0:10124".to_string(), + state: NodeState::Online, + } + ); + tokio::time::delay_for(std::time::Duration::from_secs(2)).await; + let nodes = GetNodes {}.request().await?; + tracing::info!("Nodes: {:?}", nodes); + assert_eq!(nodes.0.len(), 1); + assert_eq!( + nodes.0.first().unwrap(), + &Node { + id: maya_name.to_string(), + grpc_endpoint: "0.0.0.0:10124".to_string(), + state: NodeState::Offline, + } + ); + + // run with --nocapture to see all the logs + test.logs_all().await?; + Ok(()) + } +} From f824c0626f3deaa34901ba391865f0139de16791 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 9 Nov 2020 18:14:41 +0000 Subject: [PATCH 57/92] Add initial rest service... ... which creates a rest service and subscribes for /nodes and /nodes{id} URI for which it returns a single and all nodes information. 
It gets the information through the message bus GetNodes{} request. Added a versioned rest client library. (Using temporary dummy CN and user certificate for https) --- Cargo.lock | 761 +++++++++++++++++++++++++++- Cargo.toml | 1 + composer/src/lib.rs | 2 +- nix/pkgs/mayastor/default.nix | 1 + rest/Cargo.toml | 40 ++ rest/certs/README | 1 + rest/certs/build.sh | 40 ++ rest/certs/openssl.cnf | 10 + rest/certs/rsa/ca.cert | 30 ++ rest/certs/rsa/ca.key | 52 ++ rest/certs/rsa/user.cert | 26 + rest/certs/rsa/user.chain | 56 ++ rest/certs/rsa/user.key | 28 + rest/certs/rsa/user.req | 15 + rest/certs/rsa/user.rsa | 27 + rest/service/src/main.rs | 90 ++++ rest/service/src/message_bus/mod.rs | 1 + rest/service/src/message_bus/v0.rs | 135 +++++ rest/src/lib.rs | 67 +++ rest/src/versions/mod.rs | 4 + rest/src/versions/v0.rs | 46 ++ rest/tests/test.rs | 22 + rest/tests/v0_test.rs | 96 ++++ 23 files changed, 1544 insertions(+), 7 deletions(-) create mode 100644 rest/Cargo.toml create mode 100644 rest/certs/README create mode 100755 rest/certs/build.sh create mode 100644 rest/certs/openssl.cnf create mode 100644 rest/certs/rsa/ca.cert create mode 100644 rest/certs/rsa/ca.key create mode 100644 rest/certs/rsa/user.cert create mode 100644 rest/certs/rsa/user.chain create mode 100644 rest/certs/rsa/user.key create mode 100644 rest/certs/rsa/user.req create mode 100644 rest/certs/rsa/user.rsa create mode 100644 rest/service/src/main.rs create mode 100644 rest/service/src/message_bus/mod.rs create mode 100644 rest/service/src/message_bus/v0.rs create mode 100644 rest/src/lib.rs create mode 100644 rest/src/versions/mod.rs create mode 100644 rest/src/versions/v0.rs create mode 100644 rest/tests/test.rs create mode 100644 rest/tests/v0_test.rs diff --git a/Cargo.lock b/Cargo.lock index 666cce3f3..b38277249 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,275 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+[[package]] +name = "actix-codec" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78d1833b3838dbe990df0f1f87baf640cf6146e898166afe401839d1b001e570" +dependencies = [ + "bitflags", + "bytes 0.5.6", + "futures-core", + "futures-sink", + "log", + "pin-project 0.4.27", + "tokio", + "tokio-util 0.3.1", +] + +[[package]] +name = "actix-connect" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "177837a10863f15ba8d3ae3ec12fac1099099529ed20083a27fdfe247381d0dc" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "derive_more", + "either", + "futures-util", + "http 0.2.1", + "log", + "rustls", + "tokio-rustls", + "trust-dns-proto", + "trust-dns-resolver", + "webpki", +] + +[[package]] +name = "actix-http" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "404df68c297f73b8d36c9c9056404913d25905a8f80127b0e5fe147c9c4b9f02" +dependencies = [ + "actix-codec", + "actix-connect", + "actix-rt", + "actix-service", + "actix-threadpool", + "actix-tls", + "actix-utils", + "base64 0.13.0", + "bitflags", + "brotli2", + "bytes 0.5.6", + "cookie", + "copyless", + "derive_more", + "either", + "encoding_rs", + "flate2", + "futures-channel", + "futures-core", + "futures-util", + "fxhash", + "h2", + "http 0.2.1", + "httparse", + "indexmap", + "itoa", + "language-tags", + "lazy_static", + "log", + "mime", + "percent-encoding 2.1.0", + "pin-project 1.0.1", + "rand 0.7.3", + "regex", + "serde", + "serde_json", + "serde_urlencoded", + "sha-1", + "slab", + "time 0.2.22", +] + +[[package]] +name = "actix-macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a60f9ba7c4e6df97f3aacb14bb5c0cd7d98a49dcbaed0d7f292912ad9a6a3ed2" +dependencies = [ + "quote 1.0.7", + "syn 1.0.48", +] + +[[package]] +name = "actix-router" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd1f7dbda1645bf7da33554db60891755f6c01c1b2169e2f4c492098d30c235" +dependencies = [ + "bytestring", + "http 0.2.1", + "log", + "regex", + "serde", +] + +[[package]] +name = "actix-rt" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "143fcc2912e0d1de2bcf4e2f720d2a60c28652ab4179685a1ee159e0fb3db227" +dependencies = [ + "actix-macros", + "actix-threadpool", + "copyless", + "futures-channel", + "futures-util", + "smallvec", + "tokio", +] + +[[package]] +name = "actix-server" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45407e6e672ca24784baa667c5d32ef109ccdd8d5e0b5ebb9ef8a67f4dfb708e" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "futures-channel", + "futures-util", + "log", + "mio", + "mio-uds", + "num_cpus", + "slab", + "socket2", +] + +[[package]] +name = "actix-service" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0052435d581b5be835d11f4eb3bce417c8af18d87ddf8ace99f8e67e595882bb" +dependencies = [ + "futures-util", + "pin-project 0.4.27", +] + +[[package]] +name = "actix-testing" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47239ca38799ab74ee6a8a94d1ce857014b2ac36f242f70f3f75a66f691e791c" +dependencies = [ + "actix-macros", + "actix-rt", + "actix-server", + "actix-service", + "log", + "socket2", +] + +[[package]] +name = "actix-threadpool" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d209f04d002854b9afd3743032a27b066158817965bf5d036824d19ac2cc0e30" +dependencies = [ + "derive_more", + "futures-channel", + "lazy_static", + "log", + "num_cpus", + "parking_lot", + "threadpool", +] + +[[package]] +name = "actix-tls" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "24789b7d7361cf5503a504ebe1c10806896f61e96eca9a7350e23001aca715fb" +dependencies = [ + "actix-codec", + "actix-service", + "actix-utils", + "futures-util", + "rustls", + "tokio-rustls", + "webpki", + "webpki-roots", +] + +[[package]] +name = "actix-utils" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9022dec56632d1d7979e59af14f0597a28a830a9c1c7fec8b2327eb9f16b5a" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "bitflags", + "bytes 0.5.6", + "either", + "futures-channel", + "futures-sink", + "futures-util", + "log", + "pin-project 0.4.27", + "slab", +] + +[[package]] +name = "actix-web" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88344b7a5ef27e5e09e73565379f69273dd3e2d29e82afc381b84d170d0a5631" +dependencies = [ + "actix-codec", + "actix-http", + "actix-macros", + "actix-router", + "actix-rt", + "actix-server", + "actix-service", + "actix-testing", + "actix-threadpool", + "actix-tls", + "actix-utils", + "actix-web-codegen", + "awc", + "bytes 0.5.6", + "derive_more", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "fxhash", + "log", + "mime", + "pin-project 1.0.1", + "regex", + "rustls", + "serde", + "serde_json", + "serde_urlencoded", + "socket2", + "time 0.2.22", + "tinyvec 1.0.1", + "url", +] + +[[package]] +name = "actix-web-codegen" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad26f77093333e0e7c6ffe54ebe3582d908a104e448723eec6d43d08b07143fb" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] + [[package]] name = "addr2line" version = "0.14.0" @@ -246,6 +516,31 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "awc" +version = "2.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "425980a1e58e5030a3e4b065a3d577c8f0e16142ea9d81f30614eae810c98577" +dependencies = [ + "actix-codec", + "actix-http", + "actix-rt", + "actix-service", + "base64 0.13.0", + "bytes 0.5.6", + "cfg-if 1.0.0", + "derive_more", + "futures-core", + "log", + "mime", + "percent-encoding 2.1.0", + "rand 0.7.3", + "rustls", + "serde", + "serde_json", + "serde_urlencoded", +] + [[package]] name = "backtrace" version = "0.3.54" @@ -260,6 +555,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base-x" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + [[package]] name = "base64" version = "0.10.1" @@ -275,6 +576,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + [[package]] name = "base64-url" version = "1.4.7" @@ -423,6 +730,26 @@ dependencies = [ "serde_with", ] +[[package]] +name = "brotli-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "brotli2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e" +dependencies = [ + "brotli-sys", + "libc", +] + [[package]] name = "build_const" version = "0.2.1" @@ -469,6 +796,15 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81a18687293a1546b67c246452202bbbf143d239cb43494cc163da14979082da" 
+[[package]] +name = "bytestring" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7c05fa5172da78a62d9949d662d2ac89d4cc7355d7b49adee5163f1fb3f363" +dependencies = [ + "bytes 0.5.6", +] + [[package]] name = "cache-padded" version = "1.1.1" @@ -512,7 +848,7 @@ dependencies = [ "num-integer", "num-traits 0.2.14", "serde", - "time", + "time 0.1.44", "winapi 0.3.9", ] @@ -551,6 +887,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "cloudabi" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" +dependencies = [ + "bitflags", +] + [[package]] name = "colored_json" version = "2.1.0" @@ -586,12 +931,35 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "const_fn" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" + [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "cookie" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784ad0fbab4f3e9cef09f20e0aea6000ae08d2cb98ac4c0abc53df18803d702f" +dependencies = [ + "percent-encoding 2.1.0", + "time 0.2.22", + "version_check", +] + +[[package]] +name = "copyless" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536" + [[package]] name = "core-foundation" version = "0.7.0" @@ -623,6 +991,15 @@ dependencies = [ "build_const", ] +[[package]] +name = "crc32fast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" 
+dependencies = [ + "cfg-if 1.0.0", +] + [[package]] name = "crossbeam" version = "0.7.3" @@ -871,6 +1248,17 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "derive_more" +version = "0.99.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] + [[package]] name = "devinfo" version = "0.1.0" @@ -910,6 +1298,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "dns-lookup" version = "1.0.5" @@ -985,10 +1379,31 @@ dependencies = [ ] [[package]] -name = "either" -version = "1.6.1" +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "encoding_rs" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-as-inner" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] [[package]] name = "enum-primitive-derive" @@ -1092,6 +1507,18 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" +[[package]] +name = "flate2" +version = "1.0.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1256,6 +1683,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "gcc" version = "0.3.55" @@ -1379,6 +1815,17 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi 0.3.9", +] + [[package]] name = "http" version = "0.1.21" @@ -1565,6 +2012,18 @@ dependencies = [ "libc", ] +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2", + "widestring", + "winapi 0.3.9", + "winreg", +] + [[package]] name = "ipnetwork" version = "0.17.0" @@ -1638,6 +2097,12 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "language-tags" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" + [[package]] name = "lazy_static" version = "1.4.0" @@ -1682,6 +2147,15 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +[[package]] +name = "lock_api" +version = 
"0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +dependencies = [ + "scopeguard", +] + [[package]] name = "log" version = "0.4.11" @@ -1714,6 +2188,21 @@ dependencies = [ "libc", ] +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -1838,6 +2327,12 @@ dependencies = [ "autocfg 1.0.1", ] +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + [[package]] name = "miniz_oxide" version = "0.4.3" @@ -2110,6 +2605,32 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +[[package]] +name = "parking_lot" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi 0.1.0", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi 0.3.9", +] + [[package]] name = "partition-identity" version = "0.2.8" @@ -2489,7 +3010,7 @@ version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi", + "cloudabi 0.0.3", "fuchsia-cprng", "libc", "rand_core 0.4.2", @@ -2588,6 +3109,40 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "resolv-conf" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11834e137f3b14e309437a8276714eed3a80d1ef894869e510f2c0c0b98b9f4a" +dependencies = [ + "hostname", + "quick-error", +] + +[[package]] +name = "rest" +version = "0.1.0" +dependencies = [ + "actix-rt", + "actix-web", + "anyhow", + "async-trait", + "composer", + "futures", + "mbus_api", + "rpc", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "strum_macros", + "tokio", + "tracing", + "tracing-futures", + "tracing-subscriber", +] + [[package]] name = "ring" version = "0.16.15" @@ -2869,6 +3424,25 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "sha-1" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +dependencies = [ + "block-buffer", + "cfg-if 1.0.0", + "cpuid-bool", + "digest", + "opaque-debug", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + [[package]] name = "sha2" version = "0.9.2" @@ -3012,12 +3586,70 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "standback" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" +dependencies = [ + "version_check", +] + [[package]] name = "state" version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "serde", + "serde_derive", + "syn 1.0.48", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2 1.0.24", + "quote 1.0.7", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn 1.0.48", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + [[package]] name = "strsim" version = "0.7.0" @@ -3223,6 +3855,15 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.1.44" @@ -3234,12 +3875,65 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" 
+dependencies = [ + "const_fn", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi 0.3.9", +] + +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.24", + "quote 1.0.7", + "standback", + "syn 1.0.48", +] + [[package]] name = "tinyvec" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" +[[package]] +name = "tinyvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78a366903f506d2ad52ca8dc552102ffdd3e937ba8a227f024dc1d1eae28575" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + [[package]] name = "tokio" version = "0.2.22" @@ -3621,6 +4315,46 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "trust-dns-proto" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdd7061ba6f4d4d9721afedffbfd403f20f39a4301fee1b70d6fcd09cca69f28" +dependencies = [ + "async-trait", + "backtrace", + "enum-as-inner", + "futures", + "idna", + "lazy_static", + "log", + "rand 0.7.3", + "smallvec", + "thiserror", + "tokio", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.19.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f23cdfdc3d8300b3c50c9e84302d3bd6d860fb9529af84ace6cf9665f181b77" +dependencies = [ + "backtrace", + "cfg-if 0.1.10", + "futures", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -3658,7 +4392,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" dependencies = [ - "tinyvec", + "tinyvec 0.3.4", ] [[package]] @@ -3882,6 +4616,12 @@ dependencies = [ "libc", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + [[package]] name = "winapi" version = "0.2.8" @@ -3925,6 +4665,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 33070a527..ff0f9003f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,4 +17,5 @@ members = [ "services", "mbus-api", "composer", + "rest", ] diff --git a/composer/src/lib.rs b/composer/src/lib.rs index d7cd137df..7c40ef45c 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -631,7 +631,7 @@ impl ComposeTest { } /// stop the container - async fn stop(&self, name: &str) -> Result<(), Error> { + pub async fn stop(&self, name: &str) -> Result<(), Error> { let id = self.containers.get(name).unwrap(); if let Err(e) = self .docker diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 
e66376803..fca7c8097 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -55,6 +55,7 @@ let "sysfs" "mbus-api" "services" + "rest" ]; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/rest/Cargo.toml b/rest/Cargo.toml new file mode 100644 index 000000000..37f28db9c --- /dev/null +++ b/rest/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "rest" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "rest" +path = "./service/src/main.rs" + +[lib] +name = "rest_client" +path = "./src/lib.rs" + +[dependencies] +rustls = "0.18" +actix-web = { version = "3.2.0", features = ["rustls"] } +mbus_api = { path = "../mbus-api" } +async-trait = "0.1.41" +serde_json = "1.0" +structopt = "0.3.15" +futures = "0.3.6" +tracing = "0.1" +tracing-subscriber = "0.2" +tracing-futures = "0.2.4" +strum = "0.19" +strum_macros = "0.19" +anyhow = "1.0.32" + +[dev-dependencies] +composer = { path = "../composer" } +rpc = { path = "../rpc" } +tokio = { version = "0.2", features = ["full"] } +actix-rt = "1.1.1" + +[dependencies.serde] +features = ["derive"] +version = "1.0" \ No newline at end of file diff --git a/rest/certs/README b/rest/certs/README new file mode 100644 index 000000000..6b2ec8db5 --- /dev/null +++ b/rest/certs/README @@ -0,0 +1 @@ +WARNING: These are dummy example certificates and script based on https://github.com/ctz/rustls \ No newline at end of file diff --git a/rest/certs/build.sh b/rest/certs/build.sh new file mode 100755 index 000000000..8c8235f10 --- /dev/null +++ b/rest/certs/build.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -xe + +rm -rf rsa/ +mkdir -p rsa/ + +openssl req -nodes \ + -x509 \ + -days 3650 \ + -newkey rsa:4096 \ + -keyout rsa/ca.key \ + -out rsa/ca.cert \ + -sha256 \ + -batch \ + -subj "/CN=testserver RSA CA" + +openssl req -nodes \ + -newkey rsa:2048 \ + -keyout rsa/user.key \ + 
-out rsa/user.req \ + -sha256 \ + -batch \ + -subj "/CN=testserver.com" + +openssl rsa \ + -in rsa/user.key \ + -out rsa/user.rsa + +openssl x509 -req \ + -in rsa/user.req \ + -out rsa/user.cert \ + -CA rsa/ca.cert \ + -CAkey rsa/ca.key \ + -sha256 \ + -days 3650 \ + -set_serial 123 \ + -extensions v3_user -extfile openssl.cnf + +cat rsa/user.cert rsa/ca.cert > rsa/user.chain diff --git a/rest/certs/openssl.cnf b/rest/certs/openssl.cnf new file mode 100644 index 000000000..639786c23 --- /dev/null +++ b/rest/certs/openssl.cnf @@ -0,0 +1,10 @@ + +[ v3_user ] +basicConstraints = critical,CA:false +keyUsage = nonRepudiation, digitalSignature +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost diff --git a/rest/certs/rsa/ca.cert b/rest/certs/rsa/ca.cert new file mode 100644 index 000000000..a2e3f404a --- /dev/null +++ b/rest/certs/rsa/ca.cert @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFGTCCAwGgAwIBAgIUN1l7q2yf89x4yEggibLWNSycUM4wDQYJKoZIhvcNAQEL +BQAwHDEaMBgGA1UEAwwRdGVzdHNlcnZlciBSU0EgQ0EwHhcNMjAxMTEzMTkxODMz +WhcNMzAxMTExMTkxODMzWjAcMRowGAYDVQQDDBF0ZXN0c2VydmVyIFJTQSBDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANQgZRvntSb/SH/hlYXpnJlJ +fxEQRjP/ehJibAVvufuZjwrP4g54vw/2xeUTYr1ZIwc+7bppXOb9N1qtOmpkYjzJ +IiEpZ0Laviem6FJZzSL+seB+M8TpdE6Twx4IZa3f4f4stWNrhw3mGUNcpb2K+zSs +jH/3Z95150kNFaCNCndWiv9AzChdjexAD1SsFFoR/pSM0OHvmlmeCeU+xeutC9Mo +y0Nw9ZzyVVZFo9Qj+Wci0mss9UJfCXq+2n/Vay+MFLIkpNHOZXLrXZ3TTb4H/G8M +NQEI7kTHEK5jhELSzl03db2vkYHIwFUwJiISw53q6ecn1GtnydcPH8Xm7Yr12CRv +1G6laCJhJUNX4ad6j1BGNB4i72zG39hDOImz5ZAKtQ16/Mxb4oJjal2Trkj+ICLF +xZiZ5FIu8XWcpw4wP7hPiitTUmvSTse5BORWAg4ZFdeutojU9Yrh6AG4SJ+9Ulyq +10VXiOTe/0S8kGBAZmj/9TU4szE1WxkLYtiSU8kFbjKaO0ji0eTd70rwRPfGLDJn ++bOEMYO/fOLjAUOwDWJnCOJCx18UXova+ypc3mF8SXL4QQPAXRubHfCWLW0o9853 +bmROj11LDkk9Jm0mzpihEFBuWk9MbduvRfAZvBDNRk4tsQkysRMlfomrs9M8rnKJ ++uN+faFxpjq/INISkRpXAgMBAAGjUzBRMB0GA1UdDgQWBBRQFsV2zvNNXZW5umbH 
+Nhte2+aThjAfBgNVHSMEGDAWgBRQFsV2zvNNXZW5umbHNhte2+aThjAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQB/5ne2glvTv8CCl8fhB0mG8oL8 +QWyMGwhdC2JWWbYde0Rbp6ZUWR+Lh4hiY6wJwSb7WLTgDgknA+JRPcS6bDRRRqcH +i3K2Z44l+ZYh2YkdPnv0a3OMdMrvldzd4IUiJ13Z6pdFOUx+mjjtfw2RytxsJFTq +C7EoIM9tYV7MGFEhfjy32cw0rFBsOwOiSX9tpU5GG93o70rmitzLgNcJVVsWwZma +29EHJR2YIKJZZ3baUOfUjVtB/CJeun19ygbXFZNKJtUOSp8PhQLf/wrIH5CP/4li +LmgsYZxZmazFrAxamgcFTUHhWHh4m8n6mRlDXk9MwdyQl5ZCttVulLd2yEgcREla +A+h+QglYSRCaM8YnbhwC62VS7OF1E4bbNVwd+k4uOe/0CgCkQC5loCED/qB4B2cV +jYC8Dq5zg+GfKA4B4/dnCEakCv8DTIFSyS7Ci+ND/LWiDRaV6H/nkhFhWAjEvIuB +ROuCWE1lvguJiZBUdERaM4WVP+MyPLm+FAgIpU90PdssbSzsndenfhUjCa8PNUYT +7DDChChOEcsH3L6tiB8Gj7hiXEXvbo5fgQnC8cMNta1yYaX9SLkqGxdhy3qKdVo6 +DG5qPwstdnQtKLP4H7VqLiHBl9sClr461fdMCcwi7OnhpCiHFQ43p2x1LWhQCOyC +kXMiuA/5EtdpuIwjPg== +-----END CERTIFICATE----- diff --git a/rest/certs/rsa/ca.key b/rest/certs/rsa/ca.key new file mode 100644 index 000000000..676be0768 --- /dev/null +++ b/rest/certs/rsa/ca.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDUIGUb57Um/0h/ +4ZWF6ZyZSX8REEYz/3oSYmwFb7n7mY8Kz+IOeL8P9sXlE2K9WSMHPu26aVzm/Tda +rTpqZGI8ySIhKWdC2r4npuhSWc0i/rHgfjPE6XROk8MeCGWt3+H+LLVja4cN5hlD +XKW9ivs0rIx/92fededJDRWgjQp3Vor/QMwoXY3sQA9UrBRaEf6UjNDh75pZngnl +PsXrrQvTKMtDcPWc8lVWRaPUI/lnItJrLPVCXwl6vtp/1WsvjBSyJKTRzmVy612d +002+B/xvDDUBCO5ExxCuY4RC0s5dN3W9r5GByMBVMCYiEsOd6unnJ9RrZ8nXDx/F +5u2K9dgkb9RupWgiYSVDV+Gneo9QRjQeIu9sxt/YQziJs+WQCrUNevzMW+KCY2pd +k65I/iAixcWYmeRSLvF1nKcOMD+4T4orU1Jr0k7HuQTkVgIOGRXXrraI1PWK4egB +uEifvVJcqtdFV4jk3v9EvJBgQGZo//U1OLMxNVsZC2LYklPJBW4ymjtI4tHk3e9K +8ET3xiwyZ/mzhDGDv3zi4wFDsA1iZwjiQsdfFF6L2vsqXN5hfEly+EEDwF0bmx3w +li1tKPfOd25kTo9dSw5JPSZtJs6YoRBQblpPTG3br0XwGbwQzUZOLbEJMrETJX6J +q7PTPK5yifrjfn2hcaY6vyDSEpEaVwIDAQABAoICAC7UR/31FSTazqKMpnm9rPia +JRcbRKRODiDo4XgBxkkpCU8VvY38UhGwJNgBg5bRis4aslio0f2cgQ/fljnOl3Fb +5dD0VvuHVttiGX+0m3jSSWqJ8dvSvf5q2HRU3j2YY0e9jv23y0TiO0mHtn81o2dp 
+gErvz1ppJPoXjWK4QEtAK8dWArwWizJQohHLO2L2/gDFZYe7YZPPM9gMF52DwkyH +gPgOg+p2BSjajuRSXMjMAy/t6OcSHAGhCJOYBeWlQxxgwpIewn6wPyaMc2k0UiIh +y8HCuvcsCx60kYo/B5fEmpWZoMHKE8u6qXB0PAMszR5eyKI9c22qxRTneiFnTKO3 +f9yOWpXhtYaUXlFLdGgv5h5n0AMAaVw6oNQ0d7tF9GAZBLudij5sojbt7gTct0JV +q9vc14zHj/ZAiWkiBYbKQuEe8WK8TCiS7GiY+Ul34GTVFnzUSDSz1h2seojfRI8A +r7dLzkR5UGMg/GxHzpnGVcd5mr5f+B2SKD6QvDGi8AxKozeTkdT/YKD+n6dLh4FF +xVZR1Mr5Gd0uLgVGXlRG/pIh9/YN0HqUFMiBXXOS4AxNLY70QgpjSilFybkRaAjM +BYRG8phHHfL+Jc973BvSsHAfeysB/FlygO0w8C3ZUOtAkV2umfftPQmNCS454b5p +dtfNfOmmNq5QrGxgNxOhAoIBAQD4z1ZNz0XJUwCdQx+GTrWWzo1MnVqeSi5Zmj0B +joBO/SmwQMDx9e92pixDGNGvNS83i3xp5UAf45z+eA3TRyRy16pyJA5nP3Y8W57W +/daBYT9PBRmqOD9JHND0tzTfLyElWXCERUgiFVre1FS+XfJhcK56relas4M+6jcT +X/dZlqXrfap7SnwUpin/k0icxfW6JUKc+Q8MCfk4EuqPIqpAHEh6QHqAMnrv9ZgV +e5ruUVkSWNS3wjhAeH0Qo9AIzsM6O1jRN+oWXvXTYaqZ+LomDpfdQJJdhuNBskt+ +P88dvcp283agUtVE9CfFKuZ8upEpj0marA6haXWhhFnpGOJlAoIBAQDaQa3awwph +Z4YlVb41aGMesB+zmN5pw4QMEWdJKrBd8fYZNh+doQ488f42GLUwjWaxb2DCBPyz +LC1mWLrw6x2ZAUn+ytKqbLp3H4xRwbbmTj1B+pVogmQ5+T1Q+gB1Sq+S0fxBpfy4 +A89fDUY/qgTdO+CRrM9YBvcTpAEZIF3TKgSsM5CNuTnpG0I39HayO3k90Iv0ZpaR +0KbagdtZ5jatfdCgN8OQuuMLculL4YU4wq3e8BvACob3emixlBvnBhymMPywZEY7 +cKalIt1GNRHQpGMj+aHOJNbma/FL6GzVR3Xi0tYqDJdaZrt3pmGMW1naWMCHOtWA +LtFM+7ds2+ALAoIBAQD0vXYmSZlaxGOZTZpn0Wp7sid5Ulr90vguuTIDIvoBOLNW +JQvltcXcDu8A/Rvc3VLZr0ItJAzkkU8vXEY1KginUo7dgawNUXZrPrBKyBvE3jSY +9Ope7r8JmufLn57JXqGbDJRJ96BRrnHZOEE3lE/EG0P1drZQur2CGH8edspm/HGu +58jXR9gvs5ciR+0oEO16MgaJQ8/SRsk3qBvDuhR/xZJhdqVJDt6wxQLZNuGaRLh5 +/JkZPFl6yUo0IP6Ue1W8JP5jpj2Dzdq/F5XpqKjqoHYvc8HFb9aBG/F4eSmxFqxe +P3PTlkckcYnhPybDT/1URRbgpxgpHARSYU7To9+lAoIBAQCe2dzCcuBUQnfFJqHY +11AFHuUD30hJrA6oheYpv5M73QZkFRtvq6/kNbDzCIqfetr6W3kN3XlaotWsPvpY +rJcPLHwBoPcxKQ+R8fk77ATFnL5DDjG+LeDqvPeev0akF6av8ntqAHhuKf+9S7kM +Tx9ZMeJhxGfoxrD/r4dJFWCUd2mNN76HGAAJJFjOxvO5B0ZX8jhaumvuVvGJ6aeP +x9mV7hNw+QUyioMDazBXlR1UUYUg1Jst67uN1Z7rWEPyomUppE/VxjXDv1raqARq +u+dqdqrdnSviELgEXx3xRIoptOgdHZzYX++s0Uayb/thn39HRv9hrTWFJQ4avL4k 
+0ec/AoIBAQDmIxOvuVI+KL9p81hsYj6YIYoRI2ddvMnASA/stPHyqKl+H//Uucbg +W0lXdmBgDCkwbfXO/I0U2gatm8fyw7gGAwRZTeCTfykhGvUu/LmLDM4lgBqCC1G0 +J+Yp5feNM1GfDde+GZ4P0lDQxriHdwI34rU+rKJiBwi6/yXIya1/zX1exuX9skez +INEDtSx2dW2mWYRmTTm/wxJyZYavqtTA6KBF2JZP/AoygiHILPcpjpOZQZLG5GMS +Pi9LuUErtMXjQz8p2TZJoEpRY7weHH+pHWGO8aj3H7UEji9dkax509g4dNgWACLv +btZ7O5or3ycX8GpyS4RU8iWjkHwkIkgu +-----END PRIVATE KEY----- diff --git a/rest/certs/rsa/user.cert b/rest/certs/rsa/user.cert new file mode 100644 index 000000000..9468aeeae --- /dev/null +++ b/rest/certs/rsa/user.cert @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEXTCCAkWgAwIBAgIBezANBgkqhkiG9w0BAQsFADAcMRowGAYDVQQDDBF0ZXN0 +c2VydmVyIFJTQSBDQTAeFw0yMDExMTMxOTE4MzNaFw0zMDExMTExOTE4MzNaMBkx +FzAVBgNVBAMMDnRlc3RzZXJ2ZXIuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArB4oko8d5/ILPAm2RErIUvtC4u+w8WFSS/4jRh/MKpFdBaLigq6d +3Ef0pAgUSQ2Jc7yiNxI7nG9vTYPeVD6QeainbJgLDq/m0MMwNM1aDFn3M5BjqtyT +Jdw8atsQrTvi5yRXfReCuRKQaHWbmSMfCVdFqEUsIFIwUK4hxKRnxYezxVABLJja +izOQhYmYtY+A1pHt6E4bgEhL1sXTDBMrX+SKjVclRO5nFOgzjs3iNwU1R5Fk/0O1 +ky6H4BVSpRTkgQnRTsFlxHndOetQJucTV7RVVjtBQkJJhBggV3Sf7YE8KMtNGwN5 +CdCt7VWUaiBNjmz38MJl7V64L6FDYdcRGQIDAQABo4GsMIGpMAwGA1UdEwEB/wQC +MAAwCwYDVR0PBAQDAgbAMB0GA1UdDgQWBBTxAhhgGThh/5xhVGOXBMkIT4W9gjBX +BgNVHSMEUDBOgBRQFsV2zvNNXZW5umbHNhte2+aThqEgpB4wHDEaMBgGA1UEAwwR +dGVzdHNlcnZlciBSU0EgQ0GCFDdZe6tsn/PceMhIIImy1jUsnFDOMBQGA1UdEQQN +MAuCCWxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAgEAfAc42bQa6J+4+cLrUa4T +gXvuFHN5tl1VetBz8F44DuVv2CUSrfxho1kiQbziLI8PC/pviRsdEqRTNACgRVye +jfGjJXgG/kgYEXhTFWKKzKaUxdMvZimpoYKrvVHVzq9Iy8p96ejFuhvWqto0+SNt +b8wQpo6QYQXfoxVY/IqdLzPAhvra/TPWhNE8oBZMqvpu0ORxRzMiLK02ADK8SIDm +ufw/b7ikGpgGhVh64qbIIBl+efodl3f+GVrf4plto8Cbf3b2QKBWt943CCaLgDf9 +4Rn+hSnJ9uC94NgKPJVBe91GVTJZhyQ5eUpJ32FsbD9sJjJ8DhQiMMKLhEeGHkmt +x7T/SGnzzjRoUiCLfdkPVFBF+s3zFmE2+tlq7CYvrUvN0li5Yc3iYP38+qXu9F4a +Gnc3E9vRgcOSPQh0ZbbckPYMCLINcV+J7kXzF6FDlf4078h4i6J41Yb9X1pfgrJy +a7D2R3NtNH/wuEDzmoXV5zV4c09YmZ5Kp+pjIbpzwIoEhg1pAD1pyLP174YFRxmd 
+fUoNMCX33ZxNWWqWpAg94qthCPyRIiHP3La6xYybPoaPDYi8JNlLtOkhtLSAJLyy +q5gWtNIUuaS1heZPkbph2EfAfJVVAj6VQTSnc95bnHk1eZpyZDwEA9oc/P2sZuPm +07a4ymuTCCsbOm19HlmMRtY= +-----END CERTIFICATE----- diff --git a/rest/certs/rsa/user.chain b/rest/certs/rsa/user.chain new file mode 100644 index 000000000..07b39d3fe --- /dev/null +++ b/rest/certs/rsa/user.chain @@ -0,0 +1,56 @@ +-----BEGIN CERTIFICATE----- +MIIEXTCCAkWgAwIBAgIBezANBgkqhkiG9w0BAQsFADAcMRowGAYDVQQDDBF0ZXN0 +c2VydmVyIFJTQSBDQTAeFw0yMDExMTMxOTE4MzNaFw0zMDExMTExOTE4MzNaMBkx +FzAVBgNVBAMMDnRlc3RzZXJ2ZXIuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArB4oko8d5/ILPAm2RErIUvtC4u+w8WFSS/4jRh/MKpFdBaLigq6d +3Ef0pAgUSQ2Jc7yiNxI7nG9vTYPeVD6QeainbJgLDq/m0MMwNM1aDFn3M5BjqtyT +Jdw8atsQrTvi5yRXfReCuRKQaHWbmSMfCVdFqEUsIFIwUK4hxKRnxYezxVABLJja +izOQhYmYtY+A1pHt6E4bgEhL1sXTDBMrX+SKjVclRO5nFOgzjs3iNwU1R5Fk/0O1 +ky6H4BVSpRTkgQnRTsFlxHndOetQJucTV7RVVjtBQkJJhBggV3Sf7YE8KMtNGwN5 +CdCt7VWUaiBNjmz38MJl7V64L6FDYdcRGQIDAQABo4GsMIGpMAwGA1UdEwEB/wQC +MAAwCwYDVR0PBAQDAgbAMB0GA1UdDgQWBBTxAhhgGThh/5xhVGOXBMkIT4W9gjBX +BgNVHSMEUDBOgBRQFsV2zvNNXZW5umbHNhte2+aThqEgpB4wHDEaMBgGA1UEAwwR +dGVzdHNlcnZlciBSU0EgQ0GCFDdZe6tsn/PceMhIIImy1jUsnFDOMBQGA1UdEQQN +MAuCCWxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAgEAfAc42bQa6J+4+cLrUa4T +gXvuFHN5tl1VetBz8F44DuVv2CUSrfxho1kiQbziLI8PC/pviRsdEqRTNACgRVye +jfGjJXgG/kgYEXhTFWKKzKaUxdMvZimpoYKrvVHVzq9Iy8p96ejFuhvWqto0+SNt +b8wQpo6QYQXfoxVY/IqdLzPAhvra/TPWhNE8oBZMqvpu0ORxRzMiLK02ADK8SIDm +ufw/b7ikGpgGhVh64qbIIBl+efodl3f+GVrf4plto8Cbf3b2QKBWt943CCaLgDf9 +4Rn+hSnJ9uC94NgKPJVBe91GVTJZhyQ5eUpJ32FsbD9sJjJ8DhQiMMKLhEeGHkmt +x7T/SGnzzjRoUiCLfdkPVFBF+s3zFmE2+tlq7CYvrUvN0li5Yc3iYP38+qXu9F4a +Gnc3E9vRgcOSPQh0ZbbckPYMCLINcV+J7kXzF6FDlf4078h4i6J41Yb9X1pfgrJy +a7D2R3NtNH/wuEDzmoXV5zV4c09YmZ5Kp+pjIbpzwIoEhg1pAD1pyLP174YFRxmd +fUoNMCX33ZxNWWqWpAg94qthCPyRIiHP3La6xYybPoaPDYi8JNlLtOkhtLSAJLyy +q5gWtNIUuaS1heZPkbph2EfAfJVVAj6VQTSnc95bnHk1eZpyZDwEA9oc/P2sZuPm +07a4ymuTCCsbOm19HlmMRtY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIIFGTCCAwGgAwIBAgIUN1l7q2yf89x4yEggibLWNSycUM4wDQYJKoZIhvcNAQEL +BQAwHDEaMBgGA1UEAwwRdGVzdHNlcnZlciBSU0EgQ0EwHhcNMjAxMTEzMTkxODMz +WhcNMzAxMTExMTkxODMzWjAcMRowGAYDVQQDDBF0ZXN0c2VydmVyIFJTQSBDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANQgZRvntSb/SH/hlYXpnJlJ +fxEQRjP/ehJibAVvufuZjwrP4g54vw/2xeUTYr1ZIwc+7bppXOb9N1qtOmpkYjzJ +IiEpZ0Laviem6FJZzSL+seB+M8TpdE6Twx4IZa3f4f4stWNrhw3mGUNcpb2K+zSs +jH/3Z95150kNFaCNCndWiv9AzChdjexAD1SsFFoR/pSM0OHvmlmeCeU+xeutC9Mo +y0Nw9ZzyVVZFo9Qj+Wci0mss9UJfCXq+2n/Vay+MFLIkpNHOZXLrXZ3TTb4H/G8M +NQEI7kTHEK5jhELSzl03db2vkYHIwFUwJiISw53q6ecn1GtnydcPH8Xm7Yr12CRv +1G6laCJhJUNX4ad6j1BGNB4i72zG39hDOImz5ZAKtQ16/Mxb4oJjal2Trkj+ICLF +xZiZ5FIu8XWcpw4wP7hPiitTUmvSTse5BORWAg4ZFdeutojU9Yrh6AG4SJ+9Ulyq +10VXiOTe/0S8kGBAZmj/9TU4szE1WxkLYtiSU8kFbjKaO0ji0eTd70rwRPfGLDJn ++bOEMYO/fOLjAUOwDWJnCOJCx18UXova+ypc3mF8SXL4QQPAXRubHfCWLW0o9853 +bmROj11LDkk9Jm0mzpihEFBuWk9MbduvRfAZvBDNRk4tsQkysRMlfomrs9M8rnKJ ++uN+faFxpjq/INISkRpXAgMBAAGjUzBRMB0GA1UdDgQWBBRQFsV2zvNNXZW5umbH +Nhte2+aThjAfBgNVHSMEGDAWgBRQFsV2zvNNXZW5umbHNhte2+aThjAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQB/5ne2glvTv8CCl8fhB0mG8oL8 +QWyMGwhdC2JWWbYde0Rbp6ZUWR+Lh4hiY6wJwSb7WLTgDgknA+JRPcS6bDRRRqcH +i3K2Z44l+ZYh2YkdPnv0a3OMdMrvldzd4IUiJ13Z6pdFOUx+mjjtfw2RytxsJFTq +C7EoIM9tYV7MGFEhfjy32cw0rFBsOwOiSX9tpU5GG93o70rmitzLgNcJVVsWwZma +29EHJR2YIKJZZ3baUOfUjVtB/CJeun19ygbXFZNKJtUOSp8PhQLf/wrIH5CP/4li +LmgsYZxZmazFrAxamgcFTUHhWHh4m8n6mRlDXk9MwdyQl5ZCttVulLd2yEgcREla +A+h+QglYSRCaM8YnbhwC62VS7OF1E4bbNVwd+k4uOe/0CgCkQC5loCED/qB4B2cV +jYC8Dq5zg+GfKA4B4/dnCEakCv8DTIFSyS7Ci+ND/LWiDRaV6H/nkhFhWAjEvIuB +ROuCWE1lvguJiZBUdERaM4WVP+MyPLm+FAgIpU90PdssbSzsndenfhUjCa8PNUYT +7DDChChOEcsH3L6tiB8Gj7hiXEXvbo5fgQnC8cMNta1yYaX9SLkqGxdhy3qKdVo6 +DG5qPwstdnQtKLP4H7VqLiHBl9sClr461fdMCcwi7OnhpCiHFQ43p2x1LWhQCOyC +kXMiuA/5EtdpuIwjPg== +-----END CERTIFICATE----- diff --git a/rest/certs/rsa/user.key b/rest/certs/rsa/user.key new file mode 100644 index 000000000..64745a057 --- /dev/null +++ b/rest/certs/rsa/user.key @@ -0,0 +1,28 
@@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCsHiiSjx3n8gs8 +CbZESshS+0Li77DxYVJL/iNGH8wqkV0FouKCrp3cR/SkCBRJDYlzvKI3Ejucb29N +g95UPpB5qKdsmAsOr+bQwzA0zVoMWfczkGOq3JMl3Dxq2xCtO+LnJFd9F4K5EpBo +dZuZIx8JV0WoRSwgUjBQriHEpGfFh7PFUAEsmNqLM5CFiZi1j4DWke3oThuASEvW +xdMMEytf5IqNVyVE7mcU6DOOzeI3BTVHkWT/Q7WTLofgFVKlFOSBCdFOwWXEed05 +61Am5xNXtFVWO0FCQkmEGCBXdJ/tgTwoy00bA3kJ0K3tVZRqIE2ObPfwwmXtXrgv +oUNh1xEZAgMBAAECggEAJZRMHXu79tmFFG22awC87klH2E6nHny2QDtTrkrhA+ZL +HiDrPccsVl0rj+jnrO+UtD1aWZXUiBpYfd8t2szyNG3baD1mJKXJSoljbBPt9kZq +T8rVyAdy5VBJrLb51CkAKKFprLlGZGwmNXiAKZZef2abodrAgKoRPHxYiRuwQJM9 +aQU7EhDEcBJtkSvUhNHAfY4XJSeY4c27y3oOkHUtx5SVTdoPWYSnNY0O5gh4wEBI +7+463+vfvfBGcieOtG2HR47U7mXO+TjD+t4a5D6AGDnYYQ0ZrNfpDTJjBGvTrzEx +NjlXjkvzU9StGYzAh64kgC5jxnFbltfOIbS3XGUsdQKBgQDle6jdX6CHMQN77s/P +me1chM7/0OJ/rezAwEDsGfYfv07mC17HStzCLEvyjppLN52ivrXcnF82DTDh5DLu +2m2Zvtu0zJCgsCePkvbJTMUjPazX7FvPZf+87HBvFHl2PtbZRoyevAQYpri85t9z +LWeM5KAROwHbYrGylZ9r9QKYAwKBgQDAAZO+mtMs2qV8j0IVYgLWGFt0be1aNXY5 +ycm6OjNC98u/HxlRg7P9kmzoHhKVyTsAwZYeqqg/cRgQ9qRrIIsQxdPuyxmvG+BI +Cn+PjwUS7BGa9ARuFiBYAmJKP+gbcNrc+zXmkpqDwtz050xaccGIpCqvjjYDCGxr +JA49GMTtswKBgFDBxrg4yc4pkndPoDQHkD9JwWrHAJ47/6AJSA+OR03+ze4ovC7Y +Cn4Ohp+STXwA02mNQYSkRuFHnRcxbjceCQnc34ZtosvZkeJ1roFCkfGH58aIt/px +g6Dhd+OvSdsx2/vUVvIZajuemE5p1OK8bL9pSWcm481nmu8ktliQfVwvAoGATlsW +aS2TynU8HZX0+Pbzg12TTPNjbnHs2m5v5ZDExKqNOmS9l1dxB6VOzncfTIM/kfJu +iftJCALmOF3/ATzZo7Gn6IZ3588vexbVscdC4dL26E521FoxPtlSNUzZMwGeP5pB +B+SCLA1E0Kjnru1HdrTQOq6Fej1gyWSAbwyBqSMCgYAOiyBdFbhL5MDUU6O3dbPw +gpkGTgRKziEKnV7x8o4TNb8FQPlNiMpIOFUyhs5fVRrF5owUUWIg22vlarM3FzWr +nzMFDwqJiluVDVYHtvPQy4VZLiPgw5xgoYfHqwtAEOHTpB67r+5r5JahuqjaH0rp +hgq+QS48vf0XLoIEy2zIAA== +-----END PRIVATE KEY----- diff --git a/rest/certs/rsa/user.req b/rest/certs/rsa/user.req new file mode 100644 index 000000000..b7211f6e0 --- /dev/null +++ b/rest/certs/rsa/user.req @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE REQUEST----- 
+MIICXjCCAUYCAQAwGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsHiiSjx3n8gs8CbZESshS+0Li77DxYVJL +/iNGH8wqkV0FouKCrp3cR/SkCBRJDYlzvKI3Ejucb29Ng95UPpB5qKdsmAsOr+bQ +wzA0zVoMWfczkGOq3JMl3Dxq2xCtO+LnJFd9F4K5EpBodZuZIx8JV0WoRSwgUjBQ +riHEpGfFh7PFUAEsmNqLM5CFiZi1j4DWke3oThuASEvWxdMMEytf5IqNVyVE7mcU +6DOOzeI3BTVHkWT/Q7WTLofgFVKlFOSBCdFOwWXEed0561Am5xNXtFVWO0FCQkmE +GCBXdJ/tgTwoy00bA3kJ0K3tVZRqIE2ObPfwwmXtXrgvoUNh1xEZAgMBAAGgADAN +BgkqhkiG9w0BAQsFAAOCAQEAAiwhQOl7wMoRbKgc2F81CfsoOY//rTnzb04aelPX +Opk8kJSssjJ5EgXmkayJo5bLOi9xbC7S+LekRFjiITczwmcxJC7q5TQV9QjVSNAo +8xySwRdEghYd1xsR3jqApUhl4pNHGgxHdoCIoanz+tUv+0n4zP57+kBI6R3edvau +d9P+ozbbaZDYhiHUVI+Qm9l2tQRWi4DTsa2Jh+uoXxpaKkJU4HdH5eoa8LFeNJOE +fVDmFRl0TkIin+oONmn9jJLalaDthyF9Zv3xr0m7PUbwL02ikMRFA+p4HwXFFDom +oFzI5HMOWANfKSyUCfa4FQgJWn+FB0yI5vnc5ozTverNow== +-----END CERTIFICATE REQUEST----- diff --git a/rest/certs/rsa/user.rsa b/rest/certs/rsa/user.rsa new file mode 100644 index 000000000..baa8a1e3d --- /dev/null +++ b/rest/certs/rsa/user.rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEArB4oko8d5/ILPAm2RErIUvtC4u+w8WFSS/4jRh/MKpFdBaLi +gq6d3Ef0pAgUSQ2Jc7yiNxI7nG9vTYPeVD6QeainbJgLDq/m0MMwNM1aDFn3M5Bj +qtyTJdw8atsQrTvi5yRXfReCuRKQaHWbmSMfCVdFqEUsIFIwUK4hxKRnxYezxVAB +LJjaizOQhYmYtY+A1pHt6E4bgEhL1sXTDBMrX+SKjVclRO5nFOgzjs3iNwU1R5Fk +/0O1ky6H4BVSpRTkgQnRTsFlxHndOetQJucTV7RVVjtBQkJJhBggV3Sf7YE8KMtN +GwN5CdCt7VWUaiBNjmz38MJl7V64L6FDYdcRGQIDAQABAoIBACWUTB17u/bZhRRt +tmsAvO5JR9hOpx58tkA7U65K4QPmSx4g6z3HLFZdK4/o56zvlLQ9WlmV1IgaWH3f +LdrM8jRt22g9ZiSlyUqJY2wT7fZGak/K1cgHcuVQSay2+dQpACihaay5RmRsJjV4 +gCmWXn9mm6HawICqETx8WIkbsECTPWkFOxIQxHASbZEr1ITRwH2OFyUnmOHNu8t6 +DpB1LceUlU3aD1mEpzWNDuYIeMBASO/uOt/r373wRnInjrRth0eO1O5lzvk4w/re +GuQ+gBg52GENGazX6Q0yYwRr068xMTY5V45L81PUrRmMwIeuJIAuY8ZxW5bXziG0 +t1xlLHUCgYEA5Xuo3V+ghzEDe+7Pz5ntXITO/9Dif63swMBA7Bn2H79O5gtex0rc +wixL8o6aSzedor613JxfNg0w4eQy7tptmb7btMyQoLAnj5L2yUzFIz2s1+xbz2X/ 
+vOxwbxR5dj7W2UaMnrwEGKa4vObfcy1njOSgETsB22KxspWfa/UCmAMCgYEAwAGT +vprTLNqlfI9CFWIC1hhbdG3tWjV2OcnJujozQvfLvx8ZUYOz/ZJs6B4Slck7AMGW +HqqoP3EYEPakayCLEMXT7ssZrxvgSAp/j48FEuwRmvQEbhYgWAJiSj/oG3Da3Ps1 +5pKag8Lc9OdMWnHBiKQqr442AwhsayQOPRjE7bMCgYBQwca4OMnOKZJ3T6A0B5A/ +ScFqxwCeO/+gCUgPjkdN/s3uKLwu2Ap+Doafkk18ANNpjUGEpEbhR50XMW43HgkJ +3N+GbaLL2ZHida6BQpHxh+fGiLf6cYOg4Xfjr0nbMdv71FbyGWo7nphOadTivGy/ +aUlnJuPNZ5rvJLZYkH1cLwKBgE5bFmktk8p1PB2V9Pj284Ndk0zzY25x7Npub+WQ +xMSqjTpkvZdXcQelTs53H0yDP5Hybon7SQgC5jhd/wE82aOxp+iGd+fPL3sW1bHH +QuHS9uhOdtRaMT7ZUjVM2TMBnj+aQQfkgiwNRNCo567tR3a00DquhXo9YMlkgG8M +gakjAoGADosgXRW4S+TA1FOjt3Wz8IKZBk4ESs4hCp1e8fKOEzW/BUD5TYjKSDhV +MobOX1UaxeaMFFFiINtr5WqzNxc1q58zBQ8KiYpblQ1WB7bz0MuFWS4j4MOcYKGH +x6sLQBDh06Qeu6/ua+SWobqo2h9K6YYKvkEuPL39Fy6CBMtsyAA= +-----END RSA PRIVATE KEY----- diff --git a/rest/service/src/main.rs b/rest/service/src/main.rs new file mode 100644 index 000000000..c27116021 --- /dev/null +++ b/rest/service/src/main.rs @@ -0,0 +1,90 @@ +mod message_bus; +use message_bus::v0::{MessageBus, *}; + +use actix_web::{ + get, + middleware, + web, + App, + HttpResponse, + HttpServer, + Responder, +}; +use rustls::{ + internal::pemfile::{certs, rsa_private_keys}, + NoClientAuth, + ServerConfig, +}; +use std::io::BufReader; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Rest Server address to bind to + /// Default: 0.0.0.0:8080 + #[structopt(long, short, default_value = "0.0.0.0:8080")] + rest: String, + /// The Nats Server URL or address to connect to + /// Default: nats://0.0.0.0:4222 + #[structopt(long, short, default_value = "nats://0.0.0.0:4222")] + nats: String, +} + +#[get("/v0/nodes")] +async fn get_nodes() -> impl Responder { + match MessageBus::get_nodes().await { + Ok(nodes) => HttpResponse::Ok().json(nodes), + Err(error) => { + let error = serde_json::json!({"error": error.to_string()}); + HttpResponse::InternalServerError().json(error) + } + } +} + +#[get("/v0/nodes/{id}")] +async fn 
get_node(web::Path(node_id): web::Path) -> impl Responder { + match MessageBus::get_node(node_id).await { + Ok(Some(node)) => HttpResponse::Ok().json(node), + Ok(None) => HttpResponse::NoContent().json(()), + Err(error) => { + let error = serde_json::json!({"error": error.to_string()}); + HttpResponse::InternalServerError().json(error) + } + } +} + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + init_tracing(); + mbus_api::message_bus_init(CliArgs::from_args().nats).await; + + // dummy certificates + let mut config = ServerConfig::new(NoClientAuth::new()); + let cert_file = &mut BufReader::new( + &std::include_bytes!("../../certs/rsa/user.chain")[..], + ); + let key_file = &mut BufReader::new( + &std::include_bytes!("../../certs/rsa/user.rsa")[..], + ); + let cert_chain = certs(cert_file).unwrap(); + let mut keys = rsa_private_keys(key_file).unwrap(); + config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); + + HttpServer::new(move || { + App::new() + .wrap(middleware::Logger::default()) + .service(get_nodes) + .service(get_node) + }) + .bind_rustls(CliArgs::from_args().rest, config)? 
+ .run() + .await +} diff --git a/rest/service/src/message_bus/mod.rs b/rest/service/src/message_bus/mod.rs new file mode 100644 index 000000000..2d24cd45f --- /dev/null +++ b/rest/service/src/message_bus/mod.rs @@ -0,0 +1 @@ +pub mod v0; diff --git a/rest/service/src/message_bus/v0.rs b/rest/service/src/message_bus/v0.rs new file mode 100644 index 000000000..517eba082 --- /dev/null +++ b/rest/service/src/message_bus/v0.rs @@ -0,0 +1,135 @@ +use async_trait::async_trait; +use mbus_api::{v0::*, *}; + +/// Mayastor Node +pub type Node = mbus_api::v0::Node; + +/// Interface used by the rest service to interact with the mayastor +/// services via the message bus +#[async_trait(?Send)] +pub trait MessageBusTrait: Sized { + #[tracing::instrument(level = "info")] + async fn get_nodes() -> std::io::Result> { + GetNodes {}.request().await.map(|v| v.0) + } + #[tracing::instrument(level = "info")] + async fn get_node(id: String) -> std::io::Result> { + let nodes = Self::get_nodes().await?; + Ok(nodes.into_iter().find(|n| n.id == id)) + } +} + +/// Implementation of the bus interface trait +pub struct MessageBus {} +impl MessageBusTrait for MessageBus {} + +#[cfg(test)] +mod tests { + use super::*; + use composer::*; + use rpc::mayastor::Null; + + async fn bus_init() -> Result<(), Box> { + tokio::time::timeout(std::time::Duration::from_secs(2), async { + mbus_api::message_bus_init("10.1.0.2".into()).await + }) + .await?; + Ok(()) + } + async fn wait_for_node() -> Result<(), Box> { + let _ = GetNodes {}.request().await?; + Ok(()) + } + fn init_tracing() { + if let Ok(filter) = + tracing_subscriber::EnvFilter::try_from_default_env() + { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } + } + // to avoid waiting for timeouts + async fn orderly_start( + test: &ComposeTest, + ) -> Result<(), Box> { + test.start_containers(vec!["nats", "node"]).await?; + + bus_init().await?; + 
wait_for_node().await?; + + test.start("mayastor").await?; + + let mut hdl = test.grpc_handle("mayastor").await?; + hdl.mayastor.list_nexus(Null {}).await?; + Ok(()) + } + + #[tokio::test] + async fn bus() -> Result<(), Box> { + init_tracing(); + let nats_arg = vec!["-n", "nats.rest_backend"]; + let mayastor = "node-test-name"; + let test = Builder::new() + .name("rest_backend") + .network("10.1.0.0/16") + .add_container_bin( + "nats", + Binary::from_nix("nats-server").with_arg("-DV"), + ) + .add_container_bin( + "node", + Binary::from_dbg("node").with_args(nats_arg.clone()), + ) + .add_container_bin( + "mayastor", + Binary::from_dbg("mayastor") + .with_args(nats_arg.clone()) + .with_args(vec!["-N", mayastor]), + ) + .with_clean(true) + .build_only() + .await?; + + orderly_start(&test).await?; + + test_bus_backend(mayastor, &test).await?; + + // run with --nocapture to see all the logs + test.logs_all().await?; + Ok(()) + } + + async fn test_bus_backend( + mayastor: &str, + test: &ComposeTest, + ) -> Result<(), Box> { + let nodes = MessageBus::get_nodes().await?; + tracing::info!("Nodes: {:?}", nodes); + assert_eq!(nodes.len(), 1); + assert_eq!( + nodes.first().unwrap(), + &Node { + id: mayastor.to_string(), + grpc_endpoint: "0.0.0.0:10124".to_string(), + state: NodeState::Online, + } + ); + let node = MessageBus::get_node(mayastor.to_string()).await?; + assert_eq!( + node, + Some(Node { + id: mayastor.to_string(), + grpc_endpoint: "0.0.0.0:10124".to_string(), + state: NodeState::Online, + }) + ); + + test.stop("mayastor").await?; + + tokio::time::delay_for(std::time::Duration::from_millis(250)).await; + assert!(MessageBus::get_nodes().await?.is_empty()); + + Ok(()) + } +} diff --git a/rest/src/lib.rs b/rest/src/lib.rs new file mode 100644 index 000000000..3629552ef --- /dev/null +++ b/rest/src/lib.rs @@ -0,0 +1,67 @@ +#![warn(missing_docs)] +//! Client library which exposes information from the different mayastor +//! control plane services through REST +//! 
Different versions are exposed through `versions` +//! +//! # Example: +//! +//! async fn main() { +//! use rest_client::versions::v0::RestClient; +//! let client = RestClient::new("https://localhost:8080"); +//! let _nodes = client.get_nodes().await.unwrap(); +//! } + +/// expose different versions of the client +pub mod versions; + +use actix_web::client::Client; +use serde::Deserialize; +use std::{io::BufReader, string::ToString}; + +/// Actix Rest Client +#[derive(Clone)] +pub struct ActixRestClient { + client: actix_web::client::Client, + url: String, +} + +impl ActixRestClient { + /// creates a new client which uses the specified `url` + pub fn new(url: &str) -> anyhow::Result { + let cert_file = &mut BufReader::new( + &std::include_bytes!("../certs/rsa/ca.cert")[..], + ); + + let mut config = rustls::ClientConfig::new(); + config + .root_store + .add_pem_file(cert_file) + .map_err(|_| anyhow::anyhow!("Add pem file to the root store!"))?; + let connector = actix_web::client::Connector::new() + .rustls(std::sync::Arc::new(config)); + let rest_client = + Client::builder().connector(connector.finish()).finish(); + + Ok(Self { + client: rest_client, + url: url.to_string(), + }) + } + async fn get(&self, urn: String, _: fn(R) -> Y) -> anyhow::Result + where + for<'de> R: Deserialize<'de>, + { + let uri = format!("{}{}", self.url, urn); + + let mut rest_response = + self.client.get(uri).send().await.map_err(|error| { + anyhow::anyhow!( + "Failed to get nodes from rest, err={:?}", + error + ) + })?; + + let rest_body = rest_response.body().await?; + Ok(serde_json::from_slice::(&rest_body)?) + } +} diff --git a/rest/src/versions/mod.rs b/rest/src/versions/mod.rs new file mode 100644 index 000000000..cc8d594f4 --- /dev/null +++ b/rest/src/versions/mod.rs @@ -0,0 +1,4 @@ +//! 
All the different interface versions of the rest library + +/// version 0 +pub mod v0; diff --git a/rest/src/versions/v0.rs b/rest/src/versions/v0.rs new file mode 100644 index 000000000..a28d6c3f9 --- /dev/null +++ b/rest/src/versions/v0.rs @@ -0,0 +1,46 @@ +use super::super::ActixRestClient; +use async_trait::async_trait; +use std::string::ToString; +use strum_macros::{self, Display}; + +/// Node from the node service +pub type Node = mbus_api::v0::Node; +/// Vector of Nodes from the node service +pub type Nodes = mbus_api::v0::Nodes; + +/// RestClient interface +#[async_trait(?Send)] +pub trait RestClient { + /// Get all the known nodes + async fn get_nodes(&self) -> anyhow::Result>; +} + +#[derive(Display, Debug)] +enum RestURNs { + #[strum(serialize = "nodes")] + GetNodes(Nodes), +} + +macro_rules! get { + ($S:ident, $T:ident) => { + $S.get( + format!("/v0/{}", RestURNs::$T(Default::default()).to_string()), + RestURNs::$T, + ) + }; +} + +#[async_trait(?Send)] +impl RestClient for ActixRestClient { + async fn get_nodes(&self) -> anyhow::Result> { + let nodes = get!(self, GetNodes).await?; + Ok(nodes.0) + } +} + +impl ActixRestClient { + /// Get RestClient v0 + pub fn v0(&self) -> impl RestClient { + self.clone() + } +} diff --git a/rest/tests/test.rs b/rest/tests/test.rs new file mode 100644 index 000000000..a5a7b902e --- /dev/null +++ b/rest/tests/test.rs @@ -0,0 +1,22 @@ +pub use composer::*; +pub use tracing::info; + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +pub fn init() { + init_tracing(); +} + +pub async fn bus_init(nats: &str) -> Result<(), Box> { + tokio::time::timeout(std::time::Duration::from_secs(2), async { + mbus_api::message_bus_init(nats.into()).await + }) + .await?; + Ok(()) +} diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs new file mode 
100644 index 000000000..cbdd62317 --- /dev/null +++ b/rest/tests/v0_test.rs @@ -0,0 +1,96 @@ +mod test; +use mbus_api::{ + v0::{GetNodes, NodeState}, + Message, +}; +use rest_client::{versions::v0::*, ActixRestClient}; +use rpc::mayastor::Null; +use test::{Binary, Builder, ComposeTest, ContainerSpec}; + +async fn wait_for_node() -> Result<(), Box> { + let _ = GetNodes {}.request().await?; + Ok(()) +} + +// to avoid waiting for timeouts +async fn orderly_start( + test: &ComposeTest, +) -> Result<(), Box> { + test.start_containers(vec!["nats", "node", "rest"]).await?; + + test::bus_init("localhost").await?; + wait_for_node().await?; + + test.start("mayastor").await?; + + let mut hdl = test.grpc_handle("mayastor").await?; + hdl.mayastor.list_nexus(Null {}).await?; + Ok(()) +} + +#[actix_rt::test] +async fn client() -> Result<(), Box> { + test::init(); + + let nats_arg = vec!["-n", "nats.rest"]; + let mayastor = "node-test-name"; + let test = Builder::new() + .name("rest") + .network("10.1.0.0/16") + .add_container_spec( + ContainerSpec::new( + "nats", + Binary::from_nix("nats-server").with_arg("-DV"), + ) + .with_portmap("4222", "4222"), + ) + .add_container_bin( + "node", + Binary::from_dbg("node").with_args(nats_arg.clone()), + ) + .add_container_spec( + ContainerSpec::new( + "rest", + Binary::from_nix("rest").with_args(nats_arg.clone()), + ) + .with_portmap("8080", "8080"), + ) + .add_container_bin( + "mayastor", + Binary::from_dbg("mayastor") + .with_args(nats_arg.clone()) + .with_args(vec!["-N", mayastor]), + ) + .with_clean(true) + .build_only() + .await?; + + orderly_start(&test).await?; + + client_test(mayastor, &test).await?; + + // run with --nocapture to see all the logs + test.logs_all().await?; + Ok(()) +} + +async fn client_test( + mayastor: &str, + test: &ComposeTest, +) -> Result<(), Box> { + let client = ActixRestClient::new("https://localhost:8080").unwrap().v0(); + let nodes = client.get_nodes().await.unwrap(); + assert_eq!(nodes.len(), 1); + 
assert_eq!( + nodes.first().unwrap(), + &Node { + id: mayastor.to_string(), + grpc_endpoint: "0.0.0.0:10124".to_string(), + state: NodeState::Online, + } + ); + test.stop("mayastor").await?; + tokio::time::delay_for(std::time::Duration::from_millis(250)).await; + assert!(client.get_nodes().await?.is_empty()); + Ok(()) +} From 6e78b025e5636765474a92dd257f423a18d04fb8 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Wed, 11 Nov 2020 10:35:14 +0000 Subject: [PATCH 58/92] Add initial k8s node operator... ... which reconciles the control plane nodes against the k8s msn It does so by polling at a certain period and comparing the control plane nodes against the k8s nodes. --- Cargo.lock | 482 +++++++++++++++++++++++++++++++++- Cargo.toml | 5 +- nix/pkgs/mayastor/default.nix | 6 +- operators/Cargo.toml | 35 +++ operators/node/src/main.rs | 254 ++++++++++++++++++ 5 files changed, 767 insertions(+), 15 deletions(-) create mode 100644 operators/Cargo.toml create mode 100644 operators/node/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index b38277249..2837d18c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,15 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "actix-codec" version = "0.3.0" @@ -285,6 +295,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +[[package]] +name = "ahash" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" +dependencies = [ + "const-random", +] + [[package]] name = "aho-corasick" version = "0.7.15" @@ -318,6 +337,12 @@ version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" +[[package]] +name = "array_tool" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f8cb5d814eb646a863c4f24978cff2880c4be96ad8cde2c0f0678732902e271" + [[package]] name = "arrayref" version = "0.3.6" @@ -347,6 +372,19 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-compression" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb1ff21a63d3262af46b9f33a826a8d134e2d0d9b2179c86034948b732ea8b2a" +dependencies = [ + "bytes 0.5.6", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", +] + [[package]] name = "async-executor" version = "1.3.0" @@ -931,6 +969,26 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "const-random" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02" +dependencies = [ + "const-random-macro", + "proc-macro-hack", +] + +[[package]] +name = "const-random-macro" +version = 
"0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685" +dependencies = [ + "getrandom 0.2.0", + "proc-macro-hack", +] + [[package]] name = "const_fn" version = "0.4.3" @@ -966,7 +1024,17 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +dependencies = [ + "core-foundation-sys 0.8.2", "libc", ] @@ -976,6 +1044,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + [[package]] name = "cpuid-bool" version = "0.1.2" @@ -1217,12 +1291,34 @@ dependencies = [ "syn 1.0.48", ] +[[package]] +name = "dashmap" +version = "3.11.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f260e2fc850179ef410018660006951c1b55b79e8087e87111a2c388994b9b5" +dependencies = [ + "ahash", + "cfg-if 0.1.10", + "num_cpus", +] + [[package]] name = "data-encoding" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" +[[package]] +name = "derivative" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" +dependencies = [ + "proc-macro2 1.0.24", 
+ "quote 1.0.7", + "syn 1.0.48", +] + [[package]] name = "derive_builder" version = "0.7.2" @@ -1525,6 +1621,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.0.0" @@ -1732,6 +1843,17 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + [[package]] name = "gimli" version = "0.23.0" @@ -1937,6 +2059,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "hyper-tls" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +dependencies = [ + "bytes 0.5.6", + "hyper", + "native-tls", + "tokio", + "tokio-tls", +] + [[package]] name = "hyper-unix-connector" version = "0.1.5" @@ -2021,9 +2156,15 @@ dependencies = [ "socket2", "widestring", "winapi 0.3.9", - "winreg", + "winreg 0.6.2", ] +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + [[package]] name = "ipnetwork" version = "0.17.0" @@ -2072,6 +2213,19 @@ version = 
"0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" +[[package]] +name = "jsonpath_lib" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8727f6987896c010ec9add275f59de2ae418b672fafa77bc3673b4cee1f09ca" +dependencies = [ + "array_tool", + "env_logger", + "log", + "serde", + "serde_json", +] + [[package]] name = "jsonrpc" version = "0.1.0" @@ -2087,6 +2241,23 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "k8s-openapi" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57f95fd36c08ce592e67400a0f1a66f432196997d5a7e9a97e8743c33d8a9312" +dependencies = [ + "base64 0.12.3", + "bytes 0.5.6", + "chrono", + "http 0.2.1", + "percent-encoding 2.1.0", + "serde", + "serde-value", + "serde_json", + "url", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -2097,6 +2268,68 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "kube" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3787d41d01ff816f93f1a73d20252f8a65887682206cfbf2d0f7d2d2b1b73fa" +dependencies = [ + "Inflector", + "base64 0.12.3", + "bytes 0.5.6", + "chrono", + "dirs", + "either", + "futures", + "futures-util", + "http 0.2.1", + "jsonpath_lib", + "k8s-openapi", + "log", + "openssl", + "pem", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "static_assertions", + "thiserror", + "time 0.2.22", + "tokio", + "url", +] + +[[package]] +name = "kube-derive" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd71bf282e5551ac0852afcf25352b7fb8dd9a66eed7b6e66a6ebbf6b5b2f475" +dependencies = [ + "Inflector", + "proc-macro2 1.0.24", + "quote 1.0.7", + "serde_json", + "syn 1.0.48", +] + +[[package]] +name = "kube-runtime" +version = "0.43.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9abc7b19889353e501e6bc7b2b9d7062b2e008ec256f11e9428ed8e56d046d2f" +dependencies = [ + "dashmap", + "derivative", + "futures", + "k8s-openapi", + "kube", + "pin-project 0.4.27", + "serde", + "smallvec", + "snafu", + "tokio", +] + [[package]] name = "language-tags" version = "0.2.2" @@ -2333,6 +2566,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "miniz_oxide" version = "0.4.3" @@ -2413,6 +2656,24 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" +[[package]] +name = "native-tls" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a1cda389c26d6b88f3d2dc38aa1b750fe87d298cc5d795ec9e975f402f00372" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.0.0", + "security-framework-sys 2.0.0", + "tempfile", +] + [[package]] name = "nats" version = "0.8.2" @@ -2593,12 +2854,73 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +dependencies = [ + "bitflags", + "cfg-if 0.1.10", + "foreign-types", + "lazy_static", + "libc", + "openssl-sys", +] + [[package]] name = "openssl-probe" 
version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +[[package]] +name = "openssl-sys" +version = "0.9.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +dependencies = [ + "autocfg 1.0.1", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "operators" +version = "0.1.0" +dependencies = [ + "actix-web", + "anyhow", + "either", + "humantime 2.0.1", + "k8s-openapi", + "kube", + "kube-derive", + "kube-runtime", + "mbus_api", + "rest", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "tokio", + "tracing", + "tracing-futures", + "tracing-subscriber", +] + +[[package]] +name = "ordered-float" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fe9037165d7023b1228bc4ae9a2fa1a2b0095eca6c2998c624723dfd01314a5" +dependencies = [ + "num-traits 0.2.14", +] + [[package]] name = "parking" version = "2.0.0" @@ -2645,6 +2967,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59698ea79df9bf77104aefd39cc3ec990cb9693fb59c3b0a70ddf2646fdffb4b" +dependencies = [ + "base64 0.12.3", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "1.0.1" @@ -2914,7 +3247,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -2963,7 +3296,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", ] [[package]] @@ -3067,7 +3400,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -3109,6 +3442,43 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "reqwest" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" +dependencies = [ + "async-compression", + "base64 0.12.3", + "bytes 0.5.6", + "encoding_rs", + "futures-core", + "futures-util", + "http 0.2.1", + "http-body 0.3.1", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "lazy_static", + "log", + "mime", + "mime_guess", + "native-tls", + "percent-encoding 2.1.0", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-tls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.7.0", +] + [[package]] name = "resolv-conf" version = "0.6.3" @@ -3237,7 +3607,7 @@ dependencies = [ "openssl-probe", "rustls", "schannel", - "security-framework", + "security-framework 1.0.0", ] [[package]] @@ -3291,10 +3661,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" dependencies = [ "bitflags", - "core-foundation", - "core-foundation-sys", + "core-foundation 0.7.0", + "core-foundation-sys 0.7.0", "libc", - "security-framework-sys", + "security-framework-sys 1.0.0", +] + +[[package]] +name = "security-framework" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" +dependencies = [ + "bitflags", + "core-foundation 0.9.1", + 
"core-foundation-sys 0.8.2", + "libc", + "security-framework-sys 2.0.0", ] [[package]] @@ -3303,7 +3686,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "security-framework-sys" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" +dependencies = [ + "core-foundation-sys 0.8.2", "libc", ] @@ -3331,6 +3724,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.117" @@ -3348,6 +3751,7 @@ version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ + "indexmap", "itoa", "ryu", "serde", @@ -3497,7 +3901,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eaebd4be561a7d8148803baa108092f85090189c4b8c3ffb81602b15b5c1771" dependencies = [ - "getrandom", + "getrandom 0.1.15", "signature", "subtle-encoding", "zeroize", @@ -3546,6 +3950,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09" dependencies = [ "doc-comment", + "futures-core", + "pin-project 0.4.27", "snafu-derive", ] @@ -3601,6 +4007,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" +[[package]] +name = "static_assertions" 
+version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "stdweb" version = "0.4.20" @@ -3981,6 +4393,16 @@ dependencies = [ "webpki", ] +[[package]] +name = "tokio-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-util" version = "0.2.0" @@ -4377,6 +4799,15 @@ dependencies = [ "libudev-sys", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.4" @@ -4462,6 +4893,12 @@ dependencies = [ "rand 0.6.5", ] +[[package]] +name = "vcpkg" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" + [[package]] name = "vec-arena" version = "1.0.0" @@ -4521,6 +4958,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ "cfg-if 0.1.10", + "serde", + "serde_json", "wasm-bindgen-macro", ] @@ -4539,6 +4978,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" +dependencies = [ + "cfg-if 0.1.10", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.68" @@ -4674,6 +5125,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "winreg" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index ff0f9003f..df42cb618 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "sysfs", "services", "mbus-api", - "composer", - "rest", + "composer", + "rest", + "operators", ] diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index fca7c8097..02c665108 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -38,8 +38,8 @@ let version = builtins.readFile "${version_drv}"; buildProps = rec { name = "mayastor"; - # cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0rrkj111h7h5blj6qx28166hygag3y92zn5isqig03fnib2zx3mi"; + #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; + cargoSha256 = "0dmg0y1wp3gkfiql80b8li20x6l407cih16i9sdbbly34bc84w09"; inherit version; src = whitelistSource ../../../. 
[ "Cargo.lock" @@ -56,6 +56,8 @@ let "mbus-api" "services" "rest" + "operators" + "composer" ]; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/operators/Cargo.toml b/operators/Cargo.toml new file mode 100644 index 000000000..d8f05c7a5 --- /dev/null +++ b/operators/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "operators" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "node-op" +path = "node/src/main.rs" + +[dependencies] +rustls = "0.18" +actix-web = { version = "3.2.0", features = ["rustls"] } +serde_json = "1.0" +structopt = "0.3.15" +tokio = { version = "0.2", features = ["full"] } +anyhow = "1.0.32" +mbus_api = { path = "../mbus-api" } +strum = "0.19" +humantime = "2.0.1" +kube = "0.43.0" +kube-runtime = "0.43.0" +kube-derive = "0.43.0" +k8s-openapi = { version = "0.9.0", default-features = false, features = ["v1_18"] } +either = "1.6.0" +tracing = "0.1" +tracing-subscriber = "0.2" +tracing-futures = "0.2.4" +rest = { path = "../rest" } + +[dependencies.serde] +features = ["derive"] +version = "1.0" \ No newline at end of file diff --git a/operators/node/src/main.rs b/operators/node/src/main.rs new file mode 100644 index 000000000..8e1925c42 --- /dev/null +++ b/operators/node/src/main.rs @@ -0,0 +1,254 @@ +use kube::api::{Api, DeleteParams, ListParams, Meta, PostParams}; +use kube_derive::CustomResource; +use rest_client::versions::v0::*; +use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; +use structopt::StructOpt; +use tracing::{debug, error, info, instrument}; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Rest Server hostname to connect to + /// Default: localhost:8080 + #[structopt(long, short, default_value = "localhost:8080")] + rest: String, + + /// Polling period + #[structopt(long, short, default_value = "30s")] + period: humantime::Duration, +} + +#[derive(CustomResource, 
Deserialize, Serialize, Clone, Debug)] +#[kube( + group = "openebs.io", + version = "v1alpha1", + kind = "MayastorNode", + namespaced +)] +#[kube(apiextensions = "v1beta1")] +#[kube(status = "String")] +#[serde(rename_all = "camelCase")] +pub struct MayastorNodeSpec { + pub grpc_endpoint: String, +} + +impl TryFrom<&MayastorNode> for Node { + type Error = strum::ParseError; + fn try_from(kube_node: &MayastorNode) -> Result { + Ok(Node { + id: kube_node.name(), + grpc_endpoint: kube_node.spec.grpc_endpoint.clone(), + state: kube_node + .status + .as_ref() + .unwrap_or(&"".to_string()) + .parse()?, + }) + } +} + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +#[actix_web::main] +async fn main() -> anyhow::Result<()> { + init_tracing(); + + let polling_period = CliArgs::from_args().period.into(); + + let rest_url = format!("https://{}", CliArgs::from_args().rest); + let rest_cli = rest_client::ActixRestClient::new(&rest_url)?; + + let kube_client = kube::Client::try_default().await?; + let namespace = "mayastor"; + + // Validate that our "CRD" is up to date? + + // Manage the MayastorNode CR + let nodes_api: Api = + Api::namespaced(kube_client.clone(), namespace); + + loop { + // Poll for kubernetes nodes and rest nodes + // Reconcile from rest into kubernetes + if let Err(error) = polling_work(&nodes_api, rest_cli.v0()).await { + error!("Error while polling: {}", error); + } + + // Sleep till the next poll + tokio::time::delay_for(polling_period).await; + } +} + +/// This isn't quite a reconciler as no action is taken from k8s MayastorNodes +/// (msn), in fact, they should not be updated by the user. +/// We simply forward/translate control plane nodes into k8s nodes. 
+#[instrument(skip(nodes_api, rest_cli))] +async fn polling_work( + nodes_api: &Api, + rest_cli: impl RestClient, +) -> anyhow::Result<()> { + // Fetch all nodes as seen by the control plane via REST + let rest_nodes = rest_cli.get_nodes().await?; + println!("Retrieved rest nodes: {:?}", rest_nodes); + + // Fetch all node CRD's from k8s + let kube_nodes = nodes_get_all(&nodes_api).await?; + debug!("Retrieved kube nodes: {:?}", kube_nodes); + + // control plane nodes which do not exist in k8s + let new_nodes = rest_nodes + .iter() + .filter(|node| { + !kube_nodes + .iter() + .any(|kube_node| kube_node.name() == node.id) + }) + .collect::>(); + + // k8s nodes which no longer exist in the control plane + let delete_nodes = kube_nodes + .iter() + .filter(|kube_node| { + !rest_nodes.iter().any(|node| kube_node.name() == node.id) + }) + .collect::>(); + + // k8s nodes are out of date so need an update + let update_nodes = rest_nodes + .iter() + .filter(|&node| { + kube_nodes.iter().any(|kube_node| { + let node_from_kube = Node::try_from(kube_node); + if let Ok(kube_node) = node_from_kube.as_ref() { + kube_node != node + } else { + error!( + "Node {:#?} is not formatted properly.", + node_from_kube + ); + true + } + }) + }) + .collect::>(); + + if !new_nodes.is_empty() { + info!("Creating nodes: {:?}", new_nodes); + + for node in new_nodes { + if let Err(error) = node_create(&nodes_api, &node).await { + error!( + "Failed to create kube_node: {}, error={}", + node.id, error + ); + } + } + } + + if !update_nodes.is_empty() { + info!("Updating nodes: {:?}", update_nodes); + + for node in update_nodes { + if let Err(error) = node_update(&nodes_api, &node).await { + error!( + "Failed to update kube_node: {}, error={:?}", + node.id, error + ); + } + } + } + + if !delete_nodes.is_empty() { + info!("Deleting nodes: {:?}", delete_nodes); + + for node in delete_nodes { + if let Err(error) = node_delete(&nodes_api, &Meta::name(node)).await + { + error!( + "Failed to delete 
kube_node: {}, error={:?}", + Meta::name(node), + error + ); + } + } + } + + Ok(()) +} + +#[instrument(skip(nodes_api))] +async fn nodes_get_all( + nodes_api: &Api, +) -> anyhow::Result> { + let list_params = ListParams::default(); + let kube_nodes = nodes_api.list(&list_params).await?.items; + Ok(kube_nodes) +} + +#[instrument(skip(nodes_api))] +async fn node_create( + nodes_api: &Api, + node: &Node, +) -> anyhow::Result<()> { + let kube_node = MayastorNode::new( + &node.id, + MayastorNodeSpec { + grpc_endpoint: node.grpc_endpoint.clone(), + }, + ); + + let post_params = PostParams::default(); + let mut kube_node = nodes_api.create(&post_params, &kube_node).await?; + + let status = Some(node.state.to_string()); + kube_node.status = status.clone(); + let kube_node = nodes_api + .replace_status( + &Meta::name(&kube_node), + &post_params, + serde_json::to_vec(&kube_node)?, + ) + .await?; + assert_eq!(kube_node.status, status); + + Ok(()) +} + +#[instrument(skip(nodes_api))] +async fn node_update( + nodes_api: &Api, + node: &Node, +) -> anyhow::Result<()> { + let post_params = PostParams::default(); + let status = Some(node.state.to_string()); + + let mut kube_node = nodes_api.get(&node.id).await?; + kube_node.status = status.clone(); + + let kube_node = nodes_api + .replace_status( + &Meta::name(&kube_node), + &post_params, + serde_json::to_vec(&kube_node)?, + ) + .await?; + assert_eq!(kube_node.status, status); + + Ok(()) +} + +#[instrument(skip(nodes_api))] +async fn node_delete( + nodes_api: &Api, + name: &str, +) -> anyhow::Result<()> { + let delete_params = DeleteParams::default(); + let _ = nodes_api.delete(name, &delete_params).await?; + Ok(()) +} From 466e2830ccbffcede7a4123a9a343c87bfe700ca Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Wed, 18 Nov 2020 22:34:50 +0100 Subject: [PATCH 59/92] env: add -l option to specify cores The -l option serves the same purpose as the -m option however it accepts a list of cores with ranges i.e 1,2,10-15. 
Also two default values have been updated while here as the number of shared buffers was to low to test this change and some notes to the YAML file has been added. --- deploy/mayastor-daemonset.yaml | 22 +++++++++++----- mayastor/src/core/env.rs | 41 +++++++++++++++++++++--------- mayastor/src/subsys/config/opts.rs | 13 ++++++---- 3 files changed, 53 insertions(+), 23 deletions(-) diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 9fe85ad95..57a3f413c 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -25,10 +25,6 @@ spec: nodeSelector: openebs.io/engine: mayastor kubernetes.io/arch: amd64 - # NOTE: Each container must have mem/cpu limits defined in order to - # belong to Guaranteed QoS class, hence can never get evicted in case of - # pressure unless they exceed those limits. limits and requests must be - # the same. initContainers: - name: message-bus-probe image: busybox:latest @@ -49,10 +45,21 @@ spec: - name: IMPORT_NEXUSES value: "false" args: + # In order to select what cores mayastor should be running on, a mask or a list can be specified. + # For example: -m 0x1 will tell mayastor to only use one core which is equivalent to -l 1 + # Using a mask of 0x3 will use the first 2 cores, which is equivalent to -l 1-2 + # + # The -l argument supports ranges to be able to do the same as passing a mask for example: + # -l 1,2,10-20 means use core 1, 2, 10 to 20 + # + # Note: + # 1. When both -m and -l are specified the -l argument is takes precedence. + # 2. Ensure that the CPU resources are updated accordingly. If you use 2 CPUs, the CPU: field should also read 2. 
- "-N$(MY_NODE_NAME)" - "-g$(MY_POD_IP)" - "-nnats" - "-y/var/local/mayastor/config.yaml" + - "-m0x3" securityContext: privileged: true volumeMounts: @@ -65,12 +72,15 @@ spec: - name: config mountPath: /var/local/mayastor/config.yaml resources: + # NOTE: Each container must have mem/cpu limits defined in order to + # belong to Guaranteed QoS class, hence can never get evicted in case of + # pressure unless they exceed those limits. limits and requests must be the same. limits: - cpu: "1" + cpu: "2" memory: "500Mi" hugepages-2Mi: "1Gi" requests: - cpu: "1" + cpu: "2" memory: "500Mi" hugepages-2Mi: "1Gi" ports: diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index d4e25aacf..586b0ad27 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -81,10 +81,10 @@ fn parse_mb(src: &str) -> Result { )] pub struct MayastorCliArgs { #[structopt(short = "g", default_value = grpc::default_endpoint_str())] - /// IP address and port (optional) for the gRPC server to listen on + /// IP address and port (optional) for the gRPC server to listen on. pub grpc_endpoint: String, #[structopt(short = "L")] - /// Enable logging for sub components + /// Enable logging for sub components. pub log_components: Vec, #[structopt(short = "m", default_value = "0x1")] /// The reactor mask to be used for starting up the instance @@ -93,10 +93,10 @@ pub struct MayastorCliArgs { /// Name of the node where mayastor is running (ID used by control plane) pub node_name: Option, #[structopt(short = "n")] - /// Hostname/IP and port (optional) of the message bus server + /// Hostname/IP and port (optional) of the message bus server. pub mbus_endpoint: Option, /// The maximum amount of hugepage memory we are allowed to allocate in MiB - /// (default: all) + /// a value of 0 means no limits. 
#[structopt( short = "s", parse(try_from_str = parse_mb), @@ -104,23 +104,27 @@ pub struct MayastorCliArgs { )] pub mem_size: i32, #[structopt(short = "u")] - /// Disable the use of PCIe devices + /// Disable the use of PCIe devices. pub no_pci: bool, #[structopt(short = "r", default_value = "/var/tmp/mayastor.sock")] - /// Path to create the rpc socket + /// Path to create the rpc socket. pub rpc_address: String, #[structopt(short = "y")] - /// path to mayastor config file + /// Path to mayastor YAML config file. pub mayastor_config: Option, #[structopt(short = "C")] - /// path to child status config file + /// Path to child status config file. pub child_status_config: Option, #[structopt(long = "huge-dir")] - /// path to hugedir + /// Path to hugedir. pub hugedir: Option, #[structopt(long = "env-context")] - /// pass additional arguments to the EAL environment + /// Pass additional arguments to the EAL environment. pub env_context: Option, + #[structopt(short = "-l")] + /// List of cores to run on instead of using the core mask. When specified + /// it supersedes the core mask (-m) argument. 
+ pub core_list: Option, } /// Defaults are redefined here in case of using it during tests @@ -139,6 +143,7 @@ impl Default for MayastorCliArgs { mayastor_config: None, child_status_config: None, hugedir: None, + core_list: None, } } } @@ -216,6 +221,7 @@ pub struct MayastorEnvironment { tpoint_group_mask: String, unlink_hugepage: bool, log_component: Vec, + core_list: Option, } impl Default for MayastorEnvironment { @@ -250,6 +256,7 @@ impl Default for MayastorEnvironment { tpoint_group_mask: String::new(), unlink_hugepage: true, log_component: vec![], + core_list: None, } } } @@ -338,6 +345,7 @@ impl MayastorEnvironment { rpc_addr: args.rpc_address, hugedir: args.hugedir, env_context: args.env_context, + core_list: args.core_list, ..Default::default() } .setup_static() @@ -382,8 +390,6 @@ impl MayastorEnvironment { args.push(CString::new(self.name.clone()).unwrap()); - args.push(CString::new(format!("-c {}", self.reactor_mask)).unwrap()); - if self.mem_channel > 0 { args.push( CString::new(format!("-n {}", self.mem_channel)).unwrap(), @@ -470,6 +476,17 @@ impl MayastorEnvironment { ); } + // when -l is specified it overrules the core mask. The core mask still + // carries our default of 0x1 such that existing testing code + // does not require any changes. + if let Some(list) = &self.core_list { + args.push(CString::new(format!("-l {}", list)).unwrap()); + } else { + args.push( + CString::new(format!("-c {}", self.reactor_mask)).unwrap(), + ) + } + let mut cargs = args .iter() .map(|arg| arg.as_ptr()) diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index ec96757eb..3a1baaf16 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -133,7 +133,7 @@ pub struct TcpTransportOpts { max_io_size: u32, /// IO unit size io_unit_size: u32, - /// max admin queue depth (?) 
+ /// max admin queue depth per admin queue max_aq_depth: u32, /// num of shared buffers num_shared_buf: u32, @@ -147,8 +147,11 @@ pub struct TcpTransportOpts { ch2_success: bool, /// dif dif_insert_or_strip: bool, - /// no idea + /// The socket priority of the connection owned by this transport (TCP + /// only) sock_priority: u32, + /// abort execution timeout + abort_timeout_sec: u32, } impl Default for TcpTransportOpts { @@ -160,14 +163,14 @@ impl Default for TcpTransportOpts { io_unit_size: 131_072, ch2_success: true, max_qpairs_per_ctrl: 128, - num_shared_buf: 511, - // reduce when we have a single target + num_shared_buf: 2048, buf_cache_size: 64, dif_insert_or_strip: false, max_aq_depth: 128, max_srq_depth: 0, // RDMA no_srq: false, // RDMA sock_priority: 0, + abort_timeout_sec: 1, } } } @@ -187,7 +190,7 @@ impl From for spdk_nvmf_transport_opts { num_shared_buffers: o.num_shared_buf, buf_cache_size: o.buf_cache_size, dif_insert_or_strip: o.dif_insert_or_strip, - abort_timeout_sec: 0, + abort_timeout_sec: o.abort_timeout_sec, association_timeout: 120000, transport_specific: std::ptr::null(), } From 95a650a3717562bae13aef475efa11778c483705 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Thu, 24 Sep 2020 14:47:06 +0000 Subject: [PATCH 60/92] Replace watcher by cache implementation based on official k8s client Unfortunatelly official client contains a few bugs so I had to fork it and create a modified npm package that fixes all issues and is used by moac. The plan is to upstream the fixes and get rid of the fork eventually. Watcher is more resilient to various failures than before and includes methods for modifying objects too. After each modification it waits for a watcher event from k8s to update the local cache and only then continues with the next operation. If the operation fails or event does not come it reinitializes the cache to avoid continuing with a corrupted state. 
As part of that rewrite watcher and all operators to typescript to take advantage of type checking. Compilation step generates map files so we can see line numbers in TS source files in error stacks when running the mocha tests. CRDs were defined using v1beta1 api and that is changed to v1 now. Resolves: CAS-460 and CAS-436 and CAS-268 --- csi/moac/.gitignore | 9 +- csi/moac/crds/mayastornode.yaml | 75 +- csi/moac/crds/mayastorpool.yaml | 135 +- csi/moac/crds/mayastorvolume.yaml | 239 +-- csi/moac/finalizer_helper.ts | 173 -- csi/moac/index.js | 95 +- csi/moac/nats.js | 60 +- csi/moac/node-composition.nix | 9 +- csi/moac/node-env.nix | 163 +- csi/moac/node-packages.nix | 2648 ++++++++++++----------- csi/moac/node.js | 35 +- csi/moac/node_operator.js | 265 --- csi/moac/node_operator.ts | 278 +++ csi/moac/package-lock.json | 2802 ++++++++++++++----------- csi/moac/package.json | 26 +- csi/moac/pool_operator.js | 427 ---- csi/moac/pool_operator.ts | 476 +++++ csi/moac/test/index.js | 3 +- csi/moac/test/nats_test.js | 49 +- csi/moac/test/node_operator_test.js | 631 +++--- csi/moac/test/pool_operator_test.js | 1481 ++++++++----- csi/moac/test/volume_operator_test.js | 689 +++--- csi/moac/test/watcher_stub.js | 116 +- csi/moac/test/watcher_test.js | 864 ++++---- csi/moac/tsconfig.json | 9 +- csi/moac/volume_operator.js | 465 ---- csi/moac/volume_operator.ts | 544 +++++ csi/moac/watcher.js | 318 --- csi/moac/watcher.ts | 555 +++++ 29 files changed, 7471 insertions(+), 6168 deletions(-) delete mode 100644 csi/moac/finalizer_helper.ts delete mode 100644 csi/moac/node_operator.js create mode 100644 csi/moac/node_operator.ts delete mode 100644 csi/moac/pool_operator.js create mode 100644 csi/moac/pool_operator.ts delete mode 100644 csi/moac/volume_operator.js create mode 100644 csi/moac/volume_operator.ts delete mode 100644 csi/moac/watcher.js create mode 100644 csi/moac/watcher.ts diff --git a/csi/moac/.gitignore b/csi/moac/.gitignore index 213915fcf..1ca28f6cc 100644 --- 
a/csi/moac/.gitignore +++ b/csi/moac/.gitignore @@ -1,6 +1,11 @@ /node_modules/ /proto/ /result -/replica.js -/pool.js +/watcher.js /nexus.js +/node_operator.js +/pool.js +/pool_operator.js +/replica.js +/volume_operator.js +/*.js.map diff --git a/csi/moac/crds/mayastornode.yaml b/csi/moac/crds/mayastornode.yaml index 54026ab3e..c3265638e 100644 --- a/csi/moac/crds/mayastornode.yaml +++ b/csi/moac/crds/mayastornode.yaml @@ -1,47 +1,50 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: mayastornodes.openebs.io spec: group: openebs.io - version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + # Both status and spec parts are updated by the controller. + status: {} + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + description: Specification of the mayastor node. + type: object + required: + - grpcEndpoint + properties: + grpcEndpoint: + description: Address of gRPC server that mayastor listens on + type: string + status: + description: State of the node as seen by the control plane + type: string + additionalPrinterColumns: + - name: State + type: string + description: State of the storage pool + jsonPath: .status + - name: Age + type: date + jsonPath: .metadata.creationTimestamp scope: Namespaced names: kind: MayastorNode listKind: MayastorNodeList plural: mayastornodes singular: mayastornode - shortNames: ['msn'] - additionalPrinterColumns: - - name: State - type: string - description: State of the storage pool - JSONPath: .status - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - # Both status and spec parts are updated by the controller. 
- status: {} - validation: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor node. - type: object - required: - - grpcEndpoint - properties: - grpcEndpoint: - description: Address of gRPC server that mayastor listens on - type: string - status: - description: State of the node as seen by the control plane - type: string + shortNames: ['msn'] \ No newline at end of file diff --git a/csi/moac/crds/mayastorpool.yaml b/csi/moac/crds/mayastorpool.yaml index 46aa2f2da..bc48b08ae 100644 --- a/csi/moac/crds/mayastorpool.yaml +++ b/csi/moac/crds/mayastorpool.yaml @@ -1,10 +1,77 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: mayastorpools.openebs.io spec: group: openebs.io - version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + description: Specification of the mayastor pool. + type: object + required: + - node + - disks + properties: + node: + description: Name of the k8s node where the storage pool is located. + type: string + disks: + description: Disk devices (paths or URIs) that should be used for the pool. + type: array + items: + type: string + status: + description: Status part updated by the pool controller. + type: object + properties: + state: + description: Pool state. + type: string + reason: + description: Reason for the pool state value if applicable. + type: string + disks: + description: Disk device URIs that are actually used for the pool. + type: array + items: + type: string + capacity: + description: Capacity of the pool in bytes. + type: integer + format: int64 + minimum: 0 + used: + description: How many bytes are used in the pool. 
+ type: integer + format: int64 + minimum: 0 + additionalPrinterColumns: + - name: Node + type: string + description: Node where the storage pool is located + jsonPath: .spec.node + - name: State + type: string + description: State of the storage pool + jsonPath: .status.state + - name: Age + type: date + jsonPath: .metadata.creationTimestamp scope: Namespaced names: kind: MayastorPool @@ -12,67 +79,3 @@ spec: plural: mayastorpools singular: mayastorpool shortNames: ["msp"] - additionalPrinterColumns: - - name: Node - type: string - description: Node where the storage pool is located - JSONPath: .spec.node - - name: State - type: string - description: State of the storage pool - JSONPath: .status.state - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} - validation: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor pool. - type: object - required: - - node - - disks - properties: - node: - description: Name of the k8s node where the storage pool is located. - type: string - disks: - description: Disk devices (paths or URIs) that should be used for the pool. - type: array - items: - type: string - status: - description: Status part updated by the pool controller. - type: object - properties: - state: - description: Pool state. - type: string - reason: - description: Reason for the pool state value if applicable. - type: string - disks: - description: Disk device URIs that are actually used for the pool. - type: array - items: - type: string - capacity: - description: Capacity of the pool in bytes. - type: integer - format: int64 - minimum: 0 - used: - description: How many bytes are used in the pool. 
- type: integer - format: int64 - minimum: 0 diff --git a/csi/moac/crds/mayastorvolume.yaml b/csi/moac/crds/mayastorvolume.yaml index 074a186a3..a92ab1b8c 100644 --- a/csi/moac/crds/mayastorvolume.yaml +++ b/csi/moac/crds/mayastorvolume.yaml @@ -1,138 +1,141 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: mayastorvolumes.openebs.io spec: group: openebs.io - version: v1alpha1 - scope: Namespaced - names: - kind: MayastorVolume - listKind: MayastorVolumeList - plural: mayastorvolumes - singular: mayastorvolume - shortNames: ['msv'] - additionalPrinterColumns: - - name: Node - type: string - description: Node where the volume is located - JSONPath: .status.node - - name: Size - type: integer - format: int64 - minimum: 0 - description: Size of the volume - JSONPath: .status.size - - name: State - type: string - description: State of the storage pool - JSONPath: .status.state - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - # The status part is updated by the controller and spec part by the user - # usually. Well, not in this case. The mayastor's control plane updates both - # parts and user is allowed to update some of the properties in the spec - # too. The status part is read-only for the user as it is usually done. - status: {} - validation: - openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - description: Specification of the mayastor volume. + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + # The status part is updated by the controller and spec part by the user + # usually. Well, not in this case. The mayastor's control plane updates both + # parts and user is allowed to update some of the properties in the spec + # too. The status part is read-only for the user as it is usually done. 
+ status: {} + schema: + openAPIV3Schema: type: object - required: - - replicaCount - - requiredBytes properties: - replicaCount: - description: The number of replicas used for the volume. - type: integer - minimum: 1 - preferredNodes: - description: A list of preferred cluster nodes for the volume. - type: array - items: - type: string - requiredNodes: - description: Only cluster nodes from this list should be used for the volume. - type: array - items: - type: string - requiredBytes: - description: The minimum size of the volume. - type: integer - minimum: 1 - limitBytes: - description: The maximum size of the volume (if zero then same as the requiredBytes). - type: integer - minimum: 0 - protocol: - description: Share protocol of the nexus + apiVersion: type: string - status: - description: Properties related to current state of the volume. - type: object - properties: - size: - description: The size of the volume if it has been created - type: integer - format: int64 - state: - description: Overall state of the volume. - type: string - reason: - description: Further explanation of the state if applicable. + kind: type: string - node: - description: Name of the k8s node with the nexus. - type: string - nexus: - description: Frontend of the volume. + metadata: + type: object + spec: + description: Specification of the mayastor volume. type: object + required: + - replicaCount + - requiredBytes properties: - deviceUri: - description: URI of a block device for IO. + replicaCount: + description: The number of replicas used for the volume. + type: integer + minimum: 1 + preferredNodes: + description: A list of preferred cluster nodes for the volume. + type: array + items: + type: string + requiredNodes: + description: Only cluster nodes from this list should be used for the volume. + type: array + items: + type: string + requiredBytes: + description: The minimum size of the volume. 
+ type: integer + minimum: 1 + limitBytes: + description: The maximum size of the volume (if zero then same as the requiredBytes). + type: integer + minimum: 0 + protocol: + description: Share protocol of the nexus type: string + status: + description: Properties related to current state of the volume. + type: object + properties: + size: + description: The size of the volume if it has been created + type: integer + format: int64 state: - description: State of the nexus. + description: Overall state of the volume. + type: string + reason: + description: Further explanation of the state if applicable. + type: string + node: + description: Name of the k8s node with the nexus. type: string - children: - description: Child devices of the nexus (replicas). + nexus: + description: Frontend of the volume. + type: object + properties: + deviceUri: + description: URI of a block device for IO. + type: string + state: + description: State of the nexus. + type: string + children: + description: Child devices of the nexus (replicas). + type: array + items: + description: child device of the nexus (replica). + type: object + properties: + uri: + description: URI used by nexus to access the child. + type: string + state: + description: State of the child as seen by the nexus. + type: string + replicas: + description: List of replicas type: array items: - description: child device of the nexus (replica). type: object properties: - uri: - description: URI used by nexus to access the child. + node: + description: Name of the k8s node with the replica. type: string - state: - description: State of the child as seen by the nexus. + pool: + description: Name of the pool that replica was created on. type: string - replicas: - description: List of replicas - type: array - items: - type: object - properties: - node: - description: Name of the k8s node with the replica. - type: string - pool: - description: Name of the pool that replica was created on. 
- type: string - uri: - description: URI of the replica used by the nexus. - type: string - offline: - description: Is replica reachable by control plane. - type: boolean + uri: + description: URI of the replica used by the nexus. + type: string + offline: + description: Is replica reachable by control plane. + type: boolean + additionalPrinterColumns: + - name: Node + type: string + description: Node where the volume is located + jsonPath: .status.node + - name: Size + type: integer + format: int64 + minimum: 0 + description: Size of the volume + jsonPath: .status.size + - name: State + type: string + description: State of the storage pool + jsonPath: .status.state + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + scope: Namespaced + names: + kind: MayastorVolume + listKind: MayastorVolumeList + plural: mayastorvolumes + singular: mayastorvolume + shortNames: ['msv'] diff --git a/csi/moac/finalizer_helper.ts b/csi/moac/finalizer_helper.ts deleted file mode 100644 index 4eb89c7da..000000000 --- a/csi/moac/finalizer_helper.ts +++ /dev/null @@ -1,173 +0,0 @@ -// -'use strict'; - -const k8s = require('@kubernetes/client-node'); -const log = require('./logger').Logger('finalizer_helper'); - -export class FinalizerHelper { - private kubeConfig: any; - private k8sApi: any; - private namespace: String; - private groupname: String; - private version: String; - private plural: String; - - constructor (namespace: String, groupname:String, version:String, plural:String) { - this.namespace = namespace; - this.groupname = groupname; - this.version = version; - this.kubeConfig = new k8s.KubeConfig(); - this.kubeConfig.loadFromDefault(); - this.k8sApi = this.kubeConfig.makeApiClient(k8s.CustomObjectsApi); - this.plural = plural; - } - - addFinalizer(body: any, instancename: String, finalizer: String) { - if (body.metadata.deletionTimestamp != undefined) { - log.warn(`addFinalizer(${instancename},${finalizer}), deletionTimestamp is set`); - return; - } - - if 
(body.metadata.finalizers != undefined) { - const index = body.metadata.finalizers.indexOf(finalizer); - if ( index > -1) { - log.debug(`@addFinalizer(${instancename},${finalizer}), finalizer already present`); - return; - } - body.metadata.finalizers.push(finalizer); - } else { - body.metadata.finalizers = [finalizer]; - } - - // TODO: use patchNamespacedCustomObject - this.k8sApi.replaceNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename, - body) - .then((res:any) => { - log.debug(`added finalizer:${finalizer} to ${this.plural}:${instancename}`); - }) - .catch((err:any) => { - log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - } - - removeFinalizer(body: any, instancename: String, finalizer: String) { - if (body.metadata.finalizers == undefined) { - log.debug(`removeFinalizer(${instancename},${finalizer}), no finalizers defined.`); - return; - } - - const index = body.metadata.finalizers.indexOf(finalizer); - if ( index < 0) { - log.debug(`removeFinalizer(${instancename},${finalizer}), finalizer not found`); - return; - } - body.metadata.finalizers.splice(index, 1); - - // TODO: use patchNamespacedCustomObject - this.k8sApi.replaceNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename, - body). 
- then((res:any) => { - log.debug(`removed finalizer:${finalizer} from ${this.plural}:${instancename}`); - }) - .catch((err: any) => { - log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - } - - addFinalizerToCR(instancename: String, finalizer: String) { - this.k8sApi.getNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename) - .then((customresource:any) => { - let body = customresource.body; - - if (body.metadata.deletionTimestamp != undefined) { - log.warn(`addFinalizerToCR(${instancename},${finalizer}), deletionTimestamp is set`); - return; - } - - if (body.metadata.finalizers != undefined) { - const index = body.metadata.finalizers.indexOf(finalizer); - if ( index > -1) { - log.debug(`@addFinalizerToCR(${instancename},${finalizer}), finalizer already present`); - return; - } - body.metadata.finalizers.splice(-1, 0, finalizer); - } else { - body.metadata.finalizers = [finalizer]; - } - - // TODO: use patchNamespacedCustomObject - this.k8sApi.replaceNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename, - body) - .then((res:any) => { - log.debug(`added finalizer:${finalizer} to ${this.plural}:${instancename}`); - }) - .catch((err:any) => { - log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - }) - .catch((err: any) => { - log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, get failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - } - - removeFinalizerFromCR(instancename: String, finalizer: String) { - this.k8sApi.getNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename) - .then((customresource:any) => { - let body = 
customresource.body; - if (body.metadata.finalizers == undefined) { - log.debug(`removeFinalizerFromCR(${instancename},${finalizer}), no finalizers on pool`); - return; - } - - const index = body.metadata.finalizers.indexOf(finalizer); - if ( index < 0) { - log.debug(`removeFinalizerFromCR(${instancename},${finalizer}), finalizer not found`); - return; - } - body.metadata.finalizers.splice(index, 1); - - // TODO: use patchNamespacedCustomObject - this.k8sApi.replaceNamespacedCustomObject( - this.groupname, - this.version, - this.namespace, - this.plural, - instancename, - body). - then((res:any) => { - log.debug(`removed finalizer:${finalizer} from ${this.plural}:${instancename}`); - }) - .catch((err: any) => { - log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - }) - .catch((err: any) => { - log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, get failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); - }); - } -} diff --git a/csi/moac/index.js b/csi/moac/index.js index 9760d516b..58d081fc8 100755 --- a/csi/moac/index.js +++ b/csi/moac/index.js @@ -5,54 +5,46 @@ 'use strict'; -const { Client, KubeConfig } = require('kubernetes-client'); -const Request = require('kubernetes-client/backends/request'); +const { KubeConfig } = require('client-node-fixed-watcher'); const yargs = require('yargs'); const logger = require('./logger'); const Registry = require('./registry'); -const NodeOperator = require('./node_operator'); -const PoolOperator = require('./pool_operator'); +const { NodeOperator } = require('./node_operator'); +const { PoolOperator } = require('./pool_operator'); const Volumes = require('./volumes'); -const VolumeOperator = require('./volume_operator'); +const { VolumeOperator } = require('./volume_operator'); const ApiServer = require('./rest_api'); const CsiServer = 
require('./csi').CsiServer; const { MessageBus } = require('./nats'); const log = new logger.Logger(); -// Read k8s client configuration, in order to be able to connect to k8s api -// server, either from a file or from environment and return k8s client -// object. +// Load k8s config file. // // @param {string} [kubefile] Kube config file. // @returns {object} k8s client object. -function createK8sClient (kubefile) { - var backend; +function createKubeConfig (kubefile) { + const kubeConfig = new KubeConfig(); try { - if (kubefile != null) { + if (kubefile) { log.info('Reading k8s configuration from file ' + kubefile); - const kubeconfig = new KubeConfig(); - kubeconfig.loadFromFile(kubefile); - backend = new Request({ kubeconfig }); + kubeConfig.loadFromFile(kubefile); + } else { + kubeConfig.loadFromDefault(); } - return new Client({ backend }); } catch (e) { log.error('Cannot get k8s client configuration: ' + e); process.exit(1); } + return kubeConfig; } async function main () { - var client; - var registry; - var volumes; - var poolOper; - var volumeOper; - var csiNodeOper; - var nodeOper; - var csiServer; - var apiServer; - var messageBus; + let poolOper; + let volumeOper; + let csiNodeOper; + let nodeOper; + let kubeConfig; const opts = yargs .options({ @@ -96,6 +88,12 @@ async function main () { alias: 'verbose', describe: 'Print debug log messages', count: true + }, + w: { + alias: 'watcher-idle-timeout', + describe: 'Restart watcher connections after this many seconds if idle', + default: 0, + number: true } }) .help('help') @@ -118,13 +116,13 @@ async function main () { if (csiServer) csiServer.undoReady(); if (apiServer) apiServer.stop(); if (!opts.s) { - if (volumeOper) await volumeOper.stop(); + if (volumeOper) volumeOper.stop(); } if (volumes) volumes.stop(); if (!opts.s) { - if (poolOper) await poolOper.stop(); + if (poolOper) poolOper.stop(); if (csiNodeOper) await csiNodeOper.stop(); - if (nodeOper) await nodeOper.stop(); + if (nodeOper) 
nodeOper.stop(); } if (messageBus) messageBus.stop(); if (registry) registry.close(); @@ -142,40 +140,53 @@ async function main () { // Create csi server before starting lengthy initialization so that we can // serve csi.identity() calls while getting ready. - csiServer = new CsiServer(opts.csiAddress); + const csiServer = new CsiServer(opts.csiAddress); await csiServer.start(); - registry = new Registry(); + const registry = new Registry(); // Listen to register and deregister messages from mayastor nodes - messageBus = new MessageBus(registry); + const messageBus = new MessageBus(registry); messageBus.start(opts.m); if (!opts.s) { // Create k8s client and load openAPI spec from k8s api server - client = createK8sClient(opts.kubeconfig); - log.debug('Loading openAPI spec from the server'); - await client.loadSpec(); + kubeConfig = createKubeConfig(opts.kubeconfig); // Start k8s operators - nodeOper = new NodeOperator(opts.namespace); - await nodeOper.init(client, registry); + nodeOper = new NodeOperator( + opts.namespace, + kubeConfig, + registry, + opts.watcherIdleTimeout + ); + await nodeOper.init(kubeConfig); await nodeOper.start(); - poolOper = new PoolOperator(opts.namespace); - await poolOper.init(client, registry); + poolOper = new PoolOperator( + opts.namespace, + kubeConfig, + registry, + opts.watcherIdleTimeout + ); + await poolOper.init(kubeConfig); await poolOper.start(); } - volumes = new Volumes(registry); + const volumes = new Volumes(registry); volumes.start(); if (!opts.s) { - volumeOper = new VolumeOperator(opts.namespace); - await volumeOper.init(client, volumes); + volumeOper = new VolumeOperator( + opts.namespace, + kubeConfig, + volumes, + opts.watcherIdleTimeout + ); + await volumeOper.init(kubeConfig); await volumeOper.start(); } - apiServer = new ApiServer(registry); + const apiServer = new ApiServer(registry); await apiServer.start(opts.port); csiServer.makeReady(registry, volumes); diff --git a/csi/moac/nats.js b/csi/moac/nats.js index 
df667178d..cba658c4a 100644 --- a/csi/moac/nats.js +++ b/csi/moac/nats.js @@ -49,26 +49,28 @@ class MessageBus { return this.connected; } + // The method is async but returns immediately. + // However it's up to caller if she wants to wait for it. _connect () { log.debug(`Connecting to NATS at "${this.endpoint}" ...`); if (this.timeout) clearTimeout(this.timeout); assert(!this.nc); - this.nc = nats.connect({ + nats.connect({ servers: [`nats://${this.endpoint}`] - }); - var self = this; - this.nc.on('connect', () => { - log.info(`Connected to NATS message bus at "${this.endpoint}"`); - self.connected = true; - self._subscribe(); - }); - this.nc.on('error', (err) => { - log.error(`${err}`); - self._disconnect(); - log.debug(`Reconnecting after ${self.reconnectDelay}ms`); - // reconnect but give it some time to recover to prevent spinning in loop - self.timeout = setTimeout(self._connect.bind(self), self.reconnectDelay); - }); + }) + .then((nc) => { + log.info(`Connected to NATS message bus at "${this.endpoint}"`); + this.nc = nc; + this.connected = true; + this._subscribe(); + }) + .catch((err) => { + log.error(`${err}`); + this._disconnect(); + log.debug(`Reconnecting after ${this.reconnectDelay}ms`); + // reconnect but give it some time to recover to prevent spinning in loop + this.timeout = setTimeout(this._connect.bind(this), this.reconnectDelay); + }); } _disconnect () { @@ -81,12 +83,9 @@ class MessageBus { } _parsePayload (msg) { - if (typeof (msg.data) !== 'string') { - log.error(`Invalid payload in ${msg.subject} message: not a string`); - return; - } + const sc = nats.StringCodec(); try { - return JSON.parse(msg.data); + return JSON.parse(sc.decode(msg.data)); } catch (e) { log.error(`Invalid payload in ${msg.subject} message: not a JSON`); } @@ -106,6 +105,7 @@ class MessageBus { log.trace(`"${id}" with "${ep}" requested registration`); this.registry.addNode(id, ep); } + _deregistrationReceived (data) { const id = data.id; if (typeof id !== 'string' || 
id.length === 0) { @@ -117,25 +117,25 @@ class MessageBus { } _subscribe () { - this.nc.subscribe('v0/registry', (err, msg) => { - if (err) { - log.error(`Error receiving a registry message: ${err}`); - return; - } - const payload = this._parsePayload(msg); + const registrySub = this.nc.subscribe('v0/registry'); + this._registryHandler(registrySub); + } + + async _registryHandler (sub) { + for await (const m of sub) { + const payload = this._parsePayload(m); if (!payload) { return; } - - if (payload.id == "v0/register") { + if (payload.id === 'v0/register') { this._registrationReceived(payload.data); - } else if (payload.id == "v0/deregister") { + } else if (payload.id === 'v0/deregister') { this._deregistrationReceived(payload.data); } else { const id = payload.id; log.error(`Unknown registry message: ${id}`); } - }); + } } } diff --git a/csi/moac/node-composition.nix b/csi/moac/node-composition.nix index 9988d2a1c..6441534a8 100644 --- a/csi/moac/node-composition.nix +++ b/csi/moac/node-composition.nix @@ -1,12 +1,9 @@ # This file has been generated by node2nix 1.8.0. Do not edit! -{ pkgs ? import { +{pkgs ? import { inherit system; - } -, system ? builtins.currentSystem -, nodejs-slim ? pkgs.nodejs-slim-12_x -, nodejs ? pkgs."nodejs-12_x" -}: + }, system ? builtins.currentSystem, nodejs-slim ? pkgs.nodejs-slim-12_x, nodejs ? pkgs."nodejs-12_x"}: + let nodeEnv = import ./node-env.nix { inherit (pkgs) stdenv python2 utillinux runCommand writeTextFile; diff --git a/csi/moac/node-env.nix b/csi/moac/node-env.nix index cd2c6b06f..4d35a5efa 100644 --- a/csi/moac/node-env.nix +++ b/csi/moac/node-env.nix @@ -1,12 +1,12 @@ # This file originates from node2nix -{ stdenv, nodejs-slim, nodejs, python2, utillinux, libtool, runCommand, writeTextFile }: +{stdenv, nodejs-slim, nodejs, python2, utillinux, libtool, runCommand, writeTextFile}: + let python = if nodejs ? 
python then nodejs.python else python2; # Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise - tarWrapper = runCommand "tarWrapper" - { } '' + tarWrapper = runCommand "tarWrapper" {} '' mkdir -p $out/bin cat > $out/bin/tar < 0; + const wasOffline = this.syncFailed > 0; if (wasOffline) { this.syncFailed = 0; } @@ -210,21 +209,20 @@ class Node extends EventEmitter { // @param {object[]} replicas New replicas with properties. // _mergePoolsAndReplicas (pools, replicas) { - var self = this; // detect modified and new pools pools.forEach((props) => { const poolReplicas = replicas.filter((r) => r.pool === props.name); - const pool = self.pools.find((p) => p.name === props.name); + const pool = this.pools.find((p) => p.name === props.name); if (pool) { // the pool already exists - update it pool.merge(props, poolReplicas); } else { // it is a new pool - self._registerPool(new Pool(props), poolReplicas); + this._registerPool(new Pool(props), poolReplicas); } }); // remove pools that no longer exist - self.pools + this.pools .filter((p) => !pools.find((ent) => ent.name === p.name)) .forEach((p) => p.unbind()); } @@ -242,20 +240,19 @@ class Node extends EventEmitter { // @param {object[]} nexusList List of nexus obtained from storage node. 
// _mergeNexus (nexusList) { - var self = this; // detect modified and new pools nexusList.forEach((props) => { - const nexus = self.nexus.find((n) => n.uuid === props.uuid); + const nexus = this.nexus.find((n) => n.uuid === props.uuid); if (nexus) { // the nexus already exists - update it nexus.merge(props); } else { // it is a new nexus - self._registerNexus(new Nexus(props, [])); + this._registerNexus(new Nexus(props, [])); } }); // remove nexus that no longer exist - const removedNexus = self.nexus.filter( + const removedNexus = this.nexus.filter( (n) => !nexusList.find((ent) => ent.uuid === n.uuid) ); removedNexus.forEach((n) => n.destroy()); @@ -339,7 +336,7 @@ class Node extends EventEmitter { async createPool (name, disks) { log.debug(`Creating pool "${name}@${this.name}" ...`); - var poolInfo = await this.call('createPool', { name, disks }); + const poolInfo = await this.call('createPool', { name, disks }); log.info(`Created pool "${name}@${this.name}"`); const newPool = new Pool(poolInfo); @@ -357,7 +354,7 @@ class Node extends EventEmitter { const children = replicas.map((r) => r.uri); log.debug(`Creating nexus "${uuid}@${this.name}"`); - var nexusInfo = await this.call('createNexus', { uuid, size, children }); + const nexusInfo = await this.call('createNexus', { uuid, size, children }); log.info(`Created nexus "${uuid}@${this.name}"`); const newNexus = new Nexus(nexusInfo); diff --git a/csi/moac/node_operator.js b/csi/moac/node_operator.js deleted file mode 100644 index dee49e6e3..000000000 --- a/csi/moac/node_operator.js +++ /dev/null @@ -1,265 +0,0 @@ -// Node operator is responsible for managing mayastor node custom resources -// that represent nodes in the cluster that run mayastor (storage nodes). -// -// Roles: -// * The operator creates/modifies/deletes the resources to keep them up to date. 
-// * A user can delete a stale resource (can happen that moac doesn't know) - -'use strict'; - -const assert = require('assert'); -const fs = require('fs'); -const path = require('path'); -const yaml = require('js-yaml'); -const EventStream = require('./event_stream'); -const log = require('./logger').Logger('node-operator'); -const Watcher = require('./watcher'); -const Workq = require('./workq'); - -const crdNode = yaml.safeLoad( - fs.readFileSync(path.join(__dirname, '/crds/mayastornode.yaml'), 'utf8') -); - -// Node operator watches k8s CSINode resources and based on that detects -// running mayastor instances in the cluster. -class NodeOperator { - // init() is decoupled from constructor because tests do their own - // initialization of the object. - // - // @param {string} namespace Namespace the operator should operate on. - constructor (namespace) { - this.k8sClient = null; // k8s client for sending requests to api srv - this.watcher = null; // k8s resource watcher for CSI nodes resource - this.registry = null; - this.namespace = namespace; - this.workq = new Workq(); // for serializing node operations - } - - // Create node CRD if it doesn't exist and augment client object so that CRD - // can be manipulated as any other standard k8s api object. - // - // @param {object} k8sClient Client for k8s api server. - // @param {object} registry Registry with node objects. - // - async init (k8sClient, registry) { - log.info('Initializing node operator'); - assert(registry); - - try { - await k8sClient.apis[ - 'apiextensions.k8s.io' - ].v1beta1.customresourcedefinitions.post({ body: crdNode }); - log.info('Created CRD ' + crdNode.spec.names.kind); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. 
- if (err.statusCode !== 409) throw err; - } - k8sClient.addCustomResourceDefinition(crdNode); - - this.k8sClient = k8sClient; - this.registry = registry; - - // Initialize watcher with all callbacks for new/mod/del events - this.watcher = new Watcher( - 'node', - this.k8sClient.apis['openebs.io'].v1alpha1.namespaces( - this.namespace - ).mayastornodes, - this.k8sClient.apis['openebs.io'].v1alpha1.watch.namespaces( - this.namespace - ).mayastornodes, - this._filterMayastorNode - ); - } - - // Normalize k8s mayastor node resource. - // - // @param {object} msn MayaStor node custom resource. - // @returns {object} Properties defining the node. - // - _filterMayastorNode (msn) { - if (!msn.spec.grpcEndpoint) { - log.warn('Ignoring mayastor node resource without grpc endpoint'); - return null; - } - return { - metadata: { name: msn.metadata.name }, - spec: { - grpcEndpoint: msn.spec.grpcEndpoint - }, - status: msn.status || 'unknown' - }; - } - - // Bind watcher's new/del events to node operator's callbacks. - // - // Not interested in mod events as the operator is the only who should - // be doing modifications to these objects. - // - // @param {object} watcher k8s node resource watcher. - // - _bindWatcher (watcher) { - var self = this; - watcher.on('new', (obj) => { - self.registry.addNode(obj.metadata.name, obj.spec.grpcEndpoint); - }); - watcher.on('del', (obj) => { - self.registry.removeNode(obj.metadata.name); - }); - } - - // Start node operator's watcher loop. - async start () { - var self = this; - - // install event handlers to follow changes to resources. - self._bindWatcher(self.watcher); - await self.watcher.start(); - - // This will start async processing of node events. 
- self.eventStream = new EventStream({ registry: self.registry }); - self.eventStream.on('data', async (ev) => { - if (ev.kind !== 'node') return; - - await self.workq.push(ev, self._onNodeEvent.bind(self)); - }); - } - - async _onNodeEvent (ev) { - var self = this; - const name = ev.object.name; - if (ev.eventType === 'new' || ev.eventType === 'mod') { - const endpoint = ev.object.endpoint; - const k8sNode = await self.watcher.getRawBypass(name); - - if (k8sNode) { - // Update object only if it has really changed - if (k8sNode.spec.grpcEndpoint !== endpoint) { - try { - await self._updateResource(name, k8sNode, endpoint); - } catch (err) { - log.error(`Failed to update node resource "${name}": ${err}`); - return; - } - } - } else if (ev.eventType === 'new') { - try { - await self._createResource(name, endpoint); - } catch (err) { - log.error(`Failed to create node resource "${name}": ${err}`); - return; - } - } - - await this._updateStatus(name, ev.object.isSynced() ? 'online' : 'offline'); - } else if (ev.eventType === 'del') { - await self._deleteResource(ev.object.name); - } else { - assert.strictEqual(ev.eventType, 'sync'); - } - } - - // Create k8s CRD object. - // - // @param {string} name Node of the created node. - // @param {string} grpcEndpoint Endpoint property of the object. - // - async _createResource (name, grpcEndpoint) { - log.info(`Creating node resource "${name}"`); - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastornodes.post({ - body: { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: { - name, - namespace: this.namespace - }, - spec: { grpcEndpoint } - } - }); - } - - // Update properties of k8s CRD object or create it if it does not exist. - // - // @param {string} name Name of the updated node. - // @param {object} k8sNode Existing k8s resource object. - // @param {string} grpcEndpoint Endpoint property of the object. 
- // - async _updateResource (name, k8sNode, grpcEndpoint) { - log.info(`Updating spec of node resource "${name}"`); - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastornodes(name) - .put({ - body: { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: k8sNode.metadata, - spec: { grpcEndpoint } - } - }); - } - - // Update state of the resource. - // - // NOTE: This method does not throw if the operation fails as there is nothing - // we can do if it fails. Though we log an error message in such a case. - // - // @param {string} name UUID of the resource. - // @param {string} status State of the node. - // - async _updateStatus (name, status) { - var k8sNode = await this.watcher.getRawBypass(name); - if (!k8sNode) { - log.warn( - `Wanted to update state of node resource "${name}" that disappeared` - ); - return; - } - if (k8sNode.status === status) { - // avoid unnecessary status updates - return; - } - log.debug(`Updating status of node resource "${name}"`); - k8sNode.status = status; - try { - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastornodes(name) - .status.put({ body: k8sNode }); - } catch (err) { - log.error(`Failed to update status of node resource "${name}": ${err}`); - } - } - - // Delete node resource with specified name. - // - // @param {string} name Name of the node resource to delete. 
- // - async _deleteResource (name) { - var k8sNode = await this.watcher.getRawBypass(name); - if (k8sNode) { - log.info(`Deleting node resource "${name}"`); - try { - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastornodes(name) - .delete(); - } catch (err) { - log.error(`Failed to delete node resource "${name}": ${err}`); - } - } - } - - // Stop listening for watcher and node events and reset the cache - async stop () { - this.watcher.removeAllListeners(); - await this.watcher.stop(); - this.eventStream.destroy(); - this.eventStream = null; - } -} - -module.exports = NodeOperator; diff --git a/csi/moac/node_operator.ts b/csi/moac/node_operator.ts new file mode 100644 index 000000000..ce6b87c18 --- /dev/null +++ b/csi/moac/node_operator.ts @@ -0,0 +1,278 @@ +// Node operator is responsible for managing mayastor node custom resources +// that represent nodes in the cluster that run mayastor (storage nodes). +// +// Roles: +// * The operator creates/modifies/deletes the resources to keep them up to date. +// * A user can delete a stale resource (can happen that moac doesn't know) + +import assert from 'assert'; +import * as fs from 'fs'; +import * as path from 'path'; +import { + ApiextensionsV1Api, + KubeConfig, +} from 'client-node-fixed-watcher'; +import { + CustomResource, + CustomResourceCache, + CustomResourceMeta, +} from './watcher'; + +const yaml = require('js-yaml'); +const EventStream = require('./event_stream'); +const log = require('./logger').Logger('node-operator'); +const Workq = require('./workq'); + +const RESOURCE_NAME: string = 'mayastornode'; +const crdNode = yaml.safeLoad( + fs.readFileSync(path.join(__dirname, '/crds/mayastornode.yaml'), 'utf8') +); + +// State of a storage node. +enum NodeState { + Unknown = "unknown", + Online = "online", + Offline = "offline", +} + +// Object defines properties of node resource. 
+export class NodeResource extends CustomResource { + apiVersion?: string; + kind?: string; + metadata: CustomResourceMeta; + spec: { grpcEndpoint: string }; + status?: NodeState; + + constructor(cr: CustomResource) { + super(); + this.apiVersion = cr.apiVersion; + this.kind = cr.kind; + if (cr.status === NodeState.Online) { + this.status = NodeState.Online; + } else if (cr.status === NodeState.Offline) { + this.status = NodeState.Offline; + } else { + this.status = NodeState.Unknown; + } + if (cr.metadata === undefined) { + throw new Error('missing metadata'); + } else { + this.metadata = cr.metadata; + } + if (cr.spec === undefined) { + throw new Error('missing spec'); + } else { + let grpcEndpoint = (cr.spec as any).grpcEndpoint; + if (grpcEndpoint === undefined) { + throw new Error('missing grpc endpoint in spec'); + } + this.spec = { grpcEndpoint }; + } + } +} + +export class NodeOperator { + watcher: CustomResourceCache; // k8s resource watcher for nodes + registry: any; + namespace: string; + workq: any; // for serializing node operations + eventStream: any; // events from the registry + + // Create node operator object. + // + // @param namespace Namespace the operator should operate on. + // @param kubeConfig KubeConfig. + // @param registry Registry with node objects. + // @param [idleTimeout] Timeout for restarting watcher connection when idle. + constructor ( + namespace: string, + kubeConfig: KubeConfig, + registry: any, + idleTimeout: number | undefined, + ) { + assert(registry); + this.namespace = namespace; + this.workq = new Workq(); + this.registry = registry; + this.watcher = new CustomResourceCache( + this.namespace, + RESOURCE_NAME, + kubeConfig, + NodeResource, + { idleTimeout } + ); + } + + // Create node CRD if it doesn't exist. + // + // @param kubeConfig KubeConfig. 
+ async init (kubeConfig: KubeConfig) { + log.info('Initializing node operator'); + let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); + try { + await k8sExtApi.createCustomResourceDefinition(crdNode); + log.info(`Created CRD ${RESOURCE_NAME}`); + } catch (err) { + // API returns a 409 Conflict if CRD already exists. + if (err.statusCode !== 409) throw err; + } + } + + // Bind watcher's new/del events to node operator's callbacks. + // + // Not interested in mod events as the operator is the only who should + // be doing modifications to these objects. + // + // @param {object} watcher k8s node resource watcher. + // + _bindWatcher (watcher: CustomResourceCache) { + watcher.on('new', (obj: NodeResource) => { + if (obj.metadata) { + this.registry.addNode(obj.metadata.name, obj.spec.grpcEndpoint); + } + }); + watcher.on('del', (obj: NodeResource) => { + this.registry.removeNode(obj.metadata.name); + }); + } + + // Start node operator's watcher loop. + async start () { + // install event handlers to follow changes to resources. + this._bindWatcher(this.watcher); + await this.watcher.start(); + + // This will start async processing of node events. + this.eventStream = new EventStream({ registry: this.registry }); + this.eventStream.on('data', async (ev: any) => { + if (ev.kind !== 'node') return; + await this.workq.push(ev, this._onNodeEvent.bind(this)); + }); + } + + async _onNodeEvent (ev: any) { + const name = ev.object.name; + if (ev.eventType === 'new') { + const grpcEndpoint = ev.object.endpoint; + let origObj = this.watcher.get(name); + if (origObj === undefined) { + await this._createResource(name, grpcEndpoint); + } else { + await this._updateSpec(name, grpcEndpoint); + } + await this._updateStatus( + name, + ev.object.isSynced() ? 
NodeState.Online : NodeState.Offline, + ); + } else if (ev.eventType === 'mod') { + const grpcEndpoint = ev.object.endpoint; + let origObj = this.watcher.get(name); + // The node might be just going away - do nothing if not in the cache + if (origObj !== undefined) { + await this._updateSpec(name, grpcEndpoint); + await this._updateStatus( + name, + ev.object.isSynced() ? NodeState.Online : NodeState.Offline, + ); + } + } else if (ev.eventType === 'del') { + await this._deleteResource(ev.object.name); + } else { + assert.strictEqual(ev.eventType, 'sync'); + } + } + + async _createResource(name: string, grpcEndpoint: string) { + log.info(`Creating node resource "${name}"`); + try { + await this.watcher.create({ + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorNode', + metadata: { + name, + namespace: this.namespace + }, + spec: { grpcEndpoint } + }); + } catch (err) { + log.error(`Failed to create node resource "${name}": ${err}`); + } + } + + // Update properties of k8s CRD object or create it if it does not exist. + // + // @param name Name of the updated node. + // @param grpcEndpoint Endpoint property of the object. + // + async _updateSpec (name: string, grpcEndpoint: string) { + try { + await this.watcher.update(name, (orig: NodeResource) => { + // Update object only if it has really changed + if (orig.spec.grpcEndpoint === grpcEndpoint) { + return; + } + log.info(`Updating spec of node resource "${name}"`); + return { + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorNode', + metadata: orig.metadata, + spec: { grpcEndpoint } + }; + }); + } catch (err) { + log.error(`Failed to update node resource "${name}": ${err}`); + } + } + + // Update state of the resource. + // + // NOTE: This method does not throw if the operation fails as there is nothing + // we can do if it fails. Though we log an error message in such a case. + // + // @param name UUID of the resource. + // @param status State of the node. 
+ // + async _updateStatus (name: string, status: NodeState) { + try { + await this.watcher.updateStatus(name, (orig: NodeResource) => { + // avoid unnecessary status updates + if (orig.status === status) { + return; + } + log.debug(`Updating status of node resource "${name}"`); + return { + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorNode', + metadata: orig.metadata, + spec: orig.spec, + status: status, + }; + }); + } catch (err) { + log.error(`Failed to update status of node resource "${name}": ${err}`); + } + } + + // Delete node resource with specified name. + // + // @param {string} name Name of the node resource to delete. + // + async _deleteResource (name: string) { + try { + log.info(`Deleting node resource "${name}"`); + await this.watcher.delete(name); + } catch (err) { + log.error(`Failed to delete node resource "${name}": ${err}`); + } + } + + // Stop listening for watcher and node events and reset the cache + stop () { + this.watcher.stop(); + this.watcher.removeAllListeners(); + if (this.eventStream) { + this.eventStream.destroy(); + this.eventStream = null; + } + } +} diff --git a/csi/moac/package-lock.json b/csi/moac/package-lock.json index 2cb6e658a..f6e7caf33 100644 --- a/csi/moac/package-lock.json +++ b/csi/moac/package-lock.json @@ -5,76 +5,134 @@ "requires": true, "dependencies": { "@babel/code-frame": { - "version": "7.10.1", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.1.tgz", - "integrity": "sha512-IGhtTmpjGbYzcEDOw7DcQtbQSXcG9ftmAXtWTu9V936vDye4xjjekktFAtgZsWpzTj/X01jocB46mTywm/4SZw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", + "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", "dev": true, "requires": { - "@babel/highlight": "^7.10.1" + "@babel/highlight": "^7.10.4" } }, "@babel/helper-validator-identifier": { - "version": "7.10.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.1.tgz", - "integrity": "sha512-5vW/JXLALhczRCWP0PnFDMCJAchlBvM7f4uk/jXritBnIa6E1KmqmtrS3yn1LAnxFBypQ3eneLuXjsnfQsgILw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", + "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==", "dev": true }, "@babel/highlight": { - "version": "7.10.1", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.1.tgz", - "integrity": "sha512-8rMof+gVP8mxYZApLF/JgNDAkdKa+aJt3ZYxF8z6+j/hpeXL7iMsKCPHa2jNMHu/qqBwzQF4OHNoYi8dMA/rYg==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", + "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", "dev": true, "requires": { - "@babel/helper-validator-identifier": "^7.10.1", + "@babel/helper-validator-identifier": "^7.10.4", "chalk": "^2.0.0", "js-tokens": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": 
"sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } } }, - "@grpc/proto-loader": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.4.tgz", - "integrity": "sha512-HTM4QpI9B2XFkPz7pjwMyMgZchJ93TVkL3kWPW8GDMDKYxsMnmf4w2TNMJK7+KNiYHS5cJrCEAFlF+AwtXWVPA==", + "@dabh/diagnostics": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.2.tgz", + "integrity": "sha512-+A1YivoVDNNVCdfozHSR8v/jyuuLTMXwjWuxPFlFlUapXoGc+Gj9mDlTDDfrwl7rXCl2tNZ0kE8sIBO6YOn96Q==", "requires": { - "lodash.camelcase": "^4.3.0", - "protobufjs": "^6.8.6" + "colorspace": "1.1.x", + "enabled": "2.0.x", + "kuler": "^2.0.0" } }, - "@kubernetes/client-node": { - "version": "0.10.2", - "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.10.2.tgz", - "integrity": "sha512-JvsmxbTwiMqsh9LyuXMzT5HjoENFbB3a/JroJsobuAzkxN162UqAOvg++/AA+ccIMWRR2Qln4FyaOJ0a4eKyXg==", + "@eslint/eslintrc": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.2.1.tgz", + "integrity": "sha512-XRUeBZ5zBWLYgSANMpThFddrZZkEbGHgUdt5UJjZfnlN9BGCiUBrf+nvbRupSjMvqzwnQN0qwCmOxITt1cfywA==", + "dev": true, "requires": { - "@types/js-yaml": "^3.12.1", - "@types/node": "^10.12.0", - "@types/request": "^2.47.1", - "@types/underscore": "^1.8.9", - "@types/ws": "^6.0.1", - "isomorphic-ws": "^4.0.1", + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^12.1.0", + "ignore": 
"^4.0.6", + "import-fresh": "^3.2.1", "js-yaml": "^3.13.1", - "json-stream": "^1.0.0", - "jsonpath-plus": "^0.19.0", - "request": "^2.88.0", - "shelljs": "^0.8.2", - "tslib": "^1.9.3", - "underscore": "^1.9.1", - "ws": "^6.1.0" + "lodash": "^4.17.19", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" }, "dependencies": { - "@types/node": { - "version": "10.17.24", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.24.tgz", - "integrity": "sha512-5SCfvCxV74kzR3uWgTYiGxrd69TbT1I6+cMx1A5kEly/IVveJBimtAMlXiEyVFn5DvUFewQWxOOiJhlxeQwxgA==" - }, - "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", + "debug": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "dev": true, "requires": { - "async-limiter": "~1.0.0" + "ms": "2.1.2" } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true } } }, + "@grpc/proto-loader": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.5.tgz", + "integrity": "sha512-WwN9jVNdHRQoOBo9FDH7qU+mgfjPc8GygPYms3M+y3fbQLfnCe/Kv/E01t7JRgnrsOHH8euvSbed3mIalXhwqQ==", + "requires": { + "lodash.camelcase": "^4.3.0", + "protobufjs": "^6.8.6" + } + }, "@panva/asn1.js": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/@panva/asn1.js/-/asn1.js-1.0.0.tgz", @@ -135,23 +193,23 @@ "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" }, "@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.0.0.tgz", + "integrity": "sha512-FyD2meJpDPjyNQejSjvnhpgI/azsQkA4lGbuu5BQZfjvJ9cbRZXzeWL2HceCekW4lixO9JPesIIQkSoLjeJHNQ==" }, "@sinonjs/commons": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.7.1.tgz", - "integrity": "sha512-Debi3Baff1Qu1Unc3mjJ96MgpbwTn43S1+9yJ0llWygPwDNu2aaWBD6yc9y/Z8XDRNhx7U+u2UDg2OGQXkclUQ==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.1.tgz", + "integrity": "sha512-892K+kWUUi3cl+LlqEWIDrhvLgdL79tECi8JZUyq6IviKy/DNhuzCRlbHUjxK89f4ypPMMaFnFuR9Ie6DoIMsw==", "dev": true, "requires": { "type-detect": "4.0.8" } }, "@sinonjs/fake-timers": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.0.tgz", - "integrity": "sha512-atR1J/jRXvQAb47gfzSK8zavXy7BcpnYq21ALon0U99etu99vsir0trzIO3wpeLtW+LLVY6X7EkfVTbjGSH8Ww==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz", + "integrity": "sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA==", "dev": true, "requires": { "@sinonjs/commons": "^1.7.0" @@ -168,9 +226,9 @@ } }, "@sinonjs/samsam": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-5.0.3.tgz", - "integrity": "sha512-QucHkc2uMJ0pFGjJUDP3F9dq5dx8QIaqISl9QgwLOh6P9yv877uONPGXh/OH/0zmM3tW1JjuJltAZV2l7zU+uQ==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-5.1.0.tgz", + 
"integrity": "sha512-42nyaQOVunX5Pm6GRJobmzbS7iLI+fhERITnETXzzwDZh+TtDr/Au3yAvXVjFmZ4wEUaE4Y3NFZfKv0bV0cbtg==", "dev": true, "requires": { "@sinonjs/commons": "^1.6.0", @@ -185,11 +243,22 @@ "dev": true }, "@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.5.tgz", + "integrity": "sha512-PyRA9sm1Yayuj5OIoJ1hGt2YISX45w9WcFbh6ddT0Z/0yaFxOtGLInr4jUfU1EAFVs0Yfyfev4RNwBlUaHdlDQ==", + "requires": { + "defer-to-connect": "^2.0.0" + } + }, + "@types/cacheable-request": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.1.tgz", + "integrity": "sha512-ykFq2zmBGOCbpIXtoVbz4SKY5QriWPh3AjyU4G74RYbtt5yOc5OfaY75ftjg7mikMOla1CTGpX3lLbuJh8DTrQ==", "requires": { - "defer-to-connect": "^1.0.1" + "@types/http-cache-semantics": "*", + "@types/keyv": "*", + "@types/node": "*", + "@types/responselike": "*" } }, "@types/caseless": { @@ -197,35 +266,52 @@ "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", "integrity": "sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" + "@types/http-cache-semantics": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.0.tgz", + "integrity": "sha512-c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A==" + }, + "@types/js-yaml": { + "version": "3.12.5", + "resolved": 
"https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.5.tgz", + "integrity": "sha512-JCcp6J0GV66Y4ZMDAQCXot4xprYB+Zfd3meK9+INSJeVZwJmHAW30BBEEkPzXswMXuiyReUGOP3GxrADc9wPww==" + }, + "@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", + "dev": true }, - "@types/got": { - "version": "9.6.11", - "resolved": "https://registry.npmjs.org/@types/got/-/got-9.6.11.tgz", - "integrity": "sha512-dr3IiDNg5TDesGyuwTrN77E1Cd7DCdmCFtEfSGqr83jMMtcwhf/SGPbN2goY4JUWQfvxwY56+e5tjfi+oXeSdA==", + "@types/keyv": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.1.tgz", + "integrity": "sha512-MPtoySlAZQ37VoLaPcTHCu1RWJ4llDkULYZIzOYxlhxBqYPB0RsRlmMU0R6tahtFe27mIdkHV+551ZWV4PLmVw==", "requires": { - "@types/node": "*", - "@types/tough-cookie": "*", - "form-data": "^2.5.0" + "@types/node": "*" } }, - "@types/js-yaml": { - "version": "3.12.4", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.4.tgz", - "integrity": "sha512-fYMgzN+9e28R81weVN49inn/u798ruU91En1ZnGvSZzCRc5jXx9B2EDhlRaWmcO1RIxFHL8AajRXzxDuJu93+A==" + "@types/lodash": { + "version": "4.14.161", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.161.tgz", + "integrity": "sha512-EP6O3Jkr7bXvZZSZYlsgt5DIjiGr0dXP1/jVEwVLTFgg0d+3lWVQkRavYVQszV7dYUwvg0B8R0MBDpcmXg7XIA==" }, "@types/long": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, + "@types/minipass": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@types/minipass/-/minipass-2.2.0.tgz", + "integrity": "sha512-wuzZksN4w4kyfoOv/dlpov4NOunwutLA/q7uc00xU02ZyUY+aoM5PWIXEKBMnm0NHd4a+N71BMjq+x7+2Af1fg==", + "requires": { + "@types/node": "*" + } + }, "@types/node": { - "version": "13.13.9", - "resolved": 
"https://registry.npmjs.org/@types/node/-/node-13.13.9.tgz", - "integrity": "sha512-EPZBIGed5gNnfWCiwEIwTE2Jdg4813odnG8iNPMQGrqVxrI+wL68SPtPeCX+ZxGBaA6pKAVc6jaKgP/Q0QzfdQ==" + "version": "13.13.23", + "resolved": "https://registry.npmjs.org/@types/node/-/node-13.13.23.tgz", + "integrity": "sha512-L31WmMJYKb15PDqFWutn8HNwrNK6CE6bkWgSB0dO1XpNoHrszVKV1Clcnfgd6c/oG54TVF8XQEvY2gQrW8K6Mw==" }, "@types/request": { "version": "2.48.5", @@ -238,15 +324,40 @@ "form-data": "^2.5.0" } }, + "@types/responselike": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", + "integrity": "sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==", + "requires": { + "@types/node": "*" + } + }, + "@types/stream-buffers": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.3.tgz", + "integrity": "sha512-NeFeX7YfFZDYsCfbuaOmFQ0OjSmHreKBpp7MQ4alWQBHeh2USLsj7qyMyn9t82kjqIX516CR/5SRHnARduRtbQ==", + "requires": { + "@types/node": "*" + } + }, + "@types/tar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.3.tgz", + "integrity": "sha512-Z7AVMMlkI8NTWF0qGhC4QIX0zkV/+y0J8x7b/RsHrN0310+YNjoJd8UrApCiGBCWtKjxS9QhNqLi2UJNToh5hA==", + "requires": { + "@types/minipass": "*", + "@types/node": "*" + } + }, "@types/tough-cookie": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.0.tgz", "integrity": "sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A==" }, "@types/underscore": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.10.0.tgz", - "integrity": "sha512-ZAbqul7QAKpM2h1PFGa5ETN27ulmqtj0QviYHasw9LffvXZvVHuraOx/FOsIPPDNGZN0Qo1nASxxSfMYOtSoCw==" + "version": "1.10.24", + "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.10.24.tgz", + "integrity": 
"sha512-T3NQD8hXNW2sRsSbLNjF/aBo18MyJlbw0lSpQHB/eZZtScPdexN4HSa8cByYwTw9Wy7KuOFr81mlDQcQQaZ79w==" }, "@types/ws": { "version": "6.0.4", @@ -266,30 +377,30 @@ } }, "acorn": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.2.0.tgz", - "integrity": "sha512-apwXVmYVpQ34m/i71vrApRrRKCWQnZZF1+npOD0WV5xZFfwWOmKGQ2RWlfdy9vWITsenisM8M0Qeq8agcFHNiQ==", + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", "dev": true }, "acorn-jsx": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", - "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", + "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", "dev": true }, "aggregate-error": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.0.1.tgz", - "integrity": "sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", "requires": { "clean-stack": "^2.0.0", "indent-string": "^4.0.0" } }, "ajv": { - "version": "6.12.2", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.2.tgz", - "integrity": "sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -298,15 +409,9 @@ } }, "ansi-colors": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.3.tgz", - "integrity": "sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==", - "dev": true - }, - "ansi-escapes": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", - "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", "dev": true }, "ansi-regex": { @@ -315,12 +420,29 @@ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" }, "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, "requires": { - "color-convert": "^1.9.0" + "color-convert": "^2.0.1" + }, + "dependencies": { + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", 
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + } } }, "anymatch": { @@ -357,6 +479,39 @@ "is-string": "^1.0.5" } }, + "array.prototype.flat": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.3.tgz", + "integrity": "sha512-gBlRZV0VSmfPIeWfuuy56XZMvbVfbEUnOXUvt3F/eUUUSyzlgLxhEX4YAEpxNAogRGehPSnfXyPtYyKAhkzQhQ==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1" + } + }, + "array.prototype.flatmap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.2.3.tgz", + "integrity": "sha512-OOEk+lkePcg+ODXIpvuU9PAryCikCJyo7GlDG1upleEpQRx6mzL9puEBkozQ5iAx20KV0l3DbyQwqciJtqe5Pg==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1", + "function-bind": "^1.1.1" + } + }, + "array.prototype.map": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array.prototype.map/-/array.prototype.map-1.0.2.tgz", + "integrity": "sha512-Az3OYxgsa1g7xDYp86l0nnN4bcmuEITGe1rbdEBVkrqkzMgDcbdQ2R7r41pNzti+4NMces3H8gMmuioZUilLgw==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1", + "es-array-method-boxes-properly": "^1.0.0", + "is-string": "^1.0.4" + } + }, "ascli": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ascli/-/ascli-1.0.1.tgz", @@ -392,17 +547,9 @@ "dev": true }, "async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "requires": { - "lodash": "^4.17.14" - } - }, - "async-limiter": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": 
"sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", + "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" }, "asynckit": { "version": "0.4.0", @@ -415,9 +562,9 @@ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" }, "aws4": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.10.0.tgz", - "integrity": "sha512-3YDiu347mtVtjpyV3u5kVqQLP242c06zwDOgpeRnybmXlYYsLbtTrUBUm8i8srONt+FWobl5aibnU1030PeeuA==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" }, "balanced-match": { "version": "1.0.0", @@ -438,9 +585,9 @@ } }, "binary-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.0.0.tgz", - "integrity": "sha512-Phlt0plgpIIBOGTT/ehfFnbNlfsDEiqmzE2KRXoX1bLIlir4X/MR+zSyBEkL05ffWgnRSf/DXv+WrUAVr93/ow==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz", + "integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==", "dev": true }, "body-parser": { @@ -458,6 +605,13 @@ "qs": "6.7.0", "raw-body": "2.4.0", "type-is": "~1.6.17" + }, + "dependencies": { + "qs": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", + "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" + } } }, "brace-expansion": { @@ -484,6 +638,17 @@ "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", "dev": true }, + "buffer-from": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "dev": true + }, + "byline": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", + "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=" + }, "bytebuffer": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/bytebuffer/-/bytebuffer-5.0.1.tgz", @@ -504,35 +669,45 @@ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" }, + "cacheable-lookup": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.3.tgz", + "integrity": "sha512-W+JBqF9SWe18A72XFzN/V/CULFzPm7sBXzzR6ekkE+3tLG72wFZrBiBZhrZuDoYexop4PHJVdFAKb/Nj9+tm9w==" + }, "cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.1.tgz", + "integrity": "sha512-lt0mJ6YAnsrBErpTMWeu5kl/tg9xMAWjavYTN6VQXM1A/teBITuNcccXsCxF0tDQQJf9DfAaX5O4e0zp0KlfZw==", "requires": { "clone-response": "^1.0.2", "get-stream": "^5.1.0", "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", + "keyv": "^4.0.0", "lowercase-keys": "^2.0.0", "normalize-url": "^4.1.0", - "responselike": "^1.0.2" + "responselike": "^2.0.0" }, "dependencies": { "get-stream": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", - "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "version": "5.2.0", + "resolved": 
"https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", "requires": { "pump": "^3.0.0" } - }, - "lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" } } }, + "call-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.0.tgz", + "integrity": "sha512-AEXsYIyyDY3MCzbwdhzG3Jx1R0J2wetQyUynn6dYHAO+bg8l1k7jwZtRv4ryryFs7EP+NDlikJlVe59jr0cM2w==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.0" + } + }, "callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -564,33 +739,15 @@ } }, "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", "dev": true, "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "dependencies": { - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - } + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" } }, - "chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": 
"sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "dev": true - }, "check-error": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", @@ -598,41 +755,67 @@ "dev": true }, "chokidar": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.3.0.tgz", - "integrity": "sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.2.tgz", + "integrity": "sha512-IZHaDeBeI+sZJRX7lGcXsdzgvZqKv6sECqsbErJA4mHWfpRrD8B97kSFN4cQz6nGBGiuFia1MKR4d6c1o8Cv7A==", "dev": true, "requires": { "anymatch": "~3.1.1", "braces": "~3.0.2", - "fsevents": "~2.1.1", + "fsevents": "~2.1.2", "glob-parent": "~5.1.0", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", - "readdirp": "~3.2.0" + "readdirp": "~3.4.0" } }, + "chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==" + }, "clean-stack": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" }, - "cli-cursor": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", - "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", - "dev": true, + "client-node-fixed-watcher": { + "version": "0.13.2", + "resolved": "https://registry.npmjs.org/client-node-fixed-watcher/-/client-node-fixed-watcher-0.13.2.tgz", + "integrity": "sha512-Ze0lahaDt28q9OnYZDTMOKq2zJs64ETwyfWEOMjUErtY7hXjL7z725Nu5Ghfb3Fagujy/bSJ2QUXRuNioQqC8w==", "requires": { - "restore-cursor": "^2.0.0" + "@types/js-yaml": "^3.12.1", + "@types/node": "^10.12.0", + 
"@types/request": "^2.47.1", + "@types/stream-buffers": "^3.0.3", + "@types/tar": "^4.0.3", + "@types/underscore": "^1.8.9", + "@types/ws": "^6.0.1", + "byline": "^5.0.0", + "execa": "1.0.0", + "isomorphic-ws": "^4.0.1", + "js-yaml": "^3.13.1", + "jsonpath-plus": "^0.19.0", + "openid-client": "^4.1.1", + "request": "^2.88.0", + "rfc4648": "^1.3.0", + "shelljs": "^0.8.2", + "stream-buffers": "^3.0.2", + "tar": "^6.0.2", + "tmp-promise": "^3.0.2", + "tslib": "^1.9.3", + "underscore": "^1.9.1", + "ws": "^7.3.1" + }, + "dependencies": { + "@types/node": { + "version": "10.17.44", + "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.44.tgz", + "integrity": "sha512-vHPAyBX1ffLcy4fQHmDyIUMUb42gHZjPHU66nhvbMzAWJqHnySGZ6STwN3rwrnSd1FHB0DI/RWgGELgKSYRDmw==" + } } }, - "cli-width": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.1.tgz", - "integrity": "sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==", - "dev": true - }, "cliui": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", @@ -687,11 +870,6 @@ "simple-swizzle": "^0.2.2" } }, - "colornames": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/colornames/-/colornames-1.1.1.tgz", - "integrity": "sha1-+IiQMGhcfE/54qVZ9Qd+t2qBb5Y=" - }, "colors": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", @@ -736,6 +914,13 @@ "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", "requires": { "safe-buffer": "5.1.2" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } } }, "content-type": { @@ -762,7 +947,6 @@ "version": "6.0.5", "resolved": 
"https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dev": true, "requires": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -787,23 +971,24 @@ "ms": "2.0.0" } }, - "debug-log": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/debug-log/-/debug-log-1.0.1.tgz", - "integrity": "sha1-IwdjLUwEOCuN+KMvcLiVBG1SdF8=", - "dev": true - }, "decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=" }, "decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", "requires": { - "mimic-response": "^1.0.0" + "mimic-response": "^3.1.0" + }, + "dependencies": { + "mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==" + } } }, "deep-eql": { @@ -821,47 +1006,19 @@ "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", "dev": true }, - "deepmerge": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz", - "integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==" - }, "defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" 
+ "version": "2.0.0", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.0.tgz", + "integrity": "sha512-bYL2d05vOSf1JEZNx5vSAtPuBMkX8K9EUutg7zlKvTqKXHt7RhWJFbmd7qakVuf13i+IkGmp6FwSsONOf6VYIg==" }, "define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, "requires": { "object-keys": "^1.0.12" } }, - "deglob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/deglob/-/deglob-4.0.1.tgz", - "integrity": "sha512-/g+RDZ7yf2HvoW+E5Cy+K94YhgcFgr6C8LuHZD1O5HoNPkf3KY6RfXJ0DBGlB/NkLi5gml+G9zqRzk9S0mHZCg==", - "dev": true, - "requires": { - "find-root": "^1.0.0", - "glob": "^7.0.5", - "ignore": "^5.0.0", - "pkg-config": "^1.1.0", - "run-parallel": "^1.1.2", - "uniq": "^1.0.1" - }, - "dependencies": { - "ignore": { - "version": "5.1.8", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", - "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", - "dev": true - } - } - }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -877,20 +1034,10 @@ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" }, - "diagnostics": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.1.1.tgz", - "integrity": "sha512-8wn1PmdunLJ9Tqbx+Fx/ZEuHfJf4NKSN2ZBj7SJC/OWRWha843+WsTjqMe1B5E3p28jqBlp+mJ2fPVxPyNgYKQ==", - "requires": { - "colorspace": "1.1.x", - "enabled": "1.0.x", - "kuler": "1.0.x" - } - }, "diff": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": 
"sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", "dev": true }, "dirty-chai": { @@ -908,11 +1055,6 @@ "esutils": "^2.0.2" } }, - "duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, "ecc-jsbn": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", @@ -934,12 +1076,9 @@ "dev": true }, "enabled": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", - "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", - "requires": { - "env-variable": "0.0.x" - } + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==" }, "encodeurl": { "version": "1.0.2", @@ -954,10 +1093,14 @@ "once": "^1.4.0" } }, - "env-variable": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.6.tgz", - "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==" + "enquirer": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", + "dev": true, + "requires": { + "ansi-colors": "^4.1.1" + } }, "error-ex": { "version": "1.3.2", @@ -976,105 +1119,201 @@ } } }, - "es-abstract": { - "version": "1.17.4", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.4.tgz", - "integrity": 
"sha512-Ae3um/gb8F0mui/jPL+QiqmglkUsaQf7FwBEHYIFkztkneosu9imhqHpBzQ3h1vit8t5iQ74t6PEVvphBZiuiQ==", + "es-abstract": { + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + }, + "object.assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.1.tgz", + "integrity": "sha512-VT/cxmx5yaoHSOTSyrCygIDFco+RsibY2NM0a4RdEeY/4KgqezwFtK1yr3U67xYhqJSlASm2pKhLVzPj2lr4bA==", + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.18.0-next.0", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + 
"requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } + } + } + } + }, + "es-array-method-boxes-properly": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz", + "integrity": "sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==", + "dev": true + }, + "es-get-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.0.tgz", + "integrity": "sha512-UfrmHuWQlNMTs35e1ypnvikg6jCz3SK8v8ImvmDsh36fCVUR1MqoFDiyn0/k52C8NqO3YsO8Oe0azeesNuqSsQ==", "dev": true, "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-abstract": "^1.17.4", "has-symbols": "^1.0.1", - "is-callable": "^1.1.5", - "is-regex": "^1.0.5", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimleft": "^2.1.1", - "string.prototype.trimright": "^2.1.1" + "is-arguments": "^1.0.4", + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-string": "^1.0.5", + "isarray": "^2.0.5" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + } } }, "es-to-primitive": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, "requires": { 
"is-callable": "^1.1.4", "is-date-object": "^1.0.1", "is-symbol": "^1.0.2" } }, + "escalade": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.0.tgz", + "integrity": "sha512-mAk+hPSO8fLDkhV7V0dXazH5pDc6MrjBTPyD3VeKzxnVFjH1MIxbCdqGZB9O8+EwWakZs3ZCbDS4IpRt79V1ig==" + }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" }, "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true }, "eslint": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.4.0.tgz", - "integrity": "sha512-WTVEzK3lSFoXUovDHEbkJqCVPEPwbhCq4trDktNI6ygs7aO41d4cDT0JFAT5MivzZeVLWlg7vHL+bgrQv/t3vA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.12.1.tgz", + "integrity": "sha512-HlMTEdr/LicJfN08LB3nM1rRYliDXOmfoO4vj39xN6BLpFzF00hbwBoqHk8UcJ2M/3nlARZWy/mslvGEuZFvsg==", "dev": true, "requires": { "@babel/code-frame": "^7.0.0", + "@eslint/eslintrc": "^0.2.1", "ajv": "^6.10.0", - "chalk": "^2.1.0", - "cross-spawn": "^6.0.5", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", "debug": "^4.0.1", "doctrine": "^3.0.0", - "eslint-scope": "^5.0.0", - "eslint-utils": "^1.4.2", - "eslint-visitor-keys": "^1.1.0", - "espree": "^6.1.1", - "esquery": "^1.0.1", + "enquirer": "^2.3.5", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.0", + "esquery": "^1.2.0", "esutils": "^2.0.2", "file-entry-cache": "^5.0.1", "functional-red-black-tree": "^1.0.1", "glob-parent": 
"^5.0.0", - "globals": "^11.7.0", + "globals": "^12.1.0", "ignore": "^4.0.6", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", - "inquirer": "^6.4.1", "is-glob": "^4.0.0", "js-yaml": "^3.13.1", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.3.0", - "lodash": "^4.17.14", + "levn": "^0.4.1", + "lodash": "^4.17.19", "minimatch": "^3.0.4", - "mkdirp": "^0.5.1", "natural-compare": "^1.4.0", - "optionator": "^0.8.2", + "optionator": "^0.9.1", "progress": "^2.0.0", - "regexpp": "^2.0.1", - "semver": "^6.1.2", - "strip-ansi": "^5.2.0", - "strip-json-comments": "^3.0.1", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", "table": "^5.2.3", "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, "dependencies": { "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", "dev": true }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": 
"sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "dev": true, "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "ms": { @@ -1083,51 +1322,81 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true }, "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": 
"sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", "dev": true, "requires": { - "ansi-regex": "^4.1.0" + "ansi-regex": "^5.0.0" } }, "strip-json-comments": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.0.tgz", - "integrity": "sha512-e6/d0eBu7gHtdCqFt0xJr642LdToM5/cN4Qb9DbHjVx1CP5RyeM+zH7pbecEmDv/lBqb0QH+6Uqq75rxFPkM0w==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } } } }, "eslint-config-semistandard": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.0.tgz", - "integrity": "sha512-volIMnosUvzyxGkYUA5QvwkahZZLeUx7wcS0+7QumPn+MMEBbV6P7BY1yukamMst0w3Et3QZlCjQEwQ8tQ6nug==", + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/eslint-config-semistandard/-/eslint-config-semistandard-15.0.1.tgz", + "integrity": "sha512-sfV+qNBWKOmF0kZJll1VH5XqOAdTmLlhbOl9WKI11d2eMEe+Kicxnpm24PQWHOqAfk5pAWU2An0LjNCXKa4Usg==", "dev": true }, "eslint-config-standard": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.0.tgz", - "integrity": "sha512-EF6XkrrGVbvv8hL/kYa/m6vnvmUT+K82pJJc4JJVMM6+Qgqh0pnwprSxdduDLB9p/7bIxD+YV5O0wfb8lmcPbA==", + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-16.0.0.tgz", + "integrity": 
"sha512-kMCehB9yXIG+LNsu9uXfm06o6Pt63TFAOzn9tUOzw4r/hFIxHhNR1Xomxy+B5zMrXhqyfHVEcmanzttEjGei9w==", "dev": true }, "eslint-config-standard-jsx": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-8.1.0.tgz", - "integrity": "sha512-ULVC8qH8qCqbU792ZOO6DaiaZyHNS/5CZt3hKqHkEhVlhPEPN3nfBqqxJCyp59XrjIBZPu1chMYe9T2DXZ7TMw==", + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/eslint-config-standard-jsx/-/eslint-config-standard-jsx-10.0.0.tgz", + "integrity": "sha512-hLeA2f5e06W1xyr/93/QJulN/rLbUVUmqTlexv9PRKHFwEC9ffJcH2LvJhMoEqYQBEYafedgGZXH2W8NUpt5lA==", "dev": true }, "eslint-import-resolver-node": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.3.tgz", - "integrity": "sha512-b8crLDo0M5RSe5YG8Pu2DYBj71tSB6OvXkfzwbJU2w7y8P4/yo0MyF8jU26IEuEuHF2K5/gcAJE3LhQGqBBbVg==", + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", + "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", "dev": true, "requires": { "debug": "^2.6.9", @@ -1145,40 +1414,34 @@ } }, "eslint-plugin-es": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-2.0.0.tgz", - "integrity": "sha512-f6fceVtg27BR02EYnBhgWLFQfK6bN4Ll0nQFrBHOlCsAyxeZkn0NHns5O0YZOPrV1B3ramd6cgFwaoFLcSkwEQ==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", "dev": true, "requires": { - "eslint-utils": "^1.4.2", + "eslint-utils": "^2.0.0", "regexpp": "^3.0.0" - }, - "dependencies": { - "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": 
"sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", - "dev": true - } } }, "eslint-plugin-import": { - "version": "2.18.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.18.2.tgz", - "integrity": "sha512-5ohpsHAiUBRNaBWAF08izwUGlbrJoJJ+W9/TBwsGoR1MnlgfwMIKrFeSjWbt6moabiXW9xNvtFz+97KHRfI4HQ==", + "version": "2.22.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.22.1.tgz", + "integrity": "sha512-8K7JjINHOpH64ozkAhpT3sd+FswIZTfMZTjdx052pnWrgRCVfp8op9tbjpAk3DdUeI/Ba4C8OjdC0r90erHEOw==", "dev": true, "requires": { - "array-includes": "^3.0.3", + "array-includes": "^3.1.1", + "array.prototype.flat": "^1.2.3", "contains-path": "^0.1.0", "debug": "^2.6.9", "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.2", - "eslint-module-utils": "^2.4.0", + "eslint-import-resolver-node": "^0.3.4", + "eslint-module-utils": "^2.6.0", "has": "^1.0.3", "minimatch": "^3.0.4", - "object.values": "^1.1.0", + "object.values": "^1.1.1", "read-pkg-up": "^2.0.0", - "resolve": "^1.11.0" + "resolve": "^1.17.0", + "tsconfig-paths": "^3.9.0" }, "dependencies": { "doctrine": { @@ -1194,13 +1457,13 @@ } }, "eslint-plugin-node": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-10.0.0.tgz", - "integrity": "sha512-1CSyM/QCjs6PXaT18+zuAXsjXGIGo5Rw630rSKwokSs2jrYURQc4R5JZpoanNCqwNmepg+0eZ9L7YiRUJb8jiQ==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", "dev": true, "requires": { - "eslint-plugin-es": "^2.0.0", - "eslint-utils": "^1.4.2", + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", "ignore": "^5.1.1", "minimatch": "^3.0.4", "resolve": "^1.10.1", @@ -1228,20 +1491,22 @@ "dev": true }, "eslint-plugin-react": { - 
"version": "7.14.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.14.3.tgz", - "integrity": "sha512-EzdyyBWC4Uz2hPYBiEJrKCUi2Fn+BJ9B/pJQcjw5X+x/H2Nm59S4MJIvL4O5NEE0+WbnQwEBxWY03oUk+Bc3FA==", + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.21.5.tgz", + "integrity": "sha512-8MaEggC2et0wSF6bUeywF7qQ46ER81irOdWS4QWxnnlAEsnzeBevk1sWh7fhpCghPpXb+8Ks7hvaft6L/xsR6g==", "dev": true, "requires": { - "array-includes": "^3.0.3", + "array-includes": "^3.1.1", + "array.prototype.flatmap": "^1.2.3", "doctrine": "^2.1.0", "has": "^1.0.3", - "jsx-ast-utils": "^2.1.0", - "object.entries": "^1.1.0", - "object.fromentries": "^2.0.0", - "object.values": "^1.1.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "object.entries": "^1.1.2", + "object.fromentries": "^2.0.2", + "object.values": "^1.1.1", "prop-types": "^15.7.2", - "resolve": "^1.10.1" + "resolve": "^1.18.1", + "string.prototype.matchall": "^4.0.2" }, "dependencies": { "doctrine": { @@ -1252,49 +1517,75 @@ "requires": { "esutils": "^2.0.2" } + }, + "resolve": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.18.1.tgz", + "integrity": "sha512-lDfCPaMKfOJXjy0dPayzPdF1phampNWr3qFCjAu+rw/qbQmr5jWH5xN2hwh9QKfw9E5v4hwV7A+jrCmL8yjjqA==", + "dev": true, + "requires": { + "is-core-module": "^2.0.0", + "path-parse": "^1.0.6" + } } } }, "eslint-plugin-standard": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.1.tgz", - "integrity": "sha512-v/KBnfyaOMPmZc/dmc6ozOdWqekGp7bBGq4jLAecEfPGmfKiWS4sA8sC0LqiV9w5qmXAtXVn4M3p1jSyhY85SQ==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", + "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==", "dev": true }, "eslint-scope": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", - "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "dev": true, "requires": { - "esrecurse": "^4.1.0", + "esrecurse": "^4.3.0", "estraverse": "^4.1.1" } }, "eslint-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", - "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", "dev": true, "requires": { "eslint-visitor-keys": "^1.1.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true + } } }, "eslint-visitor-keys": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", - "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz", + "integrity": "sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ==", "dev": true }, "espree": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", - "integrity": 
"sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.0.tgz", + "integrity": "sha512-dksIWsvKCixn1yrEXO8UosNSxaDoSYpq9reEjZSbHLpT5hpaCAKTLBwq0RHtLrIr+c0ByiYzWT8KTMRzoRCNlw==", "dev": true, "requires": { - "acorn": "^7.1.1", + "acorn": "^7.4.0", "acorn-jsx": "^5.2.0", - "eslint-visitor-keys": "^1.1.0" + "eslint-visitor-keys": "^1.3.0" + }, + "dependencies": { + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true + } } }, "esprima": { @@ -1312,20 +1603,28 @@ }, "dependencies": { "estraverse": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.1.0.tgz", - "integrity": "sha512-FyohXK+R0vE+y1nHLoBM7ZTyqRpqAlhdZHCWIWEviFLiGB8b04H6bQs8G+XTthacvT8VuwvteiP7RJSxMs8UEw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", "dev": true } } }, "esrecurse": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", - "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "requires": { - "estraverse": "^4.1.0" + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": 
"sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "dev": true + } } }, "estraverse": { @@ -1345,6 +1644,20 @@ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" }, + "execa": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, "express": { "version": "4.17.1", "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", @@ -1380,6 +1693,18 @@ "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" + }, + "dependencies": { + "qs": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", + "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } } }, "extend": { @@ -1387,17 +1712,6 @@ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" }, - "external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dev": true, - "requires": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - } - }, "extsprintf": { "version": "1.3.0", "resolved": 
"https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", @@ -1425,18 +1739,9 @@ "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" }, "fecha": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fecha/-/fecha-2.3.3.tgz", - "integrity": "sha512-lUGBnIamTAwk4znq5BcqsDaxSmZ9nDVJaij6NvRt/Tg4R69gERA+otPKbS86ROw9nxVMw2/mp1fnaiWqbs6Sdg==" - }, - "figures": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", - "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.0.tgz", + "integrity": "sha512-aN3pcx/DSmtyoovUudctc8+6Hl4T+hI9GBBHLjA76jdZl7+b1sgh5g4k+u/GL3dTy1/pnYzKp69FpJ0OicE3Wg==" }, "file-entry-cache": { "version": "5.0.1", @@ -1470,19 +1775,14 @@ "unpipe": "~1.0.0" } }, - "find-root": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", - "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", - "dev": true - }, "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, "requires": { - "locate-path": "^3.0.0" + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" } }, "flat": { @@ -1492,14 +1792,6 @@ "dev": true, "requires": { "is-buffer": "~2.0.3" - }, - "dependencies": { - "is-buffer": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz", - "integrity": 
"sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==", - "dev": true - } } }, "flat-cache": { @@ -1519,6 +1811,11 @@ "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", "dev": true }, + "fn.name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==" + }, "forever-agent": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", @@ -1544,23 +1841,30 @@ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" }, + "fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "requires": { + "minipass": "^3.0.0" + } + }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "fsevents": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.2.tgz", - "integrity": "sha512-R4wDiBwZ0KzpgOWetKDug1FZcYhqYnUYKtfZYt4mD5SBz76q0KR4Q9o7GIPamsVPGmW3EYPPJ0dOOjvx32ldZA==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", + "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", "dev": true, "optional": true }, "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "integrity": 
"sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, "functional-red-black-tree": { "version": "1.0.1", @@ -1579,10 +1883,21 @@ "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", "dev": true }, + "get-intrinsic": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.0.1.tgz", + "integrity": "sha512-ZnWP+AmS1VUaLgTRy47+zKtjTxz+0xMpx3I52i+aalBK1QP19ggLF3Db89KJX7kjfOfP2eoa01qc++GwPgufPg==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" + } + }, "get-stdin": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz", - "integrity": "sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", + "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", "dev": true }, "get-stream": { @@ -1615,36 +1930,39 @@ } }, "glob-parent": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.0.tgz", - "integrity": "sha512-qjtRgnIVmOfnKUE3NJAQEdk+lKrxfw8t5ke7SXtfMTHcjsBfOfWXCQfdb30zfDoZQ2IRSIiidmjtbHZPZ++Ihw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", + "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", "dev": true, "requires": { "is-glob": "^4.0.1" } }, "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": 
"sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "dev": true, + "requires": { + "type-fest": "^0.8.1" + } }, "got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "requires": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" + "version": "11.8.0", + "resolved": "https://registry.npmjs.org/got/-/got-11.8.0.tgz", + "integrity": "sha512-k9noyoIIY9EejuhaBNLyZ31D5328LeqnyPNXJQb2XlJZcKakLqN5m6O/ikhq/0lw56kUYS54fVm+D1x57YC9oQ==", + "requires": { + "@sindresorhus/is": "^4.0.0", + "@szmarczak/http-timer": "^4.0.5", + "@types/cacheable-request": "^6.0.1", + "@types/responselike": "^1.0.0", + "cacheable-lookup": "^5.0.3", + "cacheable-request": "^7.0.1", + "decompress-response": "^6.0.0", + "http2-wrapper": "^1.0.0-beta.5.2", + "lowercase-keys": "^2.0.0", + "p-cancelable": "^2.0.0", + "responselike": "^2.0.0" } }, "graceful-fs": { @@ -1708,11 +2026,11 @@ "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" }, "har-validator": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", - "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", "requires": { - "ajv": "^6.5.5", + "ajv": "^6.12.3", "har-schema": "^2.0.0" } }, @@ -1720,22 +2038,20 @@ "version": "1.0.3", 
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, "requires": { "function-bind": "^1.1.1" } }, "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true }, "has-symbols": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", - "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", - "dev": true + "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==" }, "he": { "version": "1.2.0", @@ -1776,6 +2092,15 @@ "sshpk": "^1.7.0" } }, + "http2-wrapper": { + "version": "1.0.0-beta.5.2", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.0-beta.5.2.tgz", + "integrity": "sha512-xYz9goEyBnC8XwXDTuC/MZ6t+MrKVQZOk4s7+PaDkwIsQd8IwqvM+0M6bA/2lvG8GHXcPdf+MejTUeO2LCPCeQ==", + "requires": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.0.0" + } + }, "iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -1791,9 +2116,9 @@ "dev": true }, "import-fresh": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", - "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", 
"dev": true, "requires": { "parent-module": "^1.0.0", @@ -1825,83 +2150,21 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" }, - "inquirer": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.2.tgz", - "integrity": "sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==", + "internal-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.2.tgz", + "integrity": "sha512-2cQNfwhAfJIkU4KZPkDI+Gj5yNNnbqi40W9Gge6dfnk4TocEVm00B3bdiL+JINrbGJil2TeHvM4rETGzk/f/0g==", "dev": true, "requires": { - "ansi-escapes": "^3.2.0", - "chalk": "^2.4.2", - "cli-cursor": "^2.1.0", - "cli-width": "^2.0.0", - "external-editor": "^3.0.3", - "figures": "^2.0.0", - "lodash": "^4.17.12", - "mute-stream": "0.0.7", - "run-async": "^2.2.0", - "rxjs": "^6.4.0", - "string-width": "^2.1.0", - "strip-ansi": "^5.1.0", - "through": "^2.3.6" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": 
true, - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "dev": true - } - } - } + "es-abstract": "^1.17.0-next.1", + "has": "^1.0.3", + "side-channel": "^1.0.2" } }, "interpret": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", - "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==" + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" }, "invert-kv": { "version": "1.0.0", @@ -1913,6 +2176,12 @@ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" }, + "is-arguments": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.0.4.tgz", + "integrity": "sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==", + "dev": true + }, "is-arrayish": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", @@ -1927,17 +2196,30 @@ "binary-extensions": "^2.0.0" } }, - "is-callable": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz", - "integrity": 
"sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==", + "is-buffer": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz", + "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A==", "dev": true }, + "is-callable": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.2.tgz", + "integrity": "sha512-dnMqspv5nU3LoewK2N/y7KLtxtakvTuaCsU9FU50/QDmdbHNy/4/JuRtMHqRU22o3q+W89YQndQEeCVwK+3qrA==" + }, + "is-core-module": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.1.0.tgz", + "integrity": "sha512-YcV7BgVMRFRua2FqQzKtTDMz8iCuLEyGKjr70q8Zm1yy2qKcurbFEd79PAdHV77oL3NrAaOVQIbMmiHQCHB7ZA==", + "dev": true, + "requires": { + "has": "^1.0.3" + } + }, "is-date-object": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "dev": true + "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==" }, "is-extglob": { "version": "2.1.1", @@ -1962,29 +2244,43 @@ "is-extglob": "^2.1.1" } }, + "is-map": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.1.tgz", + "integrity": "sha512-T/S49scO8plUiAOA2DBTBG3JHpn1yiw0kRp6dgiZ0v2/6twi5eiB0rHtHFH9ZIrvlWc6+4O+m4zg5+Z833aXgw==", + "dev": true + }, + "is-negative-zero": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.0.tgz", + "integrity": "sha1-lVOxIbD6wohp2p7UWeIMdUN4hGE=" + }, "is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": 
true }, - "is-plain-object": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-3.0.0.tgz", - "integrity": "sha512-tZIpofR+P05k8Aocp7UI/2UTa9lTJSebCXpFFoR9aibpokDj/uXBsJ8luUu0tTVYKkMU6URDUuOfJZ7koewXvg==", - "requires": { - "isobject": "^4.0.0" - } + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "dev": true }, "is-regex": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz", - "integrity": "sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==", - "dev": true, + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.1.tgz", + "integrity": "sha512-1+QkEcxiLlB7VEyFtyBg94e08OAsvq7FUBgApTq/w2ymCLyKJgDPsybBENVtA7XCQEgEXxKPonG+mvYRxh/LIg==", "requires": { - "has": "^1.0.3" + "has-symbols": "^1.0.1" } }, + "is-set": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.1.tgz", + "integrity": "sha512-eJEzOtVyenDs1TMzSQ3kU3K+E0GUS9sno+F0OBT97xsgcJsF9nXMBtkT9/kut5JEpM7oL7X/0qxR17K3mcwIAA==", + "dev": true + }, "is-stream": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", @@ -2000,7 +2296,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dev": true, "requires": { "has-symbols": "^1.0.1" } @@ -2018,13 +2313,7 @@ "isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true - }, - "isobject": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-4.0.0.tgz", - "integrity": 
"sha512-S/2fF5wH8SJA/kmwr6HYhK/RI/OkhD84k8ntalo0iJjZikgq1XFvR5M8NPT1x5F7fBwCG3qHfnzeP/Vh/ZxCUA==" + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, "isomorphic-ws": { "version": "4.0.1", @@ -2036,10 +2325,26 @@ "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" }, + "iterate-iterator": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/iterate-iterator/-/iterate-iterator-1.0.1.tgz", + "integrity": "sha512-3Q6tudGN05kbkDQDI4CqjaBf4qf85w6W6GnuZDtUVYwKgtC1q8yxYX7CZed7N+tLzQqS6roujWvszf13T+n9aw==", + "dev": true + }, + "iterate-value": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/iterate-value/-/iterate-value-1.0.2.tgz", + "integrity": "sha512-A6fMAio4D2ot2r/TYzr4yUWrmwNdsN5xL7+HUiyACE4DXm+q8HtPcnFTp+NnW3k4N05tZ7FVYFFb2CR13NxyHQ==", + "dev": true, + "requires": { + "es-get-iterator": "^1.0.2", + "iterate-iterator": "^1.0.1" + } + }, "jose": { - "version": "1.27.0", - "resolved": "https://registry.npmjs.org/jose/-/jose-1.27.0.tgz", - "integrity": "sha512-SxYPCM9pWDaK070CXbxgL4ktVzLlE0yJxevDJtbWxv2WMQwYfpBZLYlG8PhChsiOfOXp6FrceRgTuZh1vZeDlg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/jose/-/jose-2.0.3.tgz", + "integrity": "sha512-L+RlDgjO0Tk+Ki6/5IXCSEnmJCV8iMFZoBuEgu2vPQJJ4zfG/k3CAqZUMKDYNRHIDyy0QidJpOvX0NgpsAqFlw==", "requires": { "@panva/asn1.js": "^1.0.0" } @@ -2065,9 +2370,9 @@ "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" }, "json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" }, "json-parse-better-errors": { "version": "1.0.2", @@ -2091,16 +2396,20 @@ "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", 
"dev": true }, - "json-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-stream/-/json-stream-1.0.0.tgz", - "integrity": "sha1-GjhU4o0rvuqzHMfd9oPS3cVlJwg=" - }, "json-stringify-safe": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + } + }, "jsonpath-plus": { "version": "0.19.0", "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", @@ -2118,73 +2427,47 @@ } }, "jsx-ast-utils": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-2.3.0.tgz", - "integrity": "sha512-3HNoc7nZ1hpZIKB3hJ7BlFRkzCx2BynRtfSwbkqZdpRdvAPsGMnzclPwrvDBS7/lalHTj21NwIeaEpysHBOudg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.1.0.tgz", + "integrity": "sha512-d4/UOjg+mxAWxCiF0c5UTSwyqbchkbqCvK87aBovhnh8GtysTjWmgC63tY0cJx/HzGgm9qnA147jVBdpOiQ2RA==", "dev": true, "requires": { "array-includes": "^3.1.1", - "object.assign": "^4.1.0" + "object.assign": "^4.1.1" + }, + "dependencies": { + "object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + } + } } }, "just-extend": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-4.1.0.tgz", - "integrity": 
"sha512-ApcjaOdVTJ7y4r08xI5wIqpvwS48Q0PBG4DJROcEkH1f8MdAiNFyFxz3xoL0LWAVwjrwPYZdVHHxhRHcx/uGLA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-4.1.1.tgz", + "integrity": "sha512-aWgeGFW67BP3e5181Ep1Fv2v8z//iBJfrvyTnq8wG86vEESwmonn1zPBJ0VfmT9CJq2FIT0VsETtrNFm2a+SHA==", "dev": true }, "keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "requires": { - "json-buffer": "3.0.0" - } - }, - "kubernetes-client": { - "version": "8.3.7", - "resolved": "https://registry.npmjs.org/kubernetes-client/-/kubernetes-client-8.3.7.tgz", - "integrity": "sha512-A0rvfQAvwAuPTooBOSErpTcnwcQxhkmawjOm/gUdGDWCUZoYmAVgVGFnc/klda+X1tvHwleavDsLqmqaYscH2w==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.0.3.tgz", + "integrity": "sha512-zdGa2TOpSZPq5mU6iowDARnMBZgtCqJ11dJROFi6tg6kTn4nuUdU09lFyLFSaHrWqpIJ+EBq4E8/Dc0Vx5vLdA==", "requires": { - "@kubernetes/client-node": "0.10.2", - "camelcase": "^6.0.0", - "deepmerge": "^4.2.2", - "depd": "^2.0.0", - "js-yaml": "^3.13.1", - "json-stream": "^1.0.0", - "openid-client": "^3.14.0", - "pump": "^3.0.0", - "qs": "^6.9.0", - "request": "^2.88.2", - "swagger-fluent": "^5.0.1", - "url-join": "^4.0.1", - "ws": "^7.2.3" - }, - "dependencies": { - "camelcase": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.0.0.tgz", - "integrity": "sha512-8KMDF1Vz2gzOq54ONPJS65IvTUaB1cHJ2DMM7MbPmLZljDH1qpzzLsWdiN9pHh6qvkRVDTi/07+eNGch/oLU4w==" - }, - "depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==" - }, - "qs": { - "version": "6.9.4", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.4.tgz", - "integrity": 
"sha512-A1kFqHekCTM7cz0udomYUoYNWjBebHm/5wzU/XqrBRBNWectVH0QIiN+NEcZ0Dte5hvzHwbr8+XQmguPhJ6WdQ==" - } + "json-buffer": "3.0.1" } }, "kuler": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/kuler/-/kuler-1.0.1.tgz", - "integrity": "sha512-J9nVUucG1p/skKul6DU3PUZrhs0LPulNaeUOox0IyXDi8S4CztTHs1gQphhuZmzXG7VOQSf6NJfKuzteQLv9gQ==", - "requires": { - "colornames": "^1.1.1" - } + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==" }, "lcid": { "version": "1.0.0", @@ -2195,13 +2478,13 @@ } }, "levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, "requires": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" } }, "load-json-file": { @@ -2217,19 +2500,18 @@ } }, "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" + "p-locate": "^5.0.0" } }, "lodash": { - "version": "4.17.19", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + "version": "4.17.20", + 
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==" }, "lodash.camelcase": { "version": "4.3.0", @@ -2248,22 +2530,22 @@ "dev": true }, "log-symbols": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-3.0.0.tgz", - "integrity": "sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", "dev": true, "requires": { - "chalk": "^2.4.2" + "chalk": "^4.0.0" } }, "logform": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/logform/-/logform-2.1.2.tgz", - "integrity": "sha512-+lZh4OpERDBLqjiwDLpAWNQu6KMjnlXH2ByZwCuSqVPJletw0kTWJf5CgSNAUKn1KUkv3m2cUz/LK8zyEy7wzQ==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.2.0.tgz", + "integrity": "sha512-N0qPlqfypFx7UHNn4B3lzS/b0uLqt2hmuoa+PpuXNYgozdJYAyauF5Ky0BWVjrxDlMWiT3qN4zPq3vVAfZy7Yg==", "requires": { "colors": "^1.2.1", "fast-safe-stringify": "^2.0.4", - "fecha": "^2.3.3", + "fecha": "^4.2.0", "ms": "^2.1.1", "triple-beam": "^1.3.0" }, @@ -2290,16 +2572,16 @@ } }, "lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", + "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" }, "lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": 
"sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", "requires": { - "yallist": "^3.0.2" + "yallist": "^4.0.0" } }, "make-error": { @@ -2340,12 +2622,6 @@ "mime-db": "1.44.0" } }, - "mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true - }, "mimic-response": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", @@ -2365,6 +2641,23 @@ "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", "dev": true }, + "minipass": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", + "integrity": "sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", + "requires": { + "yallist": "^4.0.0" + } + }, + "minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "requires": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + } + }, "mkdirp": { "version": "0.5.5", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", @@ -2375,35 +2668,36 @@ } }, "mocha": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-7.1.1.tgz", - "integrity": "sha512-3qQsu3ijNS3GkWcccT5Zw0hf/rWvu1fTN9sPvEd81hlwsr30GX2GcDSSoBxo24IR8FelmrAydGC6/1J5QQP4WA==", + "version": "8.1.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.1.3.tgz", + "integrity": 
"sha512-ZbaYib4hT4PpF4bdSO2DohooKXIn4lDeiYqB+vTmCdr6l2woW0b6H3pf5x4sM5nwQMru9RvjjHYWVGltR50ZBw==", "dev": true, "requires": { - "ansi-colors": "3.2.3", + "ansi-colors": "4.1.1", "browser-stdout": "1.3.1", - "chokidar": "3.3.0", - "debug": "3.2.6", - "diff": "3.5.0", - "escape-string-regexp": "1.0.5", - "find-up": "3.0.0", - "glob": "7.1.3", + "chokidar": "3.4.2", + "debug": "4.1.1", + "diff": "4.0.2", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.6", "growl": "1.10.5", "he": "1.2.0", - "js-yaml": "3.13.1", - "log-symbols": "3.0.0", + "js-yaml": "3.14.0", + "log-symbols": "4.0.0", "minimatch": "3.0.4", - "mkdirp": "0.5.3", - "ms": "2.1.1", - "node-environment-flags": "1.0.6", + "ms": "2.1.2", "object.assign": "4.1.0", - "strip-json-comments": "2.0.1", - "supports-color": "6.0.0", - "which": "1.3.1", + "promise.allsettled": "1.0.2", + "serialize-javascript": "4.0.0", + "strip-json-comments": "3.0.1", + "supports-color": "7.1.0", + "which": "2.0.2", "wide-align": "1.1.3", + "workerpool": "6.0.0", "yargs": "13.3.2", "yargs-parser": "13.1.2", - "yargs-unparser": "1.6.0" + "yargs-unparser": "1.6.1" }, "dependencies": { "ansi-regex": { @@ -2412,6 +2706,15 @@ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", "dev": true }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, "cliui": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", @@ -2424,63 +2727,58 @@ } }, "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + "version": "4.1.1", + 
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", "dev": true, "requires": { "ms": "^2.1.1" } }, - "glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, "is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", "dev": true }, - "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "dev": true, "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" } }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, - "mkdirp": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.3.tgz", - 
"integrity": "sha512-P+2gwrFqx8lhew375MQHHeTlY8AuOJSrGf0R5ddkEndUkmwpgUob/vQuBD1V22/Cw1/lJr4x+EjllSezBThzBg==", + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "requires": { - "minimist": "^1.2.5" + "p-try": "^2.0.0" } }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", "dev": true }, "string-width": { @@ -2503,6 +2801,15 @@ "ansi-regex": "^4.1.0" } }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, "wrap-ansi": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", @@ -2536,6 +2843,17 @@ "which-module": "^2.0.0", "y18n": "^4.0.0", "yargs-parser": "^13.1.2" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + } } } } @@ -2545,24 +2863,17 @@ "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, - "mute-stream": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz", - "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=", - "dev": true - }, "nan": { "version": "2.14.1", "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==" }, "nats": { - "version": "2.0.0-27", - "resolved": "https://registry.npmjs.org/nats/-/nats-2.0.0-27.tgz", - "integrity": "sha512-5uxxGx08/xucEJBz3c11hV36TZgUOunTSPtoJSS7GjY731WDXKE91mxdfP/7YMkLDSBSn0sDXhBkDnt87FFCtQ==", + "version": "2.0.0-209", + "resolved": "https://registry.npmjs.org/nats/-/nats-2.0.0-209.tgz", + "integrity": "sha512-lHYqr+wtzj2UonFnkOzfTiYhK5aVr+UYrlH7rApfR3+ZFx1vpbNLdZcGg03p++A05gzmtKGXgZfJyQ12VSTHbQ==", "requires": { - "nuid": "^1.1.4", - "ts-nkeys": "^1.0.16" + "nkeys.js": "^1.0.0-5" } }, "natural-compare": { @@ -2579,13 +2890,12 @@ "nice-try": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", - "dev": true + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" }, "nise": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/nise/-/nise-4.0.3.tgz", - "integrity": "sha512-EGlhjm7/4KvmmE6B/UFsKh7eHykRl9VH+au8dduHLCyWUO/hr7+N+WtTvDUwc9zHuM1IaIJs/0lQ6Ag1jDkQSg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/nise/-/nise-4.0.4.tgz", + "integrity": "sha512-bTTRUNlemx6deJa+ZyoCUTRvH3liK5+N6VQZ4NIw90AgDXY6iPnsqplNFf6STcj+ePk0H/xqxnP75Lr0J0Fq3A==", "dev": true, "requires": { "@sinonjs/commons": "^1.7.0", @@ -2612,14 +2922,25 @@ } } }, - "node-environment-flags": { - "version": "1.0.6", - "resolved": 
"https://registry.npmjs.org/node-environment-flags/-/node-environment-flags-1.0.6.tgz", - "integrity": "sha512-5Evy2epuL+6TM0lCQGpFIj6KwiEsGh1SrHUhTbNX+sLbBtjidPZFAnVK9y5yU1+h//RitLbRHTIMyxQPtxMdHw==", - "dev": true, + "nkeys.js": { + "version": "1.0.0-6", + "resolved": "https://registry.npmjs.org/nkeys.js/-/nkeys.js-1.0.0-6.tgz", + "integrity": "sha512-DctD6XECr3NYfWs2CvcwoerY6zo3pWG83JiPaLjjDLZg+CnQOd1AXYCFBxMcSEZyypHh+M7GBFgP0He8QC/ndw==", "requires": { - "object.getownpropertydescriptors": "^2.0.3", - "semver": "^5.7.0" + "@types/node": "^14.0.26", + "tweetnacl": "^1.0.3" + }, + "dependencies": { + "@types/node": { + "version": "14.11.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.11.5.tgz", + "integrity": "sha512-jVFzDV6NTbrLMxm4xDSIW/gKnk8rQLF9wAzLWIOg+5nU6ACrIMndeBdXci0FGtqJbP9tQvm6V39eshc96TO2wQ==" + }, + "tweetnacl": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", + "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" + } } }, "normalize-package-data": { @@ -2645,10 +2966,13 @@ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==" }, - "nuid": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/nuid/-/nuid-1.1.4.tgz", - "integrity": "sha512-PXiYyHhGfrq8H4g5HyC8enO1lz6SBe5z6x1yx/JG4tmADzDGJVQy3l1sRf3VtEvPsN8dGn9hRFRwDKWL62x0BA==" + "npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "requires": { + "path-key": "^2.0.0" + } }, "number-is-nan": { "version": "1.0.1", @@ -2672,16 +2996,14 @@ "integrity": "sha512-JPKn0GMu+Fa3zt3Bmr66JhokJU5BaNBIh4ZeTlaCBzrBsOeXzwcKKAK1tbLiPKgvwmPXsDvvLHoWh5Bm7ofIYg==" }, "object-inspect": { - "version": "1.7.0", - "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz", - "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==", - "dev": true + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.8.0.tgz", + "integrity": "sha512-jLdtEOB112fORuypAyl/50VRVIBIdVQOSUUGQHzJ4xBSbit81zRarz7GThkEFZy1RceYrWYcPcBFPQwHyAc1gA==" }, "object-keys": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==" }, "object.assign": { "version": "4.1.0", @@ -2704,27 +3026,6 @@ "define-properties": "^1.1.3", "es-abstract": "^1.17.5", "has": "^1.0.3" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.5", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.5.tgz", - "integrity": "sha512-BR9auzDbySxOcfog0tLECW8l28eRGpDpU3Dm3Hp4q/N+VtLTmyj4EUN088XZWQDW/hzj6sYRDXeOFsaAODKvpg==", - "dev": true, - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.1.5", - "is-regex": "^1.0.5", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimleft": "^2.1.1", - "string.prototype.trimright": "^2.1.1" - } - } } }, "object.fromentries": { @@ -2739,16 +3040,6 @@ "has": "^1.0.3" } }, - "object.getownpropertydescriptors": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz", - "integrity": "sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": 
"^1.17.0-next.1" - } - }, "object.values": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.1.tgz", @@ -2783,30 +3074,22 @@ } }, "one-time": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/one-time/-/one-time-0.0.4.tgz", - "integrity": "sha1-+M33eISCb+Tf+T46nMN7HkSAdC4=" - }, - "onetime": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", - "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", "requires": { - "mimic-fn": "^1.0.0" + "fn.name": "1.x.x" } }, "openid-client": { - "version": "3.15.1", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-3.15.1.tgz", - "integrity": "sha512-USoxzLuL08IhRiA+4z5FW25nsLgBM6lOoh+U/XWqyKJzrMbjfmVWNfof7706RgMypyvAFcAPCxPtSFqb+GpHjA==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.2.1.tgz", + "integrity": "sha512-07eOcJeMH3ZHNvx5DVMZQmy3vZSTQqKSSunbtM1pXb+k5LBPi5hMum1vJCFReXlo4wuLEqZ/OgbsZvXPhbGRtA==", "requires": { - "@types/got": "^9.6.9", "base64url": "^3.0.1", - "got": "^9.6.0", - "jose": "^1.25.2", - "lodash": "^4.17.15", - "lru-cache": "^5.1.1", + "got": "^11.8.0", + "jose": "^2.0.2", + "lru-cache": "^6.0.0", "make-error": "^1.3.6", "object-hash": "^2.0.1", "oidc-token-hash": "^5.0.0", @@ -2814,17 +3097,17 @@ } }, "optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": 
"sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", "dev": true, "requires": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" } }, "optjs": { @@ -2840,12 +3123,6 @@ "lcid": "^1.0.0" } }, - "os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true - }, "p-any": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/p-any/-/p-any-3.0.0.tgz", @@ -2853,35 +3130,34 @@ "requires": { "p-cancelable": "^2.0.0", "p-some": "^5.0.0" - }, - "dependencies": { - "p-cancelable": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.0.0.tgz", - "integrity": "sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg==" - } } }, "p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.0.0.tgz", + "integrity": "sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg==" + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" }, "p-limit": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.1.tgz", - "integrity": "sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg==", + "version": "3.0.2", + 
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.0.2.tgz", + "integrity": "sha512-iwqZSOoWIW+Ew4kAGUlN16J4M7OB3ysMLSZtnhmqx7njIHFPlxWBX8xo3lVTyFVq6mI/lL9qt2IsN1sHwaxJkg==", + "dev": true, "requires": { "p-try": "^2.0.0" } }, "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, "requires": { - "p-limit": "^2.0.0" + "p-limit": "^3.0.2" } }, "p-some": { @@ -2891,19 +3167,13 @@ "requires": { "aggregate-error": "^3.0.0", "p-cancelable": "^2.0.0" - }, - "dependencies": { - "p-cancelable": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.0.0.tgz", - "integrity": "sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg==" - } } }, "p-try": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true }, "parent-module": { "version": "1.0.1", @@ -2929,9 +3199,9 @@ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" }, "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true }, "path-is-absolute": { @@ -2942,8 +3212,7 @@ "path-key": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "dev": true + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { "version": "1.0.6", @@ -2976,9 +3245,9 @@ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, "picomatch": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.1.tgz", - "integrity": "sha512-ISBaA8xQNmwELC7eOjqFKMESB2VIqt4PPDD0nsS95b/9dZXvVKOlz9keMSnoGGKcOHXfTvDD6WMaRoSc9UuhRA==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", + "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", "dev": true }, "pify": { @@ -2997,6 +3266,15 @@ "load-json-file": "^5.2.0" }, "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, "load-json-file": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", @@ -3010,6 +3288,34 @@ "type-fest": "^0.3.0" } }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, "parse-json": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", @@ -3020,25 +3326,26 @@ "json-parse-better-errors": "^1.0.1" } }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, "pify": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", "dev": true + }, + "type-fest": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", + "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", + "dev": true } } }, - "pkg-config": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pkg-config/-/pkg-config-1.1.1.tgz", - "integrity": "sha1-VX7yLXPaPIg3EHdmxS6tq94pj+Q=", - "dev": true, - "requires": { - "debug-log": "^1.0.0", - "find-root": "^1.0.0", - "xtend": "^4.0.1" - } - }, "pkg-dir": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", @@ -3090,20 +3397,21 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": 
"sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true } } }, "prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true }, - "prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" - }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -3115,6 +3423,19 @@ "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", "dev": true }, + "promise.allsettled": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/promise.allsettled/-/promise.allsettled-1.0.2.tgz", + "integrity": "sha512-UpcYW5S1RaNKT6pd+s9jp9K9rlQge1UXKskec0j6Mmuq7UJCvlS2J2/s/yuPN8ehftf9HXMxWlKiPbGGUzpoRg==", + "dev": true, + "requires": { + "array.prototype.map": "^1.0.1", + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1", + "function-bind": "^1.1.1", + "iterate-value": "^1.0.0" + } + }, "prop-types": { "version": "15.7.2", "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz", @@ -3127,9 +3448,9 @@ } }, "protobufjs": { - "version": "6.9.0", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.9.0.tgz", - "integrity": "sha512-LlGVfEWDXoI/STstRDdZZKb/qusoAWUnmLg9R8OLSO473mBLWHowx8clbX5/+mKDEI+v7GzjoK9tRPZMMcoTrg==", + "version": "6.10.1", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.10.1.tgz", + "integrity": "sha512-pb8kTchL+1Ceg4lFd5XUpK8PdWacbvV5SK2ULH2ebrYtl4GjJmS24m6CKME67jzV53tbJxHlnNOSqQHbTsR9JQ==", "requires": { 
"@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -3175,9 +3496,23 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + }, + "quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==" + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } }, "range-parser": { "version": "1.2.1", @@ -3264,6 +3599,12 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true } } }, @@ -3278,12 +3619,12 @@ } }, "readdirp": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.2.0.tgz", - "integrity": "sha512-crk4Qu3pmXwgxdSgGhgA/eXiJAPQiX4GMOZZMXnqKxHX7TaoL+3gQVo/WeuAiogr07DpnfjIMpXXa+PAIvwPGQ==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz", + "integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==", 
"dev": true, "requires": { - "picomatch": "^2.0.4" + "picomatch": "^2.2.1" } }, "rechoir": { @@ -3294,10 +3635,20 @@ "resolve": "^1.1.6" } }, + "regexp.prototype.flags": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.0.tgz", + "integrity": "sha512-2+Q0C5g951OlYlJz6yu5/M33IcsESLlLfsyIaLJaG4FA2r4yP8MvVMJUUP/fVBkSpbbbZlS5gynbEWLipiiXiQ==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1" + } + }, "regexpp": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", - "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", + "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", "dev": true }, "request": { @@ -3335,12 +3686,7 @@ "asynckit": "^0.4.0", "combined-stream": "^1.0.6", "mime-types": "^2.1.12" - } - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + } } } }, @@ -3352,7 +3698,8 @@ "require-main-filename": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true }, "resolve": { "version": "1.17.0", @@ -3362,6 +3709,11 @@ "path-parse": "^1.0.6" } }, + "resolve-alpn": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.0.0.tgz", + "integrity": 
"sha512-rTuiIEqFmGxne4IovivKSDzld2lWW9QCjqv80SYjPgf+gS35eaCAjaP54CCwGAwBtnCsvNLYtqxe1Nw+i6JEmA==" + }, "resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -3369,23 +3721,18 @@ "dev": true }, "responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "requires": { - "lowercase-keys": "^1.0.0" - } - }, - "restore-cursor": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", - "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, + "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.0.tgz", + "integrity": "sha512-xH48u3FTB9VsZw7R+vvgaKeLKzT6jOogbQhEe/jewwnZgzPcnyWui2Av6JpoYZF/91uueC+lqhWqeURw5/qhCw==", "requires": { - "onetime": "^2.0.0", - "signal-exit": "^3.0.2" + "lowercase-keys": "^2.0.0" } }, + "rfc4648": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.4.0.tgz", + "integrity": "sha512-3qIzGhHlMHA6PoT6+cdPKZ+ZqtxkIvg8DZGKA5z6PQ33/uuhoJ+Ws/D/J9rXW6gXodgH8QYlz2UCl+sdUDmNIg==" + }, "rimraf": { "version": "2.6.3", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", @@ -3395,31 +3742,10 @@ "glob": "^7.1.3" } }, - "run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "dev": true - }, - "run-parallel": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz", - "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==", - "dev": true - }, - "rxjs": { - "version": "6.5.5", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.5.tgz", - "integrity": 
"sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ==", - "dev": true, - "requires": { - "tslib": "^1.9.0" - } - }, "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" }, "safer-buffer": { "version": "2.1.2", @@ -3427,28 +3753,27 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "semistandard": { - "version": "14.2.0", - "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-14.2.0.tgz", - "integrity": "sha512-mQ0heTpbW7WWBXKOIqitlfEcAZhgGTwaHr1zzv70PnZZc53J+4u31+vLUEsh2oKVWfVgcjrykT2hz02B1Cfaaw==", + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/semistandard/-/semistandard-16.0.0.tgz", + "integrity": "sha512-pLETGjFyl0ETMDAEZxkC1OJBmNmPIMpMkayStGTgHMMh/5FM7Rbk5NWc1t7yfQ4PrRURQH8MUg3ZxvojJJifcw==", "dev": true, "requires": { - "eslint": "~6.4.0", - "eslint-config-semistandard": "15.0.0", - "eslint-config-standard": "14.1.0", - "eslint-config-standard-jsx": "8.1.0", - "eslint-plugin-import": "~2.18.0", - "eslint-plugin-node": "~10.0.0", + "eslint": "~7.12.1", + "eslint-config-semistandard": "15.0.1", + "eslint-config-standard": "16.0.0", + "eslint-config-standard-jsx": "10.0.0", + "eslint-plugin-import": "~2.22.1", + "eslint-plugin-node": "~11.1.0", "eslint-plugin-promise": "~4.2.1", - "eslint-plugin-react": "~7.14.2", - "eslint-plugin-standard": "~4.0.0", - "standard-engine": "^12.0.0" + "eslint-plugin-react": "~7.21.5", + "eslint-plugin-standard": "~4.0.2", + "standard-engine": "^14.0.0" } }, "semver": { "version": "5.7.1", "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, "send": { "version": "0.17.1", @@ -3477,6 +3802,15 @@ } } }, + "serialize-javascript": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", + "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" + } + }, "serve-static": { "version": "1.14.1", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", @@ -3491,7 +3825,8 @@ "set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true }, "setprototypeof": { "version": "1.1.1", @@ -3502,7 +3837,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dev": true, "requires": { "shebang-regex": "^1.0.0" } @@ -3510,8 +3844,7 @@ "shebang-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "dev": true + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" }, "shelljs": { "version": "0.8.4", @@ -3523,11 +3856,54 @@ "rechoir": "^0.6.2" } }, + "side-channel": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.3.tgz", + "integrity": "sha512-A6+ByhlLkksFoUepsGxfj5x1gTSrs+OydsRptUxeNCabQpCFUvcwIczgOigI8vhY/OJCnPnyE9rGiwgvr9cS1g==", + "dev": true, + "requires": { + "es-abstract": "^1.18.0-next.0", + 
"object-inspect": "^1.8.0" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + }, + "object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + } + } + } + }, "signal-exit": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" }, "simple-swizzle": { "version": "0.2.2", @@ -3538,41 +3914,18 @@ } }, "sinon": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-9.0.1.tgz", - "integrity": "sha512-iTTyiQo5T94jrOx7X7QLBZyucUJ2WvL9J13+96HMfm2CGoJYbIPqRfl6wgNcqmzk0DI28jeGx5bUTXizkrqBmg==", + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-9.1.0.tgz", + "integrity": "sha512-9zQShgaeylYH6qtsnNXlTvv0FGTTckuDfHBi+qhgj5PvW2r2WslHZpgc3uy3e/ZAoPkqaOASPi+juU6EdYRYxA==", "dev": true, "requires": { - "@sinonjs/commons": 
"^1.7.0", - "@sinonjs/fake-timers": "^6.0.0", + "@sinonjs/commons": "^1.7.2", + "@sinonjs/fake-timers": "^6.0.1", "@sinonjs/formatio": "^5.0.1", - "@sinonjs/samsam": "^5.0.3", + "@sinonjs/samsam": "^5.1.0", "diff": "^4.0.2", - "nise": "^4.0.1", + "nise": "^4.0.4", "supports-color": "^7.1.0" - }, - "dependencies": { - "diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } } }, "sleep-promise": { @@ -3591,6 +3944,15 @@ "is-fullwidth-code-point": "^2.0.0" }, "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, "is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", @@ -3599,6 +3961,22 @@ } } }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + }, + "source-map-support": { + "version": "0.5.19", + 
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", + "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, "spdx-correct": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", @@ -3626,9 +4004,9 @@ } }, "spdx-license-ids": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", - "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.6.tgz", + "integrity": "sha512-+orQK83kyMva3WyPf59k1+Y525csj5JejicWut55zeTWANuN17qSiSLUXWtzHeNWORSvT7GLDJ/E/XiIWoXBTw==", "dev": true }, "sprintf-js": { @@ -3658,15 +4036,15 @@ "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" }, "standard-engine": { - "version": "12.1.0", - "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-12.1.0.tgz", - "integrity": "sha512-DVJnWM1CGkag4ucFLGdiYWa5/kJURPONmMmk17p8FT5NE4UnPZB1vxWnXnRo2sPSL78pWJG8xEM+1Tu19z0deg==", + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/standard-engine/-/standard-engine-14.0.1.tgz", + "integrity": "sha512-7FEzDwmHDOGva7r9ifOzD3BGdTbA7ujJ50afLVdW/tK14zQEptJjbFuUfn50irqdHDcTbNh0DTIoMPynMCXb0Q==", "dev": true, "requires": { - "deglob": "^4.0.1", - "get-stdin": "^7.0.0", + "get-stdin": "^8.0.0", "minimist": "^1.2.5", - "pkg-conf": "^3.1.0" + "pkg-conf": "^3.1.0", + "xdg-basedir": "^4.0.0" } }, "statuses": { @@ -3674,6 +4052,11 @@ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=" }, + "stream-buffers": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", + 
"integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==" + }, "string-width": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", @@ -3684,24 +4067,36 @@ "strip-ansi": "^3.0.0" } }, - "string.prototype.trimleft": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.1.tgz", - "integrity": "sha512-iu2AGd3PuP5Rp7x2kEZCrB2Nf41ehzh+goo8TV7z8/XDBbsvc6HQIlUl9RjkZ4oyrW1XM5UwlGl1oVEaDjg6Ag==", + "string.prototype.matchall": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.2.tgz", + "integrity": "sha512-N/jp6O5fMf9os0JU3E72Qhf590RSRZU/ungsL/qJUYVTNv7hTG0P/dbPjxINVN9jpscu3nzYwKESU3P3RY5tOg==", "dev": true, "requires": { "define-properties": "^1.1.3", - "function-bind": "^1.1.1" + "es-abstract": "^1.17.0", + "has-symbols": "^1.0.1", + "internal-slot": "^1.0.2", + "regexp.prototype.flags": "^1.3.0", + "side-channel": "^1.0.2" } }, - "string.prototype.trimright": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.1.tgz", - "integrity": "sha512-qFvWL3/+QIgZXVmJBfpHmxLB7xsUXz6HsUmP8+5dRaC3Q7oKUv9Vo6aMCRZC1smrtyECFsIT30PqBJ1gTjAs+g==", - "dev": true, + "string.prototype.trimend": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz", + "integrity": "sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g==", "requires": { "define-properties": "^1.1.3", - "function-bind": "^1.1.1" + "es-abstract": "^1.17.5" + } + }, + "string.prototype.trimstart": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz", + "integrity": 
"sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw==", + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" } }, "string_decoder": { @@ -3710,13 +4105,6 @@ "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "requires": { "safe-buffer": "~5.2.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - } } }, "strip-ansi": { @@ -3733,29 +4121,24 @@ "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", "dev": true }, + "strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=" + }, "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz", + "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==", "dev": true }, "supports-color": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.0.0.tgz", - "integrity": "sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==", + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", "dev": true, "requires": { - "has-flag": "^3.0.0" - } - }, - "swagger-fluent": { - "version": "5.0.3", - "resolved": 
"https://registry.npmjs.org/swagger-fluent/-/swagger-fluent-5.0.3.tgz", - "integrity": "sha512-i43ADMtPi7dxAN75Lw50SlncMB31FgaVwXqKioR8SWs+Yon2RbiLU1J1PGMXA4N8cSt9Vz5RHzaoKjz/+iW88g==", - "requires": { - "deepmerge": "^4.2.2", - "is-plain-object": "^3.0.0", - "request": "^2.88.0" + "has-flag": "^4.0.0" } }, "table": { @@ -3804,6 +4187,26 @@ } } }, + "tar": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.0.5.tgz", + "integrity": "sha512-0b4HOimQHj9nXNEAA7zWwMM91Zhhba3pspja6sQbgTpynOJf+bkjBnfybNYzbpLbnwXnbyB4LOREvlyXLkCHSg==", + "requires": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^3.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "dependencies": { + "mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" + } + } + }, "text-hex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", @@ -3815,25 +4218,31 @@ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", "dev": true }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true - }, "tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", + "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", "requires": { - "os-tmpdir": "~1.0.2" + "rimraf": "^3.0.0" + }, + "dependencies": { + "rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "requires": { + "glob": "^7.1.3" + } + } } }, - "to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==" + "tmp-promise": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.2.tgz", + "integrity": "sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA==", + "requires": { + "tmp": "^0.2.0" + } }, "to-regex-range": { "version": "5.0.1", @@ -3863,25 +4272,22 @@ "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" }, - "ts-nkeys": { - "version": "1.0.16", - "resolved": "https://registry.npmjs.org/ts-nkeys/-/ts-nkeys-1.0.16.tgz", - "integrity": "sha512-1qrhAlavbm36wtW+7NtKOgxpzl+70NTF8xlz9mEhiA5zHMlMxjj3sEVKWm3pGZhHXE0Q3ykjrj+OSRVaYw+Dqg==", + "tsconfig-paths": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.9.0.tgz", + "integrity": "sha512-dRcuzokWhajtZWkQsDVKbWyY+jgcLC5sqJhg2PSgf4ZkH2aHPvaOY8YWGhmjb68b5qqTfasSsDO9k7RUiEmZAw==", + "dev": true, "requires": { - "tweetnacl": "^1.0.3" - }, - "dependencies": { - "tweetnacl": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", - "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" - } + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.0", + "strip-bom": "^3.0.0" } }, "tslib": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.13.0.tgz", - "integrity": 
"sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" }, "tunnel-agent": { "version": "0.6.0", @@ -3897,12 +4303,12 @@ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" }, "type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dev": true, "requires": { - "prelude-ls": "~1.1.2" + "prelude-ls": "^1.2.1" } }, "type-detect": { @@ -3912,9 +4318,9 @@ "dev": true }, "type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", "dev": true }, "type-is": { @@ -3927,21 +4333,15 @@ } }, "typescript": { - "version": "3.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.3.tgz", - "integrity": "sha512-D/wqnB2xzNFIcoBG9FG8cXRDjiqSTbG2wd8DMZeQyJlP1vfTkIxH4GKveWaEBYySKIg+USu+E+EDIR47SqnaMQ==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.0.3.tgz", + "integrity": "sha512-tEu6DGxGgRJPb/mVPIZ48e69xCn2yRmCgYmDugAVwmJ6o+0u1RI18eO7E7WBTLYLaEVVOhwQmcdhQHweux/WPg==", "dev": true }, "underscore": { - "version": "1.10.2", - "resolved": 
"https://registry.npmjs.org/underscore/-/underscore-1.10.2.tgz", - "integrity": "sha512-N4P+Q/BuyuEKFJ43B9gYuOj4TQUHXX+j2FqguVOpjkssLUUrnJofCcBccJSCoeturDoZU6GorDTHSvUDlSQbTg==" - }, - "uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=", - "dev": true + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.11.0.tgz", + "integrity": "sha512-xY96SsN3NA461qIRKZ/+qox37YXPtSBswMGfiNptr+wrt6ds4HaMw23TP612fEyGekRE6LNRiLYr/aqbHXNedw==" }, "unpipe": { "version": "1.0.0", @@ -3956,19 +4356,6 @@ "punycode": "^2.1.0" } }, - "url-join": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", - "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==" - }, - "url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "requires": { - "prepend-http": "^2.0.0" - } - }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -3985,9 +4372,9 @@ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" }, "v8-compile-cache": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.1.tgz", - "integrity": "sha512-8OQ9CL+VWyt3JStj7HX7/ciTL2V3Rl1Wf5OL+SNTm0yK1KvtReVulksyeRnCANHHuUxHlQig+JJDlUhBt1NQDQ==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.2.0.tgz", + "integrity": "sha512-gTpR5XQNKFwOd4clxfnhaqvfqMpqEwr4tOtCyz4MtYZX2JYhfr1JvBFKdS+7K/9rfpZR3VLX+YWBbKoxCgS43Q==", "dev": true }, "validate-npm-package-license": { @@ -4019,7 +4406,6 @@ "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", 
"integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, "requires": { "isexe": "^2.0.0" } @@ -4027,7 +4413,8 @@ "which-module": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true }, "wide-align": { "version": "1.1.3", @@ -4044,27 +4431,34 @@ "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=" }, "winston": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/winston/-/winston-3.2.1.tgz", - "integrity": "sha512-zU6vgnS9dAWCEKg/QYigd6cgMVVNwyTzKs81XZtTFuRwJOcDdBg7AU0mXVyNbs7O5RH2zdv+BdNZUlx7mXPuOw==", - "requires": { - "async": "^2.6.1", - "diagnostics": "^1.1.1", - "is-stream": "^1.1.0", - "logform": "^2.1.1", - "one-time": "0.0.4", - "readable-stream": "^3.1.1", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.3.3.tgz", + "integrity": "sha512-oEXTISQnC8VlSAKf1KYSSd7J6IWuRPQqDdo8eoRNaYKLvwSb5+79Z3Yi1lrl6KDpU6/VWaxpakDAtb1oQ4n9aw==", + "requires": { + "@dabh/diagnostics": "^2.0.2", + "async": "^3.1.0", + "is-stream": "^2.0.0", + "logform": "^2.2.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", "stack-trace": "0.0.x", "triple-beam": "^1.3.0", - "winston-transport": "^4.3.0" + "winston-transport": "^4.4.0" + }, + "dependencies": { + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==" + } } }, "winston-transport": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.3.0.tgz", - "integrity": "sha512-B2wPuwUi3vhzn/51Uukcao4dIduEiPOcOt9HJ3QeaXgkJ5Z7UwpBzxS4ZGNHtrxrUvTwemsQiSys0ihOf8Mp1A==", + "version": "4.4.0", + "resolved": 
"https://registry.npmjs.org/winston-transport/-/winston-transport-4.4.0.tgz", + "integrity": "sha512-Lc7/p3GtqtqPBYYtS6KCN3c77/2QCev51DvcJKbkFPQNoj1sinkGwLGFDxkXY9J6p9+EPnYs+D90uwbnaiURTw==", "requires": { - "readable-stream": "^2.3.6", + "readable-stream": "^2.3.7", "triple-beam": "^1.2.0" }, "dependencies": { @@ -4082,6 +4476,11 @@ "util-deprecate": "~1.0.1" } }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", @@ -4098,6 +4497,12 @@ "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", "dev": true }, + "workerpool": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.0.0.tgz", + "integrity": "sha512-fU2OcNA/GVAJLLyKUoHkAgIhKb0JoCpSjLC/G2vYKxUjVmQwGbRVeoPJ1a8U4pnVofz4AQV5Y/NEw8oKqxEBtA==", + "dev": true + }, "wrap-ansi": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", @@ -4122,20 +4527,20 @@ } }, "ws": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.3.0.tgz", - "integrity": "sha512-iFtXzngZVXPGgpTlP1rBqsUK82p9tKqsWRPg5L56egiljujJT3vGAYnHANvFxBieXrTFavhzhxW52jnaWV+w2w==" + "version": "7.4.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.0.tgz", + "integrity": "sha512-kyFwXuV/5ymf+IXhS6f0+eAFvydbaBW3zjpT6hUdAh/hbVjTIB5EHBGi0bPoCLSK2wcuz3BrEkB9LrYv1Nm4NQ==" }, "wtfnode": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.1.tgz", - "integrity": "sha512-S7S7D8CGHVCtlTn1IWX+nEbxavpL9+fk3vk02RPZHiExyZFb9oKTTig3nEnMCL2yaJ4047V5lAkuulXuO2OsOw==", + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/wtfnode/-/wtfnode-0.8.3.tgz", + 
"integrity": "sha512-Ll7iH8MbRQTE+QTw20Xax/0PM5VeSVSOhsmoR3+knWuJkEWTV5d9yPO6Sb+IDbt9I4UCrKpvHuF9T9zteRNOuA==", "dev": true }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "xdg-basedir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", + "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", "dev": true }, "y18n": { @@ -4144,26 +4549,22 @@ "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=" }, "yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yargs": { - "version": "15.3.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.3.1.tgz", - "integrity": "sha512-92O1HWEjw27sBfgmXiixJWT5hRBp2eobqXicLtPBIDBhYB+1HpwZlXmbW2luivBJHBzki+7VyCLRtAkScbTBQA==", + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.0.3.tgz", + "integrity": "sha512-6+nLw8xa9uK1BOEOykaiYAJVh6/CjxWXK/q9b5FpRgNslt8s22F2xMBqVIKgCRjNgGvGPBy8Vog7WN7yh4amtA==", "requires": { - "cliui": "^6.0.0", - "decamelize": "^1.2.0", - "find-up": "^4.1.0", - "get-caller-file": "^2.0.1", + "cliui": "^7.0.0", + "escalade": "^3.0.2", + "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", "string-width": "^4.2.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^18.1.1" + "y18n": "^5.0.1", + "yargs-parser": "^20.0.0" }, "dependencies": { 
"ansi-regex": { @@ -4172,27 +4573,21 @@ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" }, "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - }, "cliui": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.1.tgz", + "integrity": "sha512-rcvHOWyGyid6I1WjT/3NatKj2kDt9OdSHSXpyLXaMWFbKpGACNW8pRhhdPUq9MWUOdwn8Rz9AVETjF4105rZZQ==", "requires": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", - "wrap-ansi": "^6.2.0" + "wrap-ansi": "^7.0.0" } }, "color-convert": { @@ -4213,41 +4608,11 @@ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, 
"is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "requires": { - "p-locate": "^4.1.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "requires": { - "p-limit": "^2.2.0" - } - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" - }, "string-width": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", @@ -4267,9 +4632,9 @@ } }, "wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "requires": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -4277,18 +4642,14 @@ } }, "y18n": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==" + "version": "5.0.2", + "resolved": 
"https://registry.npmjs.org/y18n/-/y18n-5.0.2.tgz", + "integrity": "sha512-CkwaeZw6dQgqgPGeTWKMXCRmMcBgETFlTml1+ZOO+q7kGst8NREJ+eWwFNPVUQ4QGdAaklbqCZHH6Zuep1RjiA==" }, "yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } + "version": "20.2.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.1.tgz", + "integrity": "sha512-yYsjuSkjbLMBp16eaOt7/siKTjNVjMm3SoJnIg3sEh/JsvqVVDyjRKmaJV4cl+lNIgq6QEco2i3gDebJl7/vLA==" } } }, @@ -4311,14 +4672,16 @@ } }, "yargs-unparser": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-1.6.0.tgz", - "integrity": "sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-1.6.1.tgz", + "integrity": "sha512-qZV14lK9MWsGCmcr7u5oXGH0dbGqZAIxTDrWXZDo5zUr6b6iUmelNKO6x6R1dQT24AH3LgRxJpr8meWy2unolA==", "dev": true, "requires": { + "camelcase": "^5.3.1", + "decamelize": "^1.2.0", "flat": "^4.1.0", - "lodash": "^4.17.15", - "yargs": "^13.3.0" + "is-plain-obj": "^1.1.0", + "yargs": "^14.2.3" }, "dependencies": { "ansi-regex": { @@ -4327,6 +4690,21 @@ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", "dev": true }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + 
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, "cliui": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", @@ -4338,12 +4716,55 @@ "wrap-ansi": "^5.1.0" } }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, "is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", "dev": true }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, "string-width": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", @@ 
-4382,12 +4803,13 @@ "dev": true }, "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-14.2.3.tgz", + "integrity": "sha512-ZbotRWhF+lkjijC/VhmOT9wSgyBQ7+zr13+YLkhfsSiTriYsMzkTUFP18pFhWwBeMa5gUc1MzbhrO6/VB7c9Xg==", "dev": true, "requires": { "cliui": "^5.0.0", + "decamelize": "^1.2.0", "find-up": "^3.0.0", "get-caller-file": "^2.0.1", "require-directory": "^2.1.1", @@ -4396,7 +4818,17 @@ "string-width": "^3.0.0", "which-module": "^2.0.0", "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" + "yargs-parser": "^15.0.1" + } + }, + "yargs-parser": { + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-15.0.1.tgz", + "integrity": "sha512-0OAMV2mAZQrs3FkNpDQcBk1x5HXb8X4twADss4S0Iuk+2dGnLOE/fRHrsYm542GduMveyA77OF4wrNJuanRCWw==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" } } } diff --git a/csi/moac/package.json b/csi/moac/package.json index fca408ba8..c6c9fc6de 100644 --- a/csi/moac/package.json +++ b/csi/moac/package.json @@ -15,7 +15,7 @@ "scripts": { "prepare": "./bundle_protos.sh", "clean": "rm -f replica.js pool.js nexus.js", - "purge": "rm -rf node_modules proto replica.js pool.js nexus.js", + "purge": "rm -rf node_modules proto replica.js pool.js nexus.js watcher.js node_operator.js pool_operator.js volume_operator.js", "compile": "tsc --pretty", "start": "./index.js", "test": "mocha test/index.js", @@ -24,26 +24,28 @@ }, "license": "ISC", "dependencies": { - "@grpc/proto-loader": "^0.5.3", + "@grpc/proto-loader": "^0.5.5", + "@types/lodash": "^4.14.161", + "client-node-fixed-watcher": "^0.13.2", "express": "^4.17.1", "grpc-promise": "^1.4.0", "grpc-uds": "^0.1.4", - "js-yaml": "^3.13.1", - "kubernetes-client": "^8.3.6", - "lodash": "^4.17.19", + 
"js-yaml": "^3.14.0", + "lodash": "^4.17.20", "nats": "^2.0.0-27", "sleep-promise": "^8.0.1", - "winston": "^3.2.1", - "yargs": "^15.3.1" + "winston": "^3.3.3", + "yargs": "^16.0.3" }, "devDependencies": { "chai": "^4.2.0", "dirty-chai": "^2.0.1", - "mocha": "^7.1.1", - "semistandard": "^14.2.0", - "sinon": "^9.0.1", - "typescript": "^3.9.3", - "wtfnode": "^0.8.1" + "mocha": "^8.1.3", + "semistandard": "^16.0.0", + "sinon": "^9.1.0", + "source-map-support": "^0.5.19", + "typescript": "^4.0.3", + "wtfnode": "^0.8.3" }, "files": [ "*.js", diff --git a/csi/moac/pool_operator.js b/csi/moac/pool_operator.js deleted file mode 100644 index 3ea9d21cd..000000000 --- a/csi/moac/pool_operator.js +++ /dev/null @@ -1,427 +0,0 @@ -// Pool operator monitors k8s pool resources (desired state). It creates -// and destroys pools on storage nodes to reflect the desired state. - -'use strict'; - -const _ = require('lodash'); -const path = require('path'); -const fs = require('fs'); -const yaml = require('js-yaml'); -const log = require('./logger').Logger('pool-operator'); -const Watcher = require('./watcher'); -const EventStream = require('./event_stream'); -const Workq = require('./workq'); -const { FinalizerHelper } = require('./finalizer_helper'); -const poolFinalizerValue = 'finalizer.mayastor.openebs.io'; - -// Load custom resource definition -const crdPool = yaml.safeLoad( - fs.readFileSync(path.join(__dirname, '/crds/mayastorpool.yaml'), 'utf8') -); - -// Pool operator tries to bring the real state of storage pools on mayastor -// nodes in sync with mayastorpool custom resources in k8s. -class PoolOperator { - constructor (namespace) { - this.namespace = namespace; - this.k8sClient = null; // k8s client - this.registry = null; // registry containing info about mayastor nodes - this.eventStream = null; // A stream of node and pool events. - this.resource = {}; // List of storage pool resources indexed by name. - this.watcher = null; // pool CRD watcher. 
- this.workq = new Workq(); // for serializing pool operations - this.finalizerHelper = new FinalizerHelper( - this.namespace, - crdPool.spec.group, - crdPool.spec.version, - crdPool.spec.names.plural - ); - } - - // Create pool CRD if it doesn't exist and augment client object so that CRD - // can be manipulated as any other standard k8s api object. - // Bind node operator to pool operator through events. - // - // @param {object} k8sClient Client for k8s api server. - // @param {object} registry Registry with node and pool information. - // - async init (k8sClient, registry) { - log.info('Initializing pool operator'); - - try { - await k8sClient.apis[ - 'apiextensions.k8s.io' - ].v1beta1.customresourcedefinitions.post({ body: crdPool }); - log.info('Created CRD ' + crdPool.spec.names.kind); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. - if (err.statusCode !== 409) throw err; - } - k8sClient.addCustomResourceDefinition(crdPool); - - this.k8sClient = k8sClient; - this.registry = registry; - this.watcher = new Watcher( - 'pool', - this.k8sClient.apis['openebs.io'].v1alpha1.namespaces( - this.namespace - ).mayastorpools, - this.k8sClient.apis['openebs.io'].v1alpha1.watch.namespaces( - this.namespace - ).mayastorpools, - this._filterMayastorPool - ); - } - - // Convert pool CRD to an object with specification of the pool. - // - // @param {object} msp MayaStor pool custom resource. - // @returns {object} Pool properties defining a pool. - // - _filterMayastorPool (msp) { - const props = { - name: msp.metadata.name, - node: msp.spec.node, - disks: msp.spec.disks - }; - // sort the disks for easy string to string comparison - props.disks.sort(); - return props; - } - - // Start pool operator's watcher loop. - // - // NOTE: Not getting the start sequence right can have catastrophic - // consequence leading to unintended pool destruction and data loss - // (i.e. when node info is available before the pool CRD is). 
- // - // The right order of steps is: - // 1. Get pool resources - // 2. Get info about pools on storage nodes - async start () { - var self = this; - - // get pool k8s resources for initial synchronization and install - // event handlers to follow changes to them. - await self.watcher.start(); - self._bindWatcher(self.watcher); - self.watcher.list().forEach((r) => { - const poolName = r.name; - log.debug(`Reading pool custom resource "${poolName}"`); - self.resource[poolName] = r; - }); - - // this will start async processing of node and pool events - self.eventStream = new EventStream({ registry: self.registry }); - self.eventStream.on('data', async (ev) => { - if (ev.kind === 'pool') { - await self.workq.push(ev, self._onPoolEvent.bind(self)); - } else if (ev.kind === 'node' && (ev.eventType === 'sync' || ev.eventType === 'mod')) { - await self.workq.push(ev.object.name, self._onNodeSyncEvent.bind(self)); - } else if (ev.kind === 'replica' && (ev.eventType === 'new' || ev.eventType === 'del')) { - await self.workq.push(ev, self._onReplicaEvent.bind(self)); - } - }); - } - - // Handler for new/mod/del pool events - // - // @param {object} ev Pool event as received from event stream. - // - async _onPoolEvent (ev) { - const name = ev.object.name; - const resource = this.resource[name]; - - log.debug(`Received "${ev.eventType}" event for pool "${name}"`); - - if (ev.eventType === 'new') { - if (!resource) { - log.warn(`Unknown pool "${name}" will be destroyed`); - await this._destroyPool(name); - } else { - await this._updateResource(ev.object); - } - } else if (ev.eventType === 'mod') { - await this._updateResource(ev.object); - } else if (ev.eventType === 'del' && resource) { - log.warn(`Recreating destroyed pool "${name}"`); - await this._createPool(resource); - } - } - - // Handler for node sync event. - // - // Either the node is new or came up after an outage - check that we - // don't have any pending pools waiting to be created on it. 
- // - // @param {string} nodeName Name of the new node. - // - async _onNodeSyncEvent (nodeName) { - log.debug(`Syncing pool records for node "${nodeName}"`); - - const resources = Object.values(this.resource).filter( - (ent) => ent.node === nodeName - ); - for (let i = 0; i < resources.length; i++) { - await this._createPool(resources[i]); - } - } - - // Handler for new/del replica events - // - // @param {object} ev Replica event as received from event stream. - // - async _onReplicaEvent (ev) { - const replica = ev.object; - - log.debug(`Received "${ev.eventType}" event for replica "${replica.name}"`); - - if (replica.pool === undefined) { - log.warn(`not processing for finalizers: pool not defined for replica ${replica.name}.`); - return; - } - - const pool = this.registry.getPool(replica.pool.name); - if (pool == null) { - log.warn(`not processing for finalizers: failed to retrieve pool ${replica.pool.name}`); - return; - } - - log.debug(`On "${ev.eventType}" event for replica "${replica.name}", replica count=${pool.replicas.length}`); - - if (pool.replicas.length > 0) { - this.finalizerHelper.addFinalizerToCR(replica.pool.name, poolFinalizerValue); - } else { - this.finalizerHelper.removeFinalizerFromCR(replica.pool.name, poolFinalizerValue); - } - } - - // Stop the watcher, destroy event stream and reset resource cache. - async stop () { - this.watcher.removeAllListeners(); - await this.watcher.stop(); - this.eventStream.destroy(); - this.eventStream = null; - this.resource = {}; - } - - // Bind watcher's new/mod/del events to pool operator's callbacks. - // - // @param {object} watcher k8s pool resource watcher. 
- // - _bindWatcher (watcher) { - var self = this; - watcher.on('new', (resource) => { - self.workq.push(resource, self._createPool.bind(self)); - }); - watcher.on('mod', (resource) => { - self.workq.push(resource, self._modifyPool.bind(self)); - }); - watcher.on('del', (resource) => { - self.workq.push(resource.name, self._destroyPool.bind(self)); - }); - } - - // Create a pool according to the specification. - // That includes parameters checks, node lookup and a call to registry - // to create the pool. - // - // @param {object} resource Pool resource properties. - // @param {string} resource.name Pool name. - // @param {string} resource.node Node name for the pool. - // @param {string[]} resource.disks Disks comprising the pool. - // - async _createPool (resource) { - const name = resource.name; - const nodeName = resource.node; - this.resource[name] = resource; - - let pool = this.registry.getPool(name); - if (pool) { - // the pool already exists, just update its properties in k8s - await this._updateResource(pool); - return; - } - - const node = this.registry.getNode(nodeName); - if (!node) { - const msg = `mayastor does not run on node "${nodeName}"`; - log.error(`Cannot create pool "${name}": ${msg}`); - await this._updateResourceProps(name, 'pending', msg); - return; - } - if (!node.isSynced()) { - log.debug( - `The pool "${name}" will be synced when the node "${nodeName}" is synced` - ); - return; - } - - // We will update the pool status once the pool is created, but - // that can take a time, so set reasonable default now. - await this._updateResourceProps(name, 'pending', 'Creating the pool'); - - try { - // pool resource props will be updated when "new" pool event is emitted - pool = await node.createPool(name, resource.disks); - } catch (err) { - log.error(`Failed to create pool "${name}": ${err}`); - await this._updateResourceProps(name, 'pending', err.toString()); - } - } - - // Remove the pool from internal state and if it exists destroy it. 
- // Does not throw - only logs an error. - // - // @param {string} name Name of the pool to destroy. - // - async _destroyPool (name) { - var resource = this.resource[name]; - var pool = this.registry.getPool(name); - - if (resource) { - delete this.resource[name]; - } - if (pool) { - try { - await pool.destroy(); - } catch (err) { - log.error(`Failed to destroy pool "${name}@${pool.node.name}": ${err}`); - } - } - } - - // Changing pool parameters is actually not supported. However the pool - // operator's state should reflect the k8s state, so we make the change - // only at operator level and log a warning message. - // - // @param {string} newPool New pool parameters. - // - async _modifyPool (newProps) { - const name = newProps.name; - const curProps = this.resource[name]; - if (!curProps) { - log.warn(`Ignoring modification to unknown pool "${name}"`); - return; - } - if (!_.isEqual(curProps.disks, newProps.disks)) { - // TODO: Growing pools, mirrors, etc. is currently unsupported. - log.error(`Changing disks of the pool "${name}" is not supported`); - curProps.disks = newProps.disks; - } - // Changing node implies destroying the pool on the old node and recreating - // it on the new node that is destructive action -> unsupported. - if (curProps.node !== newProps.node) { - log.error(`Moving pool "${name}" between nodes is not supported`); - curProps.node = newProps.node; - } - } - - // Update status properties of k8s resource to be aligned with pool object - // properties. - // - // NOTE: This method does not throw if the update fails as there is nothing - // we can do if it fails. Though it logs an error message. - // - // @param {object} pool Pool object. 
- // - async _updateResource (pool) { - var name = pool.name; - var resource = this.resource[name]; - - // we don't track this pool so we cannot update the CRD - if (!resource) { - log.warn(`State of unknown pool "${name}" has changed`); - return; - } - var state = pool.state.replace(/^POOL_/, '').toLowerCase(); - var reason = ''; - if (state === 'offline') { - reason = `mayastor does not run on the node "${pool.node}"`; - } - - await this._updateResourceProps( - name, - state, - reason, - pool.disks, - pool.capacity, - pool.used, - pool.replicas.length - ); - } - - // Update status properties of k8s CRD object. - // - // Parameters "name" and "state" are required, the rest is optional. - // - // NOTE: This method does not throw if the update fails as there is nothing - // we can do if it fails. Though we log an error message in such a case. - // - // @param {string} name Name of the pool. - // @param {string} state State of the pool. - // @param {string} [reason] Reason describing the root cause of the state. - // @param {string[]} [disks] Disk URIs. - // @param {number} [capacity] Capacity of the pool in bytes. - // @param {number} [used] Used bytes in the pool. - // @param {number} [replicacount] Count of replicas using the pool. - // - async _updateResourceProps (name, state, reason, disks, capacity, used, replicacount) { - // For the update of CRD status we need a real k8s pool object, change the - // status in it and store it back. Another reason for grabbing the latest - // version of CRD from watcher cache (even if this.resource contains an older - // version than the one fetched from watcher cache) is that k8s refuses to - // update CRD unless the object's resourceVersion is the latest. 
- var k8sPool = this.watcher.getRaw(name); - - // it could happen that the object was deleted in the meantime - if (!k8sPool) { - log.warn( - `Pool resource "${name}" was deleted before its status could be updated` - ); - return; - } - const status = k8sPool.status || {}; - // avoid the update if the object has not changed - if ( - state === status.state && - reason === status.reason && - capacity === status.capacity && - used === status.used && - _.isEqual(disks, status.disks) - ) { - return; - } - - log.debug(`Updating properties of pool resource "${name}"`); - status.state = state; - status.reason = reason || ''; - status.disks = disks || []; - if (capacity != null) { - status.capacity = capacity; - } - if (used != null) { - status.used = used; - } - - k8sPool.status = status; - try { - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastorpools(name) - .status.put({ body: k8sPool }); - } catch (err) { - log.error(`Failed to update status of pool "${name}": ${err}`); - } - - if (replicacount != null) { - if (replicacount === 0) { - this.finalizerHelper.removeFinalizer(k8sPool, name, poolFinalizerValue); - } else { - this.finalizerHelper.addFinalizer(k8sPool, name, poolFinalizerValue); - } - } - } -} - -module.exports = PoolOperator; diff --git a/csi/moac/pool_operator.ts b/csi/moac/pool_operator.ts new file mode 100644 index 000000000..ae67d7714 --- /dev/null +++ b/csi/moac/pool_operator.ts @@ -0,0 +1,476 @@ +// Pool operator monitors k8s pool resources (desired state). It creates +// and destroys pools on storage nodes to reflect the desired state. 
+ +import * as fs from 'fs'; +import * as _ from 'lodash'; +import * as path from 'path'; +import { + ApiextensionsV1Api, + KubeConfig, +} from 'client-node-fixed-watcher'; +import { + CustomResource, + CustomResourceCache, + CustomResourceMeta, +} from './watcher'; + +const yaml = require('js-yaml'); +const log = require('./logger').Logger('pool-operator'); +const EventStream = require('./event_stream'); +const Workq = require('./workq'); + +const RESOURCE_NAME: string = 'mayastorpool'; +const POOL_FINALIZER = 'finalizer.mayastor.openebs.io'; + +// Load custom resource definition +const crdPool = yaml.safeLoad( + fs.readFileSync(path.join(__dirname, '/crds/mayastorpool.yaml'), 'utf8') +); + +// Set of possible pool states. Some of them come from mayastor and +// offline, pending and error are deduced in the control plane itself. +enum PoolState { + Unknown = "unknown", + Online = "online", + Degraded = "degraded", + Faulted = "faulted", + Offline = "offline", + Pending = "pending", + Error = "error", +} + +function poolStateFromString(val: string): PoolState { + if (val === PoolState.Online) { + return PoolState.Online; + } else if (val === PoolState.Degraded) { + return PoolState.Degraded; + } else if (val === PoolState.Faulted) { + return PoolState.Faulted; + } else if (val === PoolState.Offline) { + return PoolState.Offline; + } else if (val === PoolState.Pending) { + return PoolState.Pending; + } else if (val === PoolState.Error) { + return PoolState.Error; + } else { + return PoolState.Unknown; + } +} + +// Object defines properties of pool resource. +export class PoolResource extends CustomResource { + apiVersion?: string; + kind?: string; + metadata: CustomResourceMeta; + spec: { + node: string, + disks: string[], + }; + status: { + state: string, + reason?: string, + disks?: string[], + capacity?: number, + used?: number + }; + + // Create and validate pool custom resource. 
+ constructor(cr: CustomResource) { + super(); + this.apiVersion = cr.apiVersion; + this.kind = cr.kind; + if (cr.metadata === undefined) { + throw new Error('missing metadata'); + } else { + this.metadata = cr.metadata; + } + if (cr.spec === undefined) { + throw new Error('missing spec'); + } else { + let node = (cr.spec as any).node; + if (typeof node !== 'string') { + throw new Error('missing or invalid node in spec'); + } + let disks = (cr.spec as any).disks; + if (!Array.isArray(disks)) { + throw new Error('missing or invalid disks in spec'); + } + disks = disks.slice(0).sort(); + //if (typeof disks !== 'string') { + this.spec = { node, disks }; + } + this.status = { + state: poolStateFromString(cr.status?.state), + reason: cr.status?.reason, + disks: cr.status?.disks, + capacity: cr.status?.capacity, + used: cr.status?.used, + }; + } + + // Extract name of the pool from the resource metadata. + getName(): string { + if (this.metadata.name === undefined) { + throw Error("Resource object does not have a name") + } else { + return this.metadata.name; + } + } +} + +// Pool operator tries to bring the real state of storage pools on mayastor +// nodes in sync with mayastorpool custom resources in k8s. +export class PoolOperator { + namespace: string; + watcher: CustomResourceCache; // k8s resource watcher for pools + registry: any; // registry containing info about mayastor nodes + eventStream: any; // A stream of node and pool events. + workq: any; // for serializing pool operations + + // Create pool operator. + // + // @param namespace Namespace the operator should operate on. + // @param kubeConfig KubeConfig. + // @param registry Registry with node objects. + // @param [idleTimeout] Timeout for restarting watcher connection when idle. 
+ constructor ( + namespace: string, + kubeConfig: KubeConfig, + registry: any, + idleTimeout: number | undefined, + ) { + this.namespace = namespace; + this.registry = registry; // registry containing info about mayastor nodes + this.eventStream = null; // A stream of node and pool events. + this.workq = new Workq(); // for serializing pool operations + this.watcher = new CustomResourceCache( + this.namespace, + RESOURCE_NAME, + kubeConfig, + PoolResource, + { idleTimeout } + ); + } + + // Create pool CRD if it doesn't exist. + // + // @param kubeConfig KubeConfig. + async init (kubeConfig: KubeConfig) { + log.info('Initializing pool operator'); + let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); + try { + await k8sExtApi.createCustomResourceDefinition(crdPool); + log.info(`Created CRD ${RESOURCE_NAME}`); + } catch (err) { + // API returns a 409 Conflict if CRD already exists. + if (err.statusCode !== 409) throw err; + } + } + + // Start pool operator's watcher loop. + // + // NOTE: Not getting the start sequence right can have catastrophic + // consequence leading to unintended pool destruction and data loss + // (i.e. when node info is available before the pool CRD is). + // + // The right order of steps is: + // 1. Get pool resources + // 2. Get info about pools on storage nodes + async start () { + var self = this; + + // get pool k8s resources for initial synchronization and install + // event handlers to follow changes to them. 
+ self._bindWatcher(self.watcher); + await self.watcher.start(); + + // this will start async processing of node and pool events + self.eventStream = new EventStream({ registry: self.registry }); + self.eventStream.on('data', async (ev: any) => { + if (ev.kind === 'pool') { + await self.workq.push(ev, self._onPoolEvent.bind(self)); + } else if (ev.kind === 'node' && (ev.eventType === 'sync' || ev.eventType === 'mod')) { + await self.workq.push(ev.object.name, self._onNodeSyncEvent.bind(self)); + } else if (ev.kind === 'replica' && (ev.eventType === 'new' || ev.eventType === 'del')) { + await self.workq.push(ev, self._onReplicaEvent.bind(self)); + } + }); + } + + // Handler for new/mod/del pool events + // + // @param ev Pool event as received from event stream. + // + async _onPoolEvent (ev: any) { + const name: string = ev.object.name; + const resource = this.watcher.get(name); + + log.debug(`Received "${ev.eventType}" event for pool "${name}"`); + + if (ev.eventType === 'new') { + if (resource === undefined) { + log.warn(`Unknown pool "${name}" will be destroyed`); + await this._destroyPool(name); + } else { + await this._updateResource(ev.object); + } + } else if (ev.eventType === 'mod') { + await this._updateResource(ev.object); + } else if (ev.eventType === 'del' && resource) { + log.warn(`Recreating destroyed pool "${name}"`); + await this._createPool(resource); + } + } + + // Handler for node sync event. + // + // Either the node is new or came up after an outage - check that we + // don't have any pending pools waiting to be created on it. + // + // @param nodeName Name of the new node. 
+ // + async _onNodeSyncEvent (nodeName: string) { + log.debug(`Syncing pool records for node "${nodeName}"`); + + const resources = this.watcher.list().filter( + (ent) => ent.spec.node === nodeName + ); + for (let i = 0; i < resources.length; i++) { + await this._createPool(resources[i]); + } + } + + // Handler for new/del replica events + // + // @param ev Replica event as received from event stream. + // + async _onReplicaEvent (ev: any) { + const pool = ev.object.pool; + if (!pool) { + // can happen if the node goes away (replica will shortly disappear too) + return; + } + await this._updateFinalizer(pool.name, pool.replicas.length > 0); + } + + // Stop the events, destroy event stream and reset resource cache. + stop () { + this.watcher.stop(); + this.watcher.removeAllListeners(); + if (this.eventStream) { + this.eventStream.destroy(); + this.eventStream = null; + } + } + + // Bind watcher's new/mod/del events to pool operator's callbacks. + // + // @param watcher k8s pool resource watcher. + // + _bindWatcher (watcher: CustomResourceCache) { + watcher.on('new', (resource: PoolResource) => { + this.workq.push(resource, this._createPool.bind(this)); + }); + watcher.on('mod', (resource: PoolResource) => { + this.workq.push(resource, this._modifyPool.bind(this)); + }); + watcher.on('del', (resource: PoolResource) => { + this.workq.push(resource, async (arg: PoolResource) => { + await this._destroyPool(arg.getName()); + }); + }); + } + + // Create a pool according to the specification. + // That includes parameters checks, node lookup and a call to registry + // to create the pool. + // + // @param resource Pool resource properties. 
+ // + async _createPool (resource: PoolResource) { + const name: string = resource.getName(); + const nodeName = resource.spec.node; + + let pool = this.registry.getPool(name); + if (pool) { + // the pool already exists, just update its properties in k8s + await this._updateResource(pool); + return; + } + + const node = this.registry.getNode(nodeName); + if (!node) { + const msg = `mayastor does not run on node "${nodeName}"`; + log.error(`Cannot create pool "${name}": ${msg}`); + await this._updateResourceProps(name, PoolState.Pending, msg); + return; + } + if (!node.isSynced()) { + const msg = `mayastor on node "${nodeName}" is offline`; + log.error(`Cannot sync pool "${name}": ${msg}`); + await this._updateResourceProps(name, PoolState.Pending, msg); + return; + } + + // We will update the pool status once the pool is created, but + // that can take a time, so set reasonable default now. + await this._updateResourceProps(name, PoolState.Pending, 'Creating the pool'); + + try { + // pool resource props will be updated when "new" pool event is emitted + pool = await node.createPool(name, resource.spec.disks); + } catch (err) { + log.error(`Failed to create pool "${name}": ${err}`); + await this._updateResourceProps(name, PoolState.Error, err.toString()); + } + } + + // Remove the pool from internal state and if it exists destroy it. + // Does not throw - only logs an error. + // + // @param name Name of the pool to destroy. + // + async _destroyPool (name: string) { + var pool = this.registry.getPool(name); + + if (pool) { + try { + await pool.destroy(); + } catch (err) { + log.error(`Failed to destroy pool "${name}@${pool.node.name}": ${err}`); + } + } + } + + // Changing pool parameters is actually not supported. However the pool + // operator's state should reflect the k8s state, so we make the change + // only at operator level and log a warning message. + // + // @param newPool New pool parameters. 
+ // + async _modifyPool (resource: PoolResource) { + const name = resource.getName(); + const pool = this.registry.getPool(name); + if (!pool) { + log.warn(`Ignoring modification to pool "${name}" that does not exist`); + return; + } + // Just now we don't even try to compare that the disks are the same as in + // the spec because mayastor returns disks prefixed by aio/iouring protocol + // and with uuid query parameter. + // TODO: Growing pools, mirrors, etc. is currently unsupported. + + // Changing node implies destroying the pool on the old node and recreating + // it on the new node that is destructive action -> unsupported. + if (pool.node.name !== resource.spec.node) { + log.error(`Moving pool "${name}" between nodes is not supported`); + } + } + + // Update status properties of k8s resource to be aligned with pool object + // properties. + // + // NOTE: This method does not throw if the update fails as there is nothing + // we can do if it fails. Though it logs an error message. + // + // @param pool Pool object. + // + async _updateResource (pool: any) { + var name = pool.name; + var resource = this.watcher.get(name); + + // we don't track this pool so we cannot update the CRD + if (!resource) { + log.warn(`State of unknown pool "${name}" has changed`); + return; + } + var state = poolStateFromString( + pool.state.replace(/^POOL_/, '').toLowerCase() + ); + var reason; + if (state === PoolState.Offline) { + reason = `mayastor does not run on the node "${pool.node}"`; + } + + await this._updateResourceProps( + name, + state, + reason, + pool.disks, + pool.capacity, + pool.used, + ); + } + + // Update status properties of k8s CRD object. + // + // Parameters "name" and "state" are required, the rest is optional. + // + // NOTE: This method does not throw if the update fails as there is nothing + // we can do if it fails. Though we log an error message in such a case. + // + // @param name Name of the pool. + // @param state State of the pool. 
+ // @param [reason] Reason describing the root cause of the state. + // @param [disks] Disk URIs. + // @param [capacity] Capacity of the pool in bytes. + // @param [used] Used bytes in the pool. + async _updateResourceProps ( + name: string, + state: PoolState, + reason?: string, + disks?: string[], + capacity?: number, + used?: number, + ) { + try { + await this.watcher.updateStatus(name, (orig: PoolResource) => { + // avoid the update if the object has not changed + if ( + state === orig.status.state && + (reason === orig.status.reason || (!reason && !orig.status.reason)) && + (capacity === undefined || capacity === orig.status.capacity) && + (used === undefined || used === orig.status.used) && + (disks === undefined || _.isEqual(disks, orig.status.disks)) + ) { + return; + } + + log.debug(`Updating properties of pool resource "${name}"`); + let resource: PoolResource = _.cloneDeep(orig); + resource.status = { + state: state, + reason: reason || '' + }; + if (disks != null) { + resource.status.disks = disks; + } + if (capacity != null) { + resource.status.capacity = capacity; + } + if (used != null) { + resource.status.used = used; + } + return resource; + }); + } catch (err) { + log.error(`Failed to update status of pool "${name}": ${err}`); + } + } + + // Place or remove finalizer from pool resource. + // + // @param name Name of the pool. + // @param [busy] At least one replica on it. 
+ async _updateFinalizer(name: string, busy: boolean) { + try { + if (busy) { + this.watcher.addFinalizer(name, POOL_FINALIZER); + } else { + this.watcher.removeFinalizer(name, POOL_FINALIZER); + } + } catch (err) { + log.error(`Failed to update finalizer on pool "${name}": ${err}`); + } + } +} diff --git a/csi/moac/test/index.js b/csi/moac/test/index.js index 55489baa2..d80b36adb 100644 --- a/csi/moac/test/index.js +++ b/csi/moac/test/index.js @@ -24,7 +24,8 @@ const volumeOperator = require('./volume_operator_test.js'); const restApi = require('./rest_api_test.js'); const csiTest = require('./csi_test.js'); -logger.setLevel('debug'); +require('source-map-support').install(); +logger.setLevel('silly'); // Function form for terminating assertion properties to make JS linter happy chai.use(dirtyChai); diff --git a/csi/moac/test/nats_test.js b/csi/moac/test/nats_test.js index 4e19394e1..f9afd82f3 100644 --- a/csi/moac/test/nats_test.js +++ b/csi/moac/test/nats_test.js @@ -18,13 +18,13 @@ const RECONNECT_DELAY = 300; const GRPC_ENDPOINT = '127.0.0.1:12345'; const NODE_NAME = 'node-name'; -var natsProc; +let natsProc; // Starts nats server and call callback when the server is up and ready. 
function startNats (done) { natsProc = spawn('nats-server', ['-a', NATS_HOST, '-p', NATS_PORT]); - var doneCalled = false; - var stderr = ''; + let doneCalled = false; + let stderr = ''; natsProc.stderr.on('data', (data) => { stderr += data.toString(); @@ -56,9 +56,25 @@ function stopNats () { } module.exports = function () { - var eventBus; - var registry; - var nc; + let eventBus; + let registry; + let nc; + const sc = nats.StringCodec(); + + function connectNats (done) { + nats.connect({ + servers: [`nats://${NATS_EP}`] + }) + .then((res) => { + nc = res; + done(); + }) + .catch(() => { + setTimeout(() => { + connectNats(done); + }, 200); + }); + } // Create registry, event bus object, nats client and start nat server before((done) => { @@ -67,8 +83,7 @@ module.exports = function () { eventBus = new MessageBus(registry, RECONNECT_DELAY); startNats(err => { if (err) return done(err); - nc = nats.connect(`nats://${NATS_EP}`); - nc.on('connect', () => done()); + connectNats(done); }); }); @@ -90,10 +105,10 @@ module.exports = function () { }); it('should register a node', async () => { - nc.publish('v0/registry', JSON.stringify({ + nc.publish('v0/registry', sc.encode(JSON.stringify({ id: 'v0/register', data: { id: NODE_NAME, grpcEndpoint: GRPC_ENDPOINT } - })); + }))); await waitUntil(async () => { return registry.getNode(NODE_NAME); }, 1000, 'new node'); @@ -103,34 +118,34 @@ module.exports = function () { }); it('should ignore register request with missing node name', async () => { - nc.publish('v0/registry', JSON.stringify({ + nc.publish('v0/registry', sc.encode(JSON.stringify({ id: 'v0/register', data: { grpcEndpoint: GRPC_ENDPOINT } - })); + }))); // small delay to wait for a possible crash of moac await sleep(10); }); it('should ignore register request with missing grpc endpoint', async () => { - nc.publish('v0/registry', JSON.stringify({ + nc.publish('v0/registry', sc.encode(JSON.stringify({ id: 'v0/register', data: { id: NODE_NAME } - })); + }))); // small 
delay to wait for a possible crash of moac await sleep(10); }); it('should not crash upon a request with invalid JSON', async () => { - nc.publish('v0/register', '{"id": "NODE", "grpcEndpoint": "something"'); + nc.publish('v0/register', sc.encode('{"id": "NODE", "grpcEndpoint": "something"')); // small delay to wait for a possible crash of moac await sleep(10); }); it('should deregister a node', async () => { - nc.publish('v0/registry', JSON.stringify({ + nc.publish('v0/registry', sc.encode(JSON.stringify({ id: 'v0/deregister', data: { id: NODE_NAME } - })); + }))); await waitUntil(async () => { return !registry.getNode(NODE_NAME); }, 1000, 'node removal'); diff --git a/csi/moac/test/node_operator_test.js b/csi/moac/test/node_operator_test.js index ef273c7fd..c02b3eca6 100644 --- a/csi/moac/test/node_operator_test.js +++ b/csi/moac/test/node_operator_test.js @@ -1,26 +1,38 @@ // Unit tests for the node operator -// -// We don't test the init method which depends on k8s api client and watcher. -// That method *must* be tested manually and in real k8s environment. For the -// rest of the dependencies we provide fake objects which mimic the real -// behaviour and allow us to test node operator in isolation from other -// components. 
'use strict'; const expect = require('chai').expect; const sinon = require('sinon'); const sleep = require('sleep-promise'); +const { KubeConfig } = require('client-node-fixed-watcher'); const Registry = require('../registry'); -const NodeOperator = require('../node_operator'); +const { NodeOperator, NodeResource } = require('../node_operator'); +const { mockCache } = require('./watcher_stub'); const Node = require('./node_stub'); -const Watcher = require('./watcher_stub'); +const EVENT_PROPAGATION_DELAY = 10; const NAME = 'node-name'; const NAMESPACE = 'mayastor'; const ENDPOINT = 'localhost:1234'; const ENDPOINT2 = 'localhost:1235'; +const fakeConfig = { + clusters: [ + { + name: 'cluster', + server: 'foo.company.com' + } + ], + contexts: [ + { + cluster: 'cluster', + user: 'user' + } + ], + users: [{ name: 'user' }] +}; + function defaultMeta (name) { return { creationTimestamp: '2019-02-15T18:23:53Z', @@ -33,174 +45,151 @@ function defaultMeta (name) { }; } -module.exports = function () { - var msStub, putStub, putStatusStub, deleteStub, postStub; - - // Create k8s node resource object - function createNodeResource (name, grpcEndpoint, status) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorNode', - metadata: defaultMeta(name), - spec: { grpcEndpoint } - }; - if (status) { - obj.status = status; - } - return obj; +// Create k8s node resource object +function createK8sNodeResource (name, grpcEndpoint, status) { + const obj = { + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorNode', + metadata: defaultMeta(name), + spec: { grpcEndpoint } + }; + if (status) { + obj.status = status; } + return obj; +} - // k8s api client stub. - // - // Note that this stub serves only for PUT method on mayastor resource - // endpoint to update the status of resource. Fake watcher that is used - // in the tests does not use this client stub. 
- function createK8sClient (watcher) { - const mayastornodes = { mayastornodes: function (name) {} }; - const namespaces = function (ns) { - expect(ns).to.equal(NAMESPACE); - return mayastornodes; - }; - const client = { - apis: { - 'openebs.io': { - v1alpha1: { namespaces } - } - } - }; - - msStub = sinon.stub(mayastornodes, 'mayastornodes'); - msStub.post = async function (payload) { - watcher.objects[payload.body.metadata.name] = payload.body; - // simulate the asynchronicity of the put - await sleep(1); - }; - postStub = sinon.stub(msStub, 'post'); - postStub.callThrough(); - - const msObject = { - // the tricky thing here is that we have to update watcher's cache - // if we use this fake k8s client to change the object in order to - // mimic real behaviour. - put: async function (payload) { - watcher.objects[payload.body.metadata.name].spec = payload.body.spec; - }, - delete: async function () {}, - status: { - put: async function (payload) { - watcher.objects[payload.body.metadata.name].status = - payload.body.status; - } - } - }; - putStub = sinon.stub(msObject, 'put'); - putStub.callThrough(); - putStatusStub = sinon.stub(msObject.status, 'put'); - putStatusStub.callThrough(); - deleteStub = sinon.stub(msObject, 'delete'); - deleteStub.callThrough(); - msStub.returns(msObject); - return client; - } +// Create k8s node resource object +function createNodeResource (name, grpcEndpoint, status) { + return new NodeResource(createK8sNodeResource(name, grpcEndpoint, status)); +} - // Create a pool operator object suitable for testing - with fake watcher - // and fake k8s api client. 
- async function mockedNodeOperator (k8sObjects, registry) { - const oper = new NodeOperator(NAMESPACE); - oper.registry = registry; - oper.watcher = new Watcher(oper._filterMayastorNode, k8sObjects); - oper.k8sClient = createK8sClient(oper.watcher); - - await oper.start(); - // give event-stream time to run its _start method to prevent race - // conditions in test code when the underlaying source is modified - // before _start is run. - await sleep(1); - return oper; - } +// Create a pool operator object suitable for testing - with fake watcher +// and fake k8s api client. +function createNodeOperator (registry) { + const kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + return new NodeOperator(NAMESPACE, kc, registry); +} - describe('resource filter', () => { - it('valid mayastor node with status should pass the filter', () => { +module.exports = function () { + describe('NodeResource constructor', () => { + it('should create valid node resource with status', () => { const obj = createNodeResource(NAME, ENDPOINT, 'online'); - const res = NodeOperator.prototype._filterMayastorNode(obj); - expect(res.metadata.name).to.equal(NAME); - expect(res.spec.grpcEndpoint).to.equal(ENDPOINT); - expect(res.status).to.equal('online'); + expect(obj.metadata.name).to.equal(NAME); + expect(obj.spec.grpcEndpoint).to.equal(ENDPOINT); + expect(obj.status).to.equal('online'); }); - it('valid mayastor node without status should pass the filter', () => { + it('should create valid node resource without status', () => { const obj = createNodeResource(NAME, ENDPOINT); - const res = NodeOperator.prototype._filterMayastorNode(obj); - expect(res.metadata.name).to.equal(NAME); - expect(res.spec.grpcEndpoint).to.equal(ENDPOINT); - expect(res.status).to.equal('unknown'); + expect(obj.metadata.name).to.equal(NAME); + expect(obj.spec.grpcEndpoint).to.equal(ENDPOINT); + expect(obj.status).to.equal('unknown'); }); - it('mayastor node without grpc-endpoint should be ignored', () => { - const 
obj = createNodeResource(NAME); - const res = NodeOperator.prototype._filterMayastorNode(obj); - expect(res).to.be.null(); + it('should not create node resource without grpc endpoint', () => { + expect(() => createNodeResource(NAME)).to.throw(); }); }); - describe('watcher events', () => { - var oper; // node operator + describe('init method', () => { + let kc, oper, fakeApiStub; - afterEach(async () => { + beforeEach(() => { + const registry = new Registry(); + kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + oper = new NodeOperator(NAMESPACE, kc, registry); + const makeApiStub = sinon.stub(kc, 'makeApiClient'); + const fakeApi = { + createCustomResourceDefinition: () => null + }; + fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); + makeApiStub.returns(fakeApi); + }); + + afterEach(() => { if (oper) { - await oper.stop(); - oper = null; + oper.stop(); + oper = undefined; } }); - it('should add node to registry for existing resource when starting the operator', async () => { - const registry = new Registry(); + it('should create CRD if it does not exist', async () => { + fakeApiStub.resolves(); + await oper.init(kc); + }); + + it('should ignore error if CRD already exists', async () => { + fakeApiStub.rejects({ + statusCode: 409 + }); + await oper.init(kc); + }); + + it('should throw if CRD creation fails', async () => { + fakeApiStub.rejects({ + statusCode: 404 + }); + try { + await oper.init(kc); + } catch (err) { + return; + } + throw new Error('Init did not fail'); + }); + }); + + describe('watcher events', () => { + let oper; // node operator + let stubs, registry, nodeResource; + + beforeEach(async () => { + registry = new Registry(); registry.Node = Node; - const addNodeSpy = sinon.spy(registry, 'addNode'); - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); - sinon.assert.calledOnce(addNodeSpy); - sinon.assert.calledWith(addNodeSpy, NAME, ENDPOINT); + oper = 
createNodeOperator(registry); + nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + }); + + afterEach(() => { + if (oper) { + oper.stop(); + oper = null; + } }); it('should add node to registry upon "new" event', async () => { - const registry = new Registry(); - registry.Node = Node; const addNodeSpy = sinon.spy(registry, 'addNode'); - oper = await mockedNodeOperator([], registry); - // trigger "new" event - oper.watcher.newObject(createNodeResource(NAME, ENDPOINT)); + oper.watcher.emit('new', nodeResource); + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.calledOnce(addNodeSpy); sinon.assert.calledWith(addNodeSpy, NAME, ENDPOINT); }); it('should remove node from registry upon "del" event', async () => { // create registry with a node - const registry = new Registry(); const node = new Node(NAME); node.connect(ENDPOINT); registry.nodes[NAME] = node; - const addNodeSpy = sinon.spy(registry, 'addNode'); const removeNodeSpy = sinon.spy(registry, 'removeNode'); - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); - sinon.assert.calledOnce(addNodeSpy); // trigger "del" event - oper.watcher.delObject(NAME); - sinon.assert.calledOnce(addNodeSpy); - sinon.assert.calledOnce(removeNodeSpy); + oper.watcher.emit('del', nodeResource); + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.calledWith(removeNodeSpy, NAME); }); it('should not do anything upon "mod" event', async () => { // create registry with a node - const registry = new Registry(); const node = new Node(NAME); node.connect(ENDPOINT); registry.nodes[NAME] = node; @@ -209,242 +198,270 @@ module.exports = function () { const removeNodeStub = sinon.stub(registry, 'removeNode'); removeNodeStub.returns(); - oper = await mockedNodeOperator( - 
[createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); - sinon.assert.calledOnce(addNodeStub); // trigger "mod" event - oper.watcher.modObject(createNodeResource(NAME, ENDPOINT)); + oper.watcher.emit('mod', nodeResource); + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.notCalled(removeNodeStub); - sinon.assert.calledOnce(addNodeStub); + sinon.assert.notCalled(addNodeStub); }); }); - describe('registry node events', () => { - var oper; // node operator + describe('registry events', () => { + let registry, oper; + + beforeEach(async () => { + registry = new Registry(); + registry.Node = Node; + oper = createNodeOperator(registry); + }); - afterEach(async () => { + afterEach(() => { if (oper) { - await oper.stop(); + oper.stop(); oper = null; } }); it('should create a resource upon "new" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); - registry.addNode(NAME, ENDPOINT); - await sleep(20); - sinon.assert.calledOnce(postStub); - sinon.assert.calledWithMatch(postStub, { - body: { - metadata: { - name: NAME, - namespace: NAMESPACE - }, - spec: { - grpcEndpoint: ENDPOINT - } - } - }); - sinon.assert.notCalled(putStub); - sinon.assert.calledOnce(putStatusStub); - sinon.assert.calledWithMatch(putStatusStub, { - body: { - status: 'online' - } + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.onFirstCall().returns(); + stubs.get.onSecondCall().returns(nodeResource); }); - sinon.assert.notCalled(deleteStub); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + registry.addNode(NAME, ENDPOINT); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.calledOnce(stubs.create); + expect(stubs.create.args[0][4].metadata.name).to.equal(NAME); + expect(stubs.create.args[0][4].spec.grpcEndpoint).to.equal(ENDPOINT); + 
sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); it('should not crash if POST fails upon "new" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); - postStub.rejects(new Error('post failed')); - registry.addNode(NAME, ENDPOINT); - await sleep(10); - sinon.assert.calledOnce(postStub); - sinon.assert.calledWithMatch(postStub, { - body: { - metadata: { - name: NAME, - namespace: NAMESPACE - }, - spec: { - grpcEndpoint: ENDPOINT - } - } + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.onFirstCall().returns(); + stubs.get.onSecondCall().returns(nodeResource); + stubs.create.rejects(new Error('post failed')); }); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + registry.addNode(NAME, ENDPOINT); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.calledOnce(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); it('should update the resource upon "new" node event if it exists', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); - oper.watcher.injectObject(createNodeResource(NAME, ENDPOINT, 'offline')); - registry.addNode(NAME, ENDPOINT2); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - spec: { - grpcEndpoint: ENDPOINT2 - } - } + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'offline'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); }); - 
sinon.assert.calledOnce(putStatusStub); - sinon.assert.calledWithMatch(putStatusStub, { - body: { - status: 'online' - } - }); - sinon.assert.notCalled(deleteStub); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + registry.addNode(NAME, ENDPOINT2); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].metadata.name).to.equal(NAME); + expect(stubs.update.args[0][5].spec.grpcEndpoint).to.equal(ENDPOINT2); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].metadata.name).to.equal(NAME); + expect(stubs.updateStatus.args[0][5].status).to.equal('online'); + sinon.assert.notCalled(stubs.delete); }); it('should not update the resource upon "new" node event if it is the same', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); - oper.watcher.injectObject(createNodeResource(NAME, ENDPOINT, 'online')); + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); registry.addNode(NAME, ENDPOINT); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); it('should update the resource upon "mod" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - 
registry - ); - registry.addNode(NAME, ENDPOINT2); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - spec: { - grpcEndpoint: ENDPOINT2 - } - } + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); }); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + registry.addNode(NAME, ENDPOINT2); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].metadata.name).to.equal(NAME); + expect(stubs.update.args[0][5].spec.grpcEndpoint).to.equal(ENDPOINT2); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); it('should update status of the resource upon "mod" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); registry.addNode(NAME, ENDPOINT); + await sleep(EVENT_PROPAGATION_DELAY); const node = registry.getNode(NAME); const isSyncedStub = sinon.stub(node, 'isSynced'); isSyncedStub.returns(false); node._offline(); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.calledOnce(putStatusStub); - sinon.assert.calledWithMatch(putStatusStub, { - body: { - status: 'offline' - } - }); - sinon.assert.notCalled(deleteStub); + await 
sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].metadata.name).to.equal(NAME); + expect(stubs.updateStatus.args[0][5].status).to.equal('offline'); + sinon.assert.notCalled(stubs.delete); }); it('should not crash if PUT fails upon "mod" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); - putStub.rejects(new Error('put failed')); + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + stubs.update.rejects(new Error('put failed')); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); registry.addNode(NAME, ENDPOINT2); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.calledOnce(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.calledTwice(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); - it('should not crash if the resource does not exist upon "mod" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); + it('should not create the resource upon "mod" node event', async () => { + let stubs; + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // secretly inject node to registry (watcher does not know) const node = new Node(NAME); node.connect(ENDPOINT); registry.nodes[NAME] = 
node; - // modify the node registry.addNode(NAME, ENDPOINT2); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); it('should delete the resource upon "del" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + // secretly inject node to registry (watcher does not know) + const node = new Node(NAME); + node.connect(ENDPOINT); + registry.nodes[NAME] = node; registry.removeNode(NAME); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.calledOnce(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.calledOnce(stubs.delete); }); it('should not crash if DELETE fails upon "del" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator( - [createNodeResource(NAME, ENDPOINT, 'online')], - registry - ); - deleteStub.rejects(new Error('delete failed')); + let stubs; + const nodeResource = createNodeResource(NAME, ENDPOINT, 'online'); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(nodeResource); + 
stubs.delete.rejects(new Error('delete failed')); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + // secretly inject node to registry (watcher does not know) + const node = new Node(NAME); + node.connect(ENDPOINT); + registry.nodes[NAME] = node; registry.removeNode(NAME); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.calledOnce(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.calledOnce(stubs.delete); }); it('should not crash if the resource does not exist upon "del" node event', async () => { - const registry = new Registry(); - registry.Node = Node; - oper = await mockedNodeOperator([], registry); + let stubs; + mockCache(oper.watcher, (arg) => { + stubs = arg; + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // secretly inject node to registry (watcher does not know) const node = new Node(NAME); node.connect(ENDPOINT); registry.nodes[NAME] = node; - // modify the node registry.removeNode(NAME); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); - sinon.assert.notCalled(deleteStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); + sinon.assert.notCalled(stubs.delete); }); }); }; diff --git a/csi/moac/test/pool_operator_test.js b/csi/moac/test/pool_operator_test.js index 6317d190a..8fab1fa45 100644 --- a/csi/moac/test/pool_operator_test.js +++ b/csi/moac/test/pool_operator_test.js @@ -1,17 +1,10 @@ // Unit tests for the pool operator // -// We don't test the init method which 
depends on k8s api client and watcher. -// That method *must* be tested manually and in real k8s environment. For the -// rest of the dependencies we provide fake objects which mimic the real -// behaviour and allow us to test pool operator in isolation from other -// components. -// // Pool operator depends on a couple of modules: // * registry (real) // * node object (fake) // * pool object (fake) -// * watcher (fake) -// * k8s client (fake) +// * watcher (mocked) // // As you can see most of them must be fake in order to do detailed testing // of pool operator. That makes the code more complicated and less readable. @@ -21,162 +14,232 @@ const expect = require('chai').expect; const sinon = require('sinon'); const sleep = require('sleep-promise'); +const { KubeConfig } = require('client-node-fixed-watcher'); const Registry = require('../registry'); const { GrpcError, GrpcCode } = require('../grpc_client'); -const PoolOperator = require('../pool_operator'); +const { PoolOperator, PoolResource } = require('../pool_operator'); const { Pool } = require('../pool'); -const Watcher = require('./watcher_stub'); +const { Replica } = require('../replica'); +const { mockCache } = require('./watcher_stub'); const Node = require('./node_stub'); const NAMESPACE = 'mayastor'; +const EVENT_PROPAGATION_DELAY = 10; -module.exports = function () { - var msStub, putStub; +const fakeConfig = { + clusters: [ + { + name: 'cluster', + server: 'foo.company.com' + } + ], + contexts: [ + { + cluster: 'cluster', + user: 'user' + } + ], + users: [{ name: 'user' }] +}; - // Create k8s pool resource object - function createPoolResource ( +// Create k8s pool resource object +function createK8sPoolResource ( + name, + node, + disks, + finalizers, + state, + reason, + capacity, + used +) { + const obj = { + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorPool', + metadata: { + creationTimestamp: '2019-02-15T18:23:53Z', + generation: 1, + name: name, + namespace: NAMESPACE, + finalizers: 
finalizers, + resourceVersion: '627981', + selfLink: `/apis/openebs.io/v1alpha1/namespaces/${NAMESPACE}/mayastorpools/${name}`, + uid: 'd99f06a9-314e-11e9-b086-589cfc0d76a7' + }, + spec: { + node: node, + disks: disks + } + }; + if (state) { + const status = { state }; + status.disks = disks.map((d) => `aio://${d}`); + if (reason != null) status.reason = reason; + if (capacity != null) status.capacity = capacity; + if (used != null) status.used = used; + obj.status = status; + } + return obj; +} + +function createPoolResource ( + name, + node, + disks, + finalizers, + state, + reason, + capacity, + used +) { + return new PoolResource(createK8sPoolResource( name, node, disks, + finalizers, state, reason, capacity, used - ) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorPool', - metadata: { - creationTimestamp: '2019-02-15T18:23:53Z', - generation: 1, - name: name, - namespace: NAMESPACE, - resourceVersion: '627981', - selfLink: `/apis/openebs.io/v1alpha1/namespaces/${NAMESPACE}/mayastorpools/${name}`, - uid: 'd99f06a9-314e-11e9-b086-589cfc0d76a7' - }, - spec: { - node: node, - disks: disks - } - }; - if (state) { - const status = { state }; - status.disks = disks.map((d) => `aio://${d}`); - if (reason != null) status.reason = reason; - if (capacity != null) status.capacity = capacity; - if (used != null) status.used = used; - obj.status = status; - } - return obj; - } - - // k8s api client stub. - // - // Note that this stub serves only for PUT method on mayastor resource - // endpoint to update the status of resource. Fake watcher that is used - // in the tests does not use this client stub. 
- function createK8sClient (watcher) { - const mayastorpools = { mayastorpools: function (name) {} }; - const namespaces = function (ns) { - expect(ns).to.equal(NAMESPACE); - return mayastorpools; - }; - const client = { - apis: { - 'openebs.io': { - v1alpha1: { namespaces } - } - } - }; - msStub = sinon.stub(mayastorpools, 'mayastorpools'); - const msObject = { - status: { - // the tricky thing here is that we have to update watcher's cache - // if we use this fake k8s client to change the object in order to - // mimic real behaviour. - put: async function (payload) { - watcher.objects[payload.body.metadata.name].status = - payload.body.status; - // simulate the asynchronicity of the put - // await sleep(1); - } - } - }; - putStub = sinon.stub(msObject.status, 'put'); - putStub.callThrough(); - msStub.returns(msObject); - return client; - } - - // Create a pool operator object suitable for testing - with fake watcher - // and fake k8s api client. - async function MockedPoolOperator (k8sObjects, nodes) { - const oper = new PoolOperator(NAMESPACE); - const registry = new Registry(); - registry.Node = Node; - nodes = nodes || []; - nodes.forEach((n) => (registry.nodes[n.name] = n)); - oper.registry = registry; - oper.watcher = new Watcher(oper._filterMayastorPool, k8sObjects); - oper.k8sClient = createK8sClient(oper.watcher); - - await oper.start(); - - // Let the initial "new" events pass by so that they don't interfere with - // whatever we are going to do with the operator after we return. - // - // TODO: Hardcoded delays are ugly. Find a better way. Applies to all - // sleeps in this file. - if (nodes.length > 0) { - await sleep(10); - } + )); +} + +// Create a pool operator object suitable for testing - with mocked watcher etc. 
+function createPoolOperator (nodes) { + const registry = new Registry(); + registry.Node = Node; + nodes = nodes || []; + nodes.forEach((n) => (registry.nodes[n.name] = n)); + const kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + return new PoolOperator(NAMESPACE, kc, registry); +} - return oper; - } - - describe('resource filter', () => { - it('valid mayastor pool should pass the filter', () => { +module.exports = function () { + describe('PoolResource constructor', () => { + it('should create valid mayastor pool with status', () => { const obj = createPoolResource( 'pool', 'node', ['/dev/sdc', '/dev/sdb'], - 'OFFLINE', + ['some.finalizer.com'], + 'offline', 'The node is down' ); - const res = PoolOperator.prototype._filterMayastorPool(obj); - expect(res).to.have.all.keys('name', 'node', 'disks'); - expect(res.name).to.equal('pool'); - expect(res.node).to.equal('node'); + expect(obj.metadata.name).to.equal('pool'); + expect(obj.spec.node).to.equal('node'); // the filter should sort the disks - expect(JSON.stringify(res.disks)).to.equal( + expect(JSON.stringify(obj.spec.disks)).to.equal( JSON.stringify(['/dev/sdb', '/dev/sdc']) ); - expect(res.state).to.be.undefined(); + expect(obj.status.state).to.equal('offline'); + expect(obj.status.reason).to.equal('The node is down'); + expect(obj.status.disks).to.deep.equal(['aio:///dev/sdc', 'aio:///dev/sdb']); + expect(obj.status.capacity).to.be.undefined(); + expect(obj.status.used).to.be.undefined(); }); - it('valid mayastor pool without status should pass the filter', () => { + it('should create valid mayastor pool without status', () => { const obj = createPoolResource('pool', 'node', ['/dev/sdc', '/dev/sdb']); - const res = PoolOperator.prototype._filterMayastorPool(obj); - expect(res).to.have.all.keys('name', 'node', 'disks'); - expect(res.name).to.equal('pool'); - expect(res.node).to.equal('node'); - expect(res.state).to.be.undefined(); + expect(obj.metadata.name).to.equal('pool'); + 
expect(obj.spec.node).to.equal('node'); + expect(obj.status.state).to.equal('unknown'); + }); + + it('should not create mayastor pool without node specification', () => { + expect(() => createPoolResource( + 'pool', + undefined, + ['/dev/sdc', '/dev/sdb'] + )).to.throw(); + }); + }); + + describe('init method', () => { + let kc, oper, fakeApiStub; + + beforeEach(() => { + const registry = new Registry(); + kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + oper = new PoolOperator(NAMESPACE, kc, registry); + const makeApiStub = sinon.stub(kc, 'makeApiClient'); + const fakeApi = { + createCustomResourceDefinition: () => null + }; + fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); + makeApiStub.returns(fakeApi); + }); + + afterEach(() => { + if (oper) { + oper.stop(); + oper = undefined; + } + }); + + it('should create CRD if it does not exist', async () => { + fakeApiStub.resolves(); + await oper.init(kc); + }); + + it('should ignore error if CRD already exists', async () => { + fakeApiStub.rejects({ + statusCode: 409 + }); + await oper.init(kc); + }); + + it('should throw if CRD creation fails', async () => { + fakeApiStub.rejects({ + statusCode: 404 + }); + try { + await oper.init(kc); + } catch (err) { + return; + } + throw new Error('Init did not fail'); }); }); describe('watcher events', () => { - var oper; // pool operator + let oper; // pool operator - afterEach(async () => { + afterEach(() => { if (oper) { - await oper.stop(); + oper.stop(); oper = null; } }); describe('new event', () => { + it('should process resources that existed before the operator was started', async () => { + let stubs; + oper = createPoolOperator([]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.list.returns([poolResource]); + }); + await oper.start(); + // give time to registry to install its callbacks + await 
sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].metadata.name).to.equal('pool'); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor does not run on node "node"' + }); + }); + it('should set "state" to PENDING when creating a pool', async () => { + let stubs; const node = new Node('node'); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.resolves( @@ -189,37 +252,35 @@ module.exports = function () { used: 10 }) ); - oper = await MockedPoolOperator([], [node]); + oper = createPoolOperator([node]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "new" event - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.calledOnce(createPoolStub); sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - kind: 'MayastorPool', - metadata: { - name: 'pool', - generation: 1, - resourceVersion: '627981' - }, - status: { - state: 'pending', - reason: 'Creating the pool' - } - } + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].metadata.name).to.equal('pool'); + 
expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'Creating the pool' }); }); it('should not try to create a pool if the node has not been synced', async () => { + let stubs; const node = new Node('node'); sinon.stub(node, 'isSynced').returns(false); const createPoolStub = sinon.stub(node, 'createPool'); @@ -233,21 +294,30 @@ module.exports = function () { used: 10 }) ); - oper = await MockedPoolOperator([], [node]); + oper = createPoolOperator([node]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "new" event - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.notCalled(createPoolStub); - sinon.assert.notCalled(msStub); - sinon.assert.notCalled(putStub); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); }); it('should not try to create a pool when pool with the same name already exists', async () => { + let stubs; + const node = new Node('node', {}, []); const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -255,270 +325,248 @@ module.exports = function () { capacity: 100, used: 10 }); - const node = new Node('node', {}, []); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.resolves(pool); - oper = await MockedPoolOperator([], [node]); + + oper = createPoolOperator([node]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb', '/dev/sdc']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + 
await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // this creates the inconsistency between real and k8s state which we are testing node.pools.push(pool); // trigger "new" event - oper.watcher.newObject( - // does not matter that the disks are different - still the same pool - createPoolResource('pool', 'node', ['/dev/sdb', '/dev/sdc']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); - - // the stub is called when the new node is synced - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'degraded', - reason: '', - disks: ['aio:///dev/sdb'], - capacity: 100, - used: 10 - } - } - }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.notCalled(createPoolStub); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'degraded', + reason: '', + disks: ['aio:///dev/sdb'], + capacity: 100, + used: 10 + }); }); // important test as moving the pool between nodes would destroy data it('should leave the pool untouched when pool exists and is on a different node', async () => { + let stubs; + const node1 = new Node('node1', {}, []); + const node2 = new Node('node2'); const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', + state: 'POOL_DEGRADED', capacity: 100, used: 10 }); - const node1 = new Node('node1', {}, []); - const node2 = new Node('node2'); const createPoolStub1 = sinon.stub(node1, 'createPool'); const createPoolStub2 = sinon.stub(node2, 'createPool'); createPoolStub1.resolves(pool); createPoolStub2.resolves(pool); - oper = await MockedPoolOperator([], [node1, node2]); + + oper = 
createPoolOperator([node1, node2]); + const poolResource = createPoolResource('pool', 'node2', ['/dev/sdb', '/dev/sdc']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // we assign the pool to node1 but later in the event it will be on node2 node1.pools.push(pool); // trigger "new" event - oper.watcher.newObject( - // does not matter that the disks are different - still the same pool - createPoolResource('pool', 'node2', ['/dev/sdb', '/dev/sdc']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); - - // the stub is called when the new node is synced - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'online', - reason: '', - disks: ['aio:///dev/sdb'] - } - } - }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.notCalled(createPoolStub1); sinon.assert.notCalled(createPoolStub2); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'degraded', + reason: '', + disks: ['aio:///dev/sdb'], + capacity: 100, + used: 10 + }); }); it('should set "reason" to error message when create pool fails', async () => { + let stubs; const node = new Node('node'); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.rejects( new GrpcError(GrpcCode.INTERNAL, 'create failed') ); - oper = await MockedPoolOperator([], [node]); + oper = createPoolOperator([node]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await 
oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "new" event - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); - - sinon.assert.calledTwice(msStub); - sinon.assert.alwaysCalledWith(msStub, 'pool'); - sinon.assert.calledTwice(putStub); - sinon.assert.calledWithMatch(putStub.firstCall, { - body: { - status: { - state: 'pending', - reason: 'Creating the pool' - } - } - }); - sinon.assert.calledWithMatch(putStub.secondCall, { - body: { - status: { - state: 'pending', - reason: 'Error: create failed' - } - } - }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(createPoolStub); sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledTwice(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'Creating the pool' + }); + expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ + state: 'error', + reason: 'Error: create failed' + }); }); it('should ignore failure to update the resource state', async () => { + let stubs; const node = new Node('node'); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.rejects( new GrpcError(GrpcCode.INTERNAL, 'create failed') ); - oper = await MockedPoolOperator([], [node]); - putStub.rejects(new Error('http put error')); + oper = createPoolOperator([node]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.updateStatus.resolves(new Error('http put error')); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // 
trigger "new" event - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); - - sinon.assert.calledTwice(msStub); - sinon.assert.alwaysCalledWith(msStub, 'pool'); - sinon.assert.calledTwice(putStub); - sinon.assert.calledWithMatch(putStub.firstCall, { - body: { - status: { - state: 'pending', - reason: 'Creating the pool' - } - } - }); - sinon.assert.calledWithMatch(putStub.secondCall, { - body: { - status: { - state: 'pending', - reason: 'Error: create failed' - } - } - }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(createPoolStub); sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledTwice(stubs.updateStatus); }); it('should not create a pool if node does not exist', async () => { - oper = await MockedPoolOperator([], []); - // trigger "new" event - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - - // give event callbacks time to propagate - await sleep(10); - - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'mayastor does not run on node "node"' - } - } + let stubs; + oper = createPoolOperator([]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); }); - }); - - it('when a pool is pre-imported it should be created once the node arrives and is synced', async () => { - const node = new Node('node'); - oper = await MockedPoolOperator([createPoolResource('pool', 'node', ['/dev/sdb'])], [node]); - + await oper.start(); + // give time to registry to install its callbacks + 
await sleep(EVENT_PROPAGATION_DELAY); + // trigger "new" event + oper.watcher.emit('new', poolResource); // give event callbacks time to propagate - await sleep(10); - - sinon.assert.calledTwice(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledTwice(putStub); - - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'Error: Broken connection to mayastor on node "node"' - } - } + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor does not run on node "node"' }); }); it('should create a pool once the node arrives and is synced', async () => { - oper = await MockedPoolOperator([], []); - oper.watcher.newObject( - createPoolResource('pool', 'node', ['/dev/sdb']) - ); - - // give event callbacks time to propagate - await sleep(10); - - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'mayastor does not run on node "node"' - } - } + let stubs; + oper = createPoolOperator([]); + const poolResource = createPoolResource('pool', 'node', ['/dev/sdb']); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.list.returns([poolResource]); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor does not run on node "node"' }); const node = 
new Node('node'); + const syncedStub = sinon.stub(node, 'isSynced'); + syncedStub.returns(false); oper.registry._registerNode(node); oper.registry.emit('node', { eventType: 'mod', object: node }); - // give event callbacks time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); // node is not yet synced - sinon.assert.calledThrice(msStub); - sinon.assert.calledThrice(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'mayastor does not run on node "node"' - } - } - }); - - node.connect(); + sinon.assert.calledTwice(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor does not run on node "node"' + }); + expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor on node "node" is offline' + }); + + syncedStub.returns(true); oper.registry.emit('node', { eventType: 'mod', object: node }); - // give event callbacks time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); // tried to create the pool but the node is a fake - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'Error: Broken connection to mayastor on node "node"' - } - } + sinon.assert.callCount(stubs.updateStatus, 4); + expect(stubs.updateStatus.args[2][5].status).to.deep.equal({ + state: 'pending', + reason: 'Creating the pool' + }); + expect(stubs.updateStatus.args[3][5].status).to.deep.equal({ + state: 'error', + reason: 'Error: Broken connection to mayastor on node "node"' }); }); }); describe('del event', () => { it('should destroy a pool', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -529,32 +577,39 @@ module.exports = function () { const destroyStub = sinon.stub(pool, 'destroy'); destroyStub.resolves(); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [ - createPoolResource( 
- 'pool', - 'node', - ['/dev/sdb'], - 'degraded', - '', - 100, - 10 - ) - ], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'degraded', + '', + 100, + 10 ); - + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "del" event - oper.watcher.delObject('pool'); + oper.watcher.emit('del', poolResource); // give event callbacks time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(msStub); + // called in response to registry new event + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.calledOnce(destroyStub); - expect(oper.resource).to.not.have.key('pool'); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); it('should not fail if pool does not exist', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -562,22 +617,42 @@ module.exports = function () { capacity: 100, used: 10 }); + const destroyStub = sinon.stub(pool, 'destroy'); + destroyStub.resolves(); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node', ['/dev/sdb'], 'OFFLINE', '')], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'offline', + '' ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // we create the inconsistency between k8s and real state node.pools = []; // trigger "del" event - oper.watcher.delObject('pool'); - - // called during the initial sync - sinon.assert.calledOnce(msStub); - 
expect(oper.resource).to.not.have.key('pool'); + oper.watcher.emit('del', poolResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + + // called in response to registry new event + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(destroyStub); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); it('should destroy the pool even if it is on a different node', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -589,52 +664,84 @@ module.exports = function () { destroyStub.resolves(); const node1 = new Node('node1', {}, []); const node2 = new Node('node2', {}, [pool]); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node1', ['/dev/sdb'], 'online', '')], - [node1, node2] + oper = createPoolOperator([node1, node2]); + const poolResource = createPoolResource( + 'pool', + 'node1', + ['/dev/sdb'], + [], + 'degraded', + '', + 100, + 10 ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "del" event - oper.watcher.delObject('pool'); - - // called during the initial sync - sinon.assert.calledOnce(msStub); + oper.watcher.emit('del', poolResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + // called in response to registry new event + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.calledOnce(destroyStub); - expect(oper.resource).to.not.have.key('pool'); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); - it('should delete the resource even if the destroy fails', async () => { + it('should not crash if the destroy fails', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: 
['aio:///dev/sdb'], state: 'POOL_DEGRADED', capacity: 100, - used: 10, - destroy: async function () {} + used: 10 }); const destroyStub = sinon.stub(pool, 'destroy'); destroyStub.rejects(new GrpcError(GrpcCode.INTERNAL, 'destroy failed')); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node', ['/dev/sdb'], 'DEGRADED', '')], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'degraded', + '', + 100, + 10 ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "del" event - oper.watcher.delObject('pool'); - + oper.watcher.emit('del', poolResource); // give event callbacks time to propagate - await sleep(10); - - // called during the initial sync - sinon.assert.calledOnce(msStub); + await sleep(EVENT_PROPAGATION_DELAY); + // called in response to registry new event + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.calledOnce(destroyStub); - expect(oper.resource).to.not.have.key('pool'); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); }); describe('mod event', () => { it('should not do anything if pool object has not changed', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb', 'aio:///dev/sdc'], @@ -643,36 +750,36 @@ module.exports = function () { used: 10 }); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [ - createPoolResource( - 'pool', - 'node', - ['/dev/sdb', '/dev/sdc'], - 'DEGRADED', - '' - ) - ], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb', '/dev/sdc'], + [], + 'degraded', + '' ); - - // called during the initial sync - 
sinon.assert.calledOnce(msStub); - + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "mod" event - oper.watcher.modObject( - createPoolResource('pool', 'node', ['/dev/sdc', '/dev/sdb']) - ); + oper.watcher.emit('mod', poolResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); - // called during the initial sync - sinon.assert.calledOnce(msStub); - // operator state - expect(oper.resource.pool.disks).to.have.lengthOf(2); - expect(oper.resource.pool.disks[0]).to.equal('/dev/sdb'); - expect(oper.resource.pool.disks[1]).to.equal('/dev/sdc'); + // called in response to registry new event + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); it('should not do anything if disks change', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -681,27 +788,38 @@ module.exports = function () { used: 10 }); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node', ['/dev/sdb'], 'DEGRADED', '')], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdc'], + [], + 'degraded', + '' ); - + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "mod" event - oper.watcher.modObject( - createPoolResource('pool', 'node', ['/dev/sdc']) - ); + oper.watcher.emit('mod', poolResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); - // called during the initial sync - sinon.assert.calledOnce(msStub); + // called in 
response to registry new event + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); // the real state expect(node.pools[0].disks[0]).to.equal('aio:///dev/sdb'); - // watcher state - expect(oper.watcher.list()[0].disks[0]).to.equal('/dev/sdc'); - // operator state - expect(oper.resource.pool.disks[0]).to.equal('/dev/sdc'); }); it('should not do anything if node changes', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], @@ -711,31 +829,38 @@ module.exports = function () { }); const node1 = new Node('node1', {}, [pool]); const node2 = new Node('node2', {}, []); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node1', ['/dev/sdb'], 'DEGRADED', '')], - [node1] + oper = createPoolOperator([node1, node2]); + const poolResource = createPoolResource( + 'pool', + 'node2', + ['/dev/sdb'], + [], + 'degraded', + '' ); - + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); // trigger "mod" event - oper.watcher.modObject( - createPoolResource('pool', 'node2', ['/dev/sdb']) - ); + oper.watcher.emit('mod', poolResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); - // called during the initial sync - sinon.assert.calledOnce(msStub); - // the real state - expect(node1.pools).to.have.lengthOf(1); - expect(node2.pools).to.have.lengthOf(0); - // watcher state - expect(oper.watcher.list()[0].node).to.equal('node2'); - // operator state - expect(oper.resource.pool.node).to.equal('node2'); + // called in response to registry new event + sinon.assert.calledOnce(stubs.updateStatus); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); }); }); }); describe('node 
events', () => { - var oper; // pool operator + let oper; // pool operator afterEach(async () => { if (oper) { @@ -745,39 +870,125 @@ module.exports = function () { }); it('should create pool upon node sync event if it does not exist', async () => { + let stubs; + const pool = new Pool({ + name: 'pool', + disks: ['aio:///dev/sdb'], + state: 'POOL_DEGRADED', + capacity: 100, + used: 10 + }); const node = new Node('node', {}, []); const createPoolStub = sinon.stub(node, 'createPool'); - createPoolStub.resolves( - new Pool({ - name: 'pool', - node: node, - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }) + const isSyncedStub = sinon.stub(node, 'isSynced'); + createPoolStub.resolves(pool); + isSyncedStub.onCall(0).returns(false); + isSyncedStub.onCall(1).returns(true); + oper = createPoolOperator([node]); + const poolResource1 = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'degraded', + '' ); - oper = await MockedPoolOperator( - [createPoolResource('pool', 'node', ['/dev/sdb'])], - [node] + const poolResource2 = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'pending', + 'mayastor on node "node" is offline' ); - - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'Creating the pool' - } - } + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.onCall(0).returns(poolResource1); + stubs.get.onCall(1).returns(poolResource2); + stubs.list.returns([poolResource1]); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + oper.registry.emit('node', { + eventType: 'sync', + object: node + }); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + 
sinon.assert.calledTwice(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'mayastor on node "node" is offline' + }); + expect(stubs.updateStatus.args[1][5].status).to.deep.equal({ + state: 'pending', + reason: 'Creating the pool' }); sinon.assert.calledOnce(createPoolStub); sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); }); - it('should not create pool upon node sync event if it exists', async () => { + it('should add finalizer for new pool resource', async () => { + let stubs; + const pool = new Pool({ + name: 'pool', + disks: ['aio:///dev/sdb'], + state: 'POOL_ONLINE', + capacity: 100, + used: 4 + }); + // replica will trigger finalizer + const replica1 = new Replica({ uuid: 'UUID1' }); + const replica2 = new Replica({ uuid: 'UUID2' }); + replica1.pool = pool; + pool.replicas = [replica1]; + const node = new Node('node', {}, [pool]); + oper = createPoolOperator([node]); + + const poolResource = createK8sPoolResource( + 'pool', + 'node1', + ['/dev/sdb'], + [], + 'online', + '', + 100, + 4 + ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.update.resolves(); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].metadata.finalizers).to.deep.equal([ + 'finalizer.mayastor.openebs.io' + ]); + + // add a second replica - should not change anything + pool.replicas.push(replica2); + oper.registry.emit('replica', { + eventType: 'new', + object: replica2 + }); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.calledOnce(stubs.update); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); + }); + + it('should remove finalizer when last replica is removed', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: 
['aio:///dev/sdb'], @@ -785,36 +996,101 @@ module.exports = function () { capacity: 100, used: 4 }); + const replica1 = new Replica({ uuid: 'UUID1' }); + const replica2 = new Replica({ uuid: 'UUID2' }); + pool.replicas = [replica1, replica2]; + replica1.pool = pool; + replica2.pool = pool; + const node = new Node('node', {}, [pool]); + oper = createPoolOperator([node]); + + const poolResource = createK8sPoolResource( + 'pool', + 'node1', + ['/dev/sdb'], + ['finalizer.mayastor.openebs.io'], + 'online', + '', + 100, + 4 + ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.update.resolves(); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.update); + pool.replicas.splice(1, 1); + oper.registry.emit('replica', { + eventType: 'del', + object: replica2 + }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.notCalled(stubs.update); + pool.replicas = []; + oper.registry.emit('replica', { + eventType: 'del', + object: replica1 + }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].metadata.finalizers).to.have.lengthOf(0); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); + }); + + it('should not create pool upon node sync event if it exists', async () => { + let stubs; + const pool = new Pool({ + name: 'pool', + disks: ['aio:///dev/sdb'], + state: 'POOL_DEGRADED', + capacity: 100, + used: 10 + }); const node = new Node('node', {}, [pool]); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.resolves(pool); - oper = await MockedPoolOperator( - [ - createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - 'online', - '', - 100, - 4 - ) - ], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + 
[], + 'degraded', + '', + 100, + 10 ); - - sinon.assert.notCalled(msStub); - sinon.assert.notCalled(putStub); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.list.returns([poolResource]); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.notCalled(createPoolStub); }); it('should not create pool upon node sync event if it exists on another node', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', + state: 'POOL_DEGRADED', capacity: 100, - used: 4 + used: 10 }); const node1 = new Node('node1', {}, []); const node2 = new Node('node2', {}, [pool]); @@ -822,47 +1098,64 @@ module.exports = function () { const createPoolStub2 = sinon.stub(node2, 'createPool'); createPoolStub1.resolves(pool); createPoolStub2.resolves(pool); - oper = await MockedPoolOperator( - [ - createPoolResource( - 'pool', - 'node1', - ['/dev/sdb'], - 'online', - '', - 100, - 4 - ) - ], - [node1, node2] + oper = createPoolOperator([node1, node2]); + const poolResource = createPoolResource( + 'pool', + 'node1', + ['/dev/sdb'], + [], + 'degraded', + '', + 100, + 10 ); - - sinon.assert.notCalled(msStub); - sinon.assert.notCalled(putStub); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + stubs.list.returns([poolResource]); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.notCalled(createPoolStub1); sinon.assert.notCalled(createPoolStub2); }); it('should remove 
pool upon pool new event if there is no pool resource', async () => { + let stubs; const pool = new Pool({ name: 'pool', disks: ['aio:///dev/sdb'], state: 'POOL_ONLINE', capacity: 100, - used: 4, - destroy: async function () {} + used: 4 }); const destroyStub = sinon.stub(pool, 'destroy'); destroyStub.resolves(); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator([], [node]); + oper = createPoolOperator([node]); - sinon.assert.notCalled(msStub); - sinon.assert.notCalled(putStub); + mockCache(oper.watcher, (arg) => { + stubs = arg; + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); sinon.assert.calledOnce(destroyStub); }); it('should update resource properties upon pool mod event', async () => { + let stubs; const offlineReason = 'mayastor does not run on the node "node"'; const pool = new Pool({ name: 'pool', @@ -872,70 +1165,84 @@ module.exports = function () { used: 4 }); const node = new Node('node', {}, [pool]); - oper = await MockedPoolOperator( - [ - createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - 'online', - '', - 100, - 4 - ) - ], - [node] + oper = createPoolOperator([node]); + + const poolResource = createPoolResource( + 'pool', + 'node1', + ['/dev/sdb'], + [], + 'online', + '', + 100, + 4 ); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); - pool.state = 'POOL_OFFLINE'; // simulate pool mod event + pool.state = 'POOL_OFFLINE'; oper.registry.emit('pool', { eventType: 'mod', object: pool }); - // Give event time to propagate - await sleep(10); - - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - 
sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'offline', - reason: offlineReason - } - } - }); - expect(oper.watcher.objects.pool.status.state).to.equal('offline'); - expect(oper.watcher.objects.pool.status.reason).to.equal(offlineReason); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'offline', + reason: offlineReason, + capacity: 100, + disks: ['aio:///dev/sdb'], + used: 4 + }); }); it('should ignore pool mod event if pool resource does not exist', async () => { - const node = new Node('node', {}, []); - oper = await MockedPoolOperator([], [node]); + let stubs; + const pool = new Pool({ + name: 'pool', + disks: ['aio:///dev/sdb'], + state: 'POOL_ONLINE', + capacity: 100, + used: 4 + }); + const node = new Node('node', {}, [pool]); + oper = createPoolOperator([node]); + mockCache(oper.watcher, (arg) => { + stubs = arg; + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + + // simulate pool mod event + pool.state = 'POOL_OFFLINE'; oper.registry.emit('pool', { eventType: 'mod', - object: new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_OFFLINE', - capacity: 100, - used: 4 - }) + object: pool }); - // Give event time to propagate - await sleep(10); + await sleep(EVENT_PROPAGATION_DELAY); - sinon.assert.notCalled(msStub); - sinon.assert.notCalled(putStub); - expect(oper.resource.pool).to.be.undefined(); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); }); it('should create pool upon pool del event if pool resource exist', async () => { + let stubs; const pool = new Pool({ 
name: 'pool', disks: ['aio:///dev/sdb'], @@ -946,22 +1253,24 @@ module.exports = function () { const node = new Node('node', {}, [pool]); const createPoolStub = sinon.stub(node, 'createPool'); createPoolStub.resolves(pool); - oper = await MockedPoolOperator( - [ - createPoolResource( - 'pool', - 'node', - ['/dev/sdb'], - 'online', - '', - 100, - 4 - ) - ], - [node] + oper = createPoolOperator([node]); + const poolResource = createPoolResource( + 'pool', + 'node', + ['/dev/sdb'], + [], + 'online', + '', + 100, + 4 ); - - sinon.assert.notCalled(msStub); + mockCache(oper.watcher, (arg) => { + stubs = arg; + stubs.get.returns(poolResource); + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.notCalled(createPoolStub); node.pools = []; @@ -969,43 +1278,51 @@ module.exports = function () { eventType: 'del', object: pool }); - // Give event time to propagate - await sleep(10); - - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, 'pool'); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - status: { - state: 'pending', - reason: 'Creating the pool' - } - } - }); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(createPoolStub); sinon.assert.calledWith(createPoolStub, 'pool', ['/dev/sdb']); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + state: 'pending', + reason: 'Creating the pool' + }); }); it('should ignore pool del event if pool resource does not exist', async () => { + let stubs; + const pool = new Pool({ + name: 'pool', + disks: ['aio:///dev/sdb'], + state: 'POOL_ONLINE', + capacity: 100, + used: 4 + }); const node = new Node('node', {}, []); - oper = await MockedPoolOperator([], [node]); + oper = createPoolOperator([node]); + 
mockCache(oper.watcher, (arg) => { + stubs = arg; + }); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + node.pools = []; oper.registry.emit('pool', { eventType: 'del', - object: new Pool({ - name: 'pool', - disks: ['aio:///dev/sdb'], - state: 'POOL_ONLINE', - capacity: 100, - used: 4 - }) + object: pool }); - // Give event time to propagate - await sleep(10); - sinon.assert.notCalled(msStub); + await sleep(EVENT_PROPAGATION_DELAY); + + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.updateStatus); }); }); }; diff --git a/csi/moac/test/volume_operator_test.js b/csi/moac/test/volume_operator_test.js index 15a0eec01..9bdbb0f43 100644 --- a/csi/moac/test/volume_operator_test.js +++ b/csi/moac/test/volume_operator_test.js @@ -1,10 +1,4 @@ // Unit tests for the volume operator -// -// We don't test the init method which depends on k8s api client and watcher. -// That method *must* be tested manually and in real k8s environment. For the -// rest of the dependencies we provide fake objects which mimic the real -// behaviour and allow us to test volume operator in isolation from other -// components. 
'use strict'; @@ -12,15 +6,33 @@ const _ = require('lodash'); const expect = require('chai').expect; const sinon = require('sinon'); const sleep = require('sleep-promise'); +const { KubeConfig } = require('client-node-fixed-watcher'); const Registry = require('../registry'); const Volume = require('../volume'); const Volumes = require('../volumes'); -const VolumeOperator = require('../volume_operator'); +const { VolumeOperator, VolumeResource } = require('../volume_operator'); const { GrpcError, GrpcCode } = require('../grpc_client'); -const Watcher = require('./watcher_stub'); +const { mockCache } = require('./watcher_stub'); const UUID = 'd01b8bfb-0116-47b0-a03a-447fcbdc0e99'; const NAMESPACE = 'mayastor'; +const EVENT_PROPAGATION_DELAY = 10; + +const fakeConfig = { + clusters: [ + { + name: 'cluster', + server: 'foo.company.com' + } + ], + contexts: [ + { + cluster: 'cluster', + user: 'user' + } + ], + users: [{ name: 'user' }] +}; function defaultMeta (uuid) { return { @@ -34,122 +46,75 @@ function defaultMeta (uuid) { }; } -module.exports = function () { - var msStub, putStub, putStatusStub, deleteStub, postStub; - var defaultSpec = { - replicaCount: 1, - preferredNodes: ['node1', 'node2'], - requiredNodes: ['node2'], - requiredBytes: 100, - limitBytes: 120, - protocol: 'nbd' - }; - var defaultStatus = { - size: 110, - node: 'node2', - state: 'healthy', - nexus: { - deviceUri: 'file:///dev/nbd0', - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE' - } - ] - }, - replicas: [ +const defaultSpec = { + replicaCount: 1, + preferredNodes: ['node1', 'node2'], + requiredNodes: ['node2'], + requiredBytes: 100, + limitBytes: 120, + protocol: 'nbd' +}; + +const defaultStatus = { + size: 110, + node: 'node2', + state: 'healthy', + nexus: { + deviceUri: 'file:///dev/nbd0', + state: 'NEXUS_ONLINE', + children: [ { uri: 'bdev:///' + UUID, - node: 'node2', - pool: 'pool', - offline: false + state: 'CHILD_ONLINE' } ] - }; - - // Create 
k8s volume resource object - function createVolumeResource (uuid, spec, status) { - const obj = { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: defaultMeta(uuid), - spec: spec - }; - if (status) { - obj.status = status; + }, + replicas: [ + { + uri: 'bdev:///' + UUID, + node: 'node2', + pool: 'pool', + offline: false } - return obj; - } + ] +}; - // k8s api client stub. - // - // Note that this stub serves only for PUT method on mayastor resource - // endpoint to update the status of resource. Fake watcher that is used - // in the tests does not use this client stub. - function createK8sClient (watcher) { - const mayastorvolumes = { mayastorvolumes: function (name) {} }; - const namespaces = function (ns) { - expect(ns).to.equal(NAMESPACE); - return mayastorvolumes; - }; - const client = { - apis: { - 'openebs.io': { - v1alpha1: { namespaces } - } - } - }; - - msStub = sinon.stub(mayastorvolumes, 'mayastorvolumes'); - msStub.post = async function (payload) { - watcher.objects[payload.body.metadata.name] = payload.body; - // simulate the asynchronicity of the put - await sleep(1); - }; - postStub = sinon.stub(msStub, 'post'); - postStub.callThrough(); - - const msObject = { - // the tricky thing here is that we have to update watcher's cache - // if we use this fake k8s client to change the object in order to - // mimic real behaviour. 
- put: async function (payload) { - watcher.objects[payload.body.metadata.name].spec = payload.body.spec; - }, - delete: async function () {}, - status: { - put: async function (payload) { - watcher.objects[payload.body.metadata.name].status = - payload.body.status; - } - } - }; - putStub = sinon.stub(msObject, 'put'); - putStub.callThrough(); - putStatusStub = sinon.stub(msObject.status, 'put'); - putStatusStub.callThrough(); - deleteStub = sinon.stub(msObject, 'delete'); - deleteStub.callThrough(); - msStub.returns(msObject); - return client; +// Create k8s volume resource object +function createK8sVolumeResource (uuid, spec, status) { + const obj = { + apiVersion: 'openebs.io/v1alpha1', + kind: 'MayastorVolume', + metadata: defaultMeta(uuid), + spec: spec + }; + if (status) { + obj.status = status; } + return obj; +} - // Create a pool operator object suitable for testing - with fake watcher - // and fake k8s api client. - async function mockedVolumeOperator (k8sObjects, volumes) { - const oper = new VolumeOperator(NAMESPACE); - oper.volumes = volumes; - oper.watcher = new Watcher(oper._filterMayastorVolume, k8sObjects); - oper.k8sClient = createK8sClient(oper.watcher); +// Create volume resource object +function createVolumeResource (uuid, spec, status) { + return new VolumeResource(createK8sVolumeResource(uuid, spec, status)); +} - await oper.start(); - return oper; - } +// Create a pool operator object suitable for testing - with fake watcher +// and fake k8s api client. 
+async function createVolumeOperator (volumes, stubsCb) { + const kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + const oper = new VolumeOperator(NAMESPACE, kc, volumes); + mockCache(oper.watcher, stubsCb); + await oper.start(); + // give time to registry to install its callbacks + await sleep(EVENT_PROPAGATION_DELAY); + return oper; +} - describe('resource filter', () => { - it('valid mayastor volume with status should pass the filter', () => { - const obj = createVolumeResource( +module.exports = function () { + describe('VolumeResource constructor', () => { + it('should create mayastor volume with status', () => { + const res = createVolumeResource( UUID, { replicaCount: 3, @@ -182,8 +147,6 @@ module.exports = function () { ] } ); - - const res = VolumeOperator.prototype._filterMayastorVolume(obj); expect(res.metadata.name).to.equal(UUID); expect(res.spec.replicaCount).to.equal(3); expect(res.spec.preferredNodes).to.have.lengthOf(2); @@ -208,8 +171,28 @@ module.exports = function () { expect(res.status.replicas[0].offline).to.equal(false); }); - it('valid mayastor volume with status without nexus should pass the filter', () => { - const obj = createVolumeResource( + it('should create mayastor volume with unknown state', () => { + const res = createVolumeResource( + UUID, + { + replicaCount: 1, + requiredBytes: 100 + }, + { + size: 100, + node: 'node2', + state: 'online' // "online" is not a valid volume state + } + ); + expect(res.metadata.name).to.equal(UUID); + expect(res.spec.replicaCount).to.equal(1); + expect(res.status.size).to.equal(100); + expect(res.status.node).to.equal('node2'); + expect(res.status.state).to.equal('unknown'); + }); + + it('should create mayastor volume with status without nexus', () => { + const res = createVolumeResource( UUID, { replicaCount: 3, @@ -226,7 +209,6 @@ module.exports = function () { } ); - const res = VolumeOperator.prototype._filterMayastorVolume(obj); expect(res.metadata.name).to.equal(UUID); 
expect(res.spec.replicaCount).to.equal(3); expect(res.spec.preferredNodes).to.have.lengthOf(2); @@ -243,25 +225,23 @@ module.exports = function () { expect(res.status.replicas).to.have.lengthOf(0); }); - it('valid mayastor volume without status should pass the filter', () => { - const obj = createVolumeResource(UUID, { + it('should create mayastor volume without status', () => { + const res = createVolumeResource(UUID, { replicaCount: 3, preferredNodes: ['node1', 'node2'], requiredNodes: ['node2'], requiredBytes: 100, limitBytes: 120 }); - const res = VolumeOperator.prototype._filterMayastorVolume(obj); expect(res.metadata.name).to.equal(UUID); expect(res.spec.replicaCount).to.equal(3); expect(res.status).to.be.undefined(); }); - it('mayastor volume without optional parameters should pass the filter', () => { - const obj = createVolumeResource(UUID, { + it('should create mayastor volume without optional parameters', () => { + const res = createVolumeResource(UUID, { requiredBytes: 100 }); - const res = VolumeOperator.prototype._filterMayastorVolume(obj); expect(res.metadata.name).to.equal(UUID); expect(res.spec.replicaCount).to.equal(1); expect(res.spec.preferredNodes).to.have.lengthOf(0); @@ -271,32 +251,76 @@ module.exports = function () { expect(res.status).to.be.undefined(); }); - it('mayastor volume without requiredSize should be ignored', () => { - const obj = createVolumeResource(UUID, { + it('should throw if requiredSize is missing', () => { + expect(() => createVolumeResource(UUID, { replicaCount: 3, preferredNodes: ['node1', 'node2'], requiredNodes: ['node2'], limitBytes: 120 - }); - const res = VolumeOperator.prototype._filterMayastorVolume(obj); - expect(res).to.be.null(); + })).to.throw(); }); - it('mayastor volume with invalid UUID should be ignored', () => { - const obj = createVolumeResource('blabla', { + it('should throw if UUID is invalid', () => { + expect(() => createVolumeResource('blabla', { replicaCount: 3, preferredNodes: ['node1', 'node2'], 
requiredNodes: ['node2'], requiredBytes: 100, limitBytes: 120 + })).to.throw(); + }); + }); + + describe('init method', () => { + let kc, oper, fakeApiStub; + + beforeEach(() => { + const registry = new Registry(); + kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + oper = new VolumeOperator(NAMESPACE, kc, registry); + const makeApiStub = sinon.stub(kc, 'makeApiClient'); + const fakeApi = { + createCustomResourceDefinition: () => null + }; + fakeApiStub = sinon.stub(fakeApi, 'createCustomResourceDefinition'); + makeApiStub.returns(fakeApi); + }); + + afterEach(() => { + if (oper) { + oper.stop(); + oper = undefined; + } + }); + + it('should create CRD if it does not exist', async () => { + fakeApiStub.resolves(); + await oper.init(kc); + }); + + it('should ignore error if CRD already exists', async () => { + fakeApiStub.rejects({ + statusCode: 409 }); - const res = VolumeOperator.prototype._filterMayastorVolume(obj); - expect(res).to.be.null(); + await oper.init(kc); + }); + + it('should throw if CRD creation fails', async () => { + fakeApiStub.rejects({ + statusCode: 404 + }); + try { + await oper.init(kc); + } catch (err) { + return; + } + throw new Error('Init did not fail'); }); }); describe('watcher events', () => { - var oper; // volume operator + let oper; // volume operator afterEach(async () => { if (oper) { @@ -306,63 +330,29 @@ module.exports = function () { }); it('should call import volume for existing resources when starting the operator', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const importVolumeStub = sinon.stub(volumes, 'importVolume'); // return value is not used so just return something importVolumeStub.resolves({ uuid: UUID }); - oper = await mockedVolumeOperator( - [createVolumeResource(UUID, defaultSpec, defaultStatus)], - volumes - ); - sinon.assert.calledOnce(importVolumeStub); - sinon.assert.calledWith(importVolumeStub, UUID, defaultSpec); - }); - - it('should import volume 
upon "new" event', async () => { - const registry = new Registry(); - const volumes = new Volumes(registry); - const defaultStatus = - { - node: 'ksnode-1', - replicas: [], - size: 1024, - state: 'healthy' - }; - - const importVolumeStub = sinon.stub(volumes, 'importVolume'); - importVolumeStub.resolves({ uuid: UUID }); - - oper = await mockedVolumeOperator([], volumes); + const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + }); // trigger "new" event - oper.watcher.newObject(createVolumeResource(UUID, defaultSpec, defaultStatus)); - sinon.assert.calledOnce(importVolumeStub); - sinon.assert.calledWith(importVolumeStub, UUID, defaultSpec, defaultStatus); - }); - - it('should not try to import volume upon "new" event if the resource was self-created', async () => { - const registry = new Registry(); - const volumes = new Volumes(registry); - sinon.stub(volumes, 'get').returns([]); - const importVolumeStub = sinon.stub(volumes, 'importVolume'); - importVolumeStub.resolves({ uuid: UUID }); + oper.watcher.emit('new', volumeResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); - oper = await mockedVolumeOperator([], volumes); - // Pretend the volume creation through i.e. CSI. 
- await sleep(10); - const volume = new Volume(UUID, registry, defaultSpec); - volumes.emit('volume', { - eventType: 'new', - object: volume - }); - await sleep(10); - // now trigger "new" watcher event (natural consequence of the above) - oper.watcher.newObject(createVolumeResource(UUID, defaultSpec)); - sinon.assert.notCalled(importVolumeStub); + sinon.assert.calledOnce(importVolumeStub); + sinon.assert.calledWith(importVolumeStub, UUID, defaultSpec); }); it('should set reason in resource if volume import fails upon "new" event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const importVolumeStub = sinon.stub(volumes, 'importVolume'); @@ -370,60 +360,72 @@ module.exports = function () { new GrpcError(GrpcCode.INTERNAL, 'create failed') ); - oper = await mockedVolumeOperator([], volumes); + const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + }); // trigger "new" event - oper.watcher.newObject(createVolumeResource(UUID, defaultSpec)); - await sleep(10); + oper.watcher.emit('new', volumeResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(importVolumeStub); - sinon.assert.calledOnce(msStub); - sinon.assert.calledWith(msStub, UUID); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.calledOnce(putStatusStub); - sinon.assert.calledWithMatch(putStatusStub, { - body: { - metadata: defaultMeta(UUID), - status: { - state: 'pending', - reason: 'Error: create failed' - } - } - }); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status.state).to.equal('error'); + expect(stubs.updateStatus.args[0][5].status.reason).to.equal('Error: create failed'); }); it('should destroy the volume upon "del" event', async () => { + let stubs; const registry = new 
Registry(); const volumes = new Volumes(registry); const destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); destroyVolumeStub.resolves(); - const obj = createVolumeResource(UUID, defaultSpec, defaultStatus); + const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + }); + const getVolumeStub = sinon.stub(volumes, 'get'); + getVolumeStub.returns({ uuid: UUID }); // trigger "del" event - oper.watcher.delObject(UUID); + oper.watcher.emit('del', volumeResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(destroyVolumeStub); sinon.assert.calledWith(destroyVolumeStub, UUID); }); it('should handle gracefully if destroy of a volume fails upon "del" event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); destroyVolumeStub.rejects( new GrpcError(GrpcCode.INTERNAL, 'destroy failed') ); - const obj = createVolumeResource(UUID, defaultSpec, defaultStatus); + const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + }); + const getVolumeStub = sinon.stub(volumes, 'get'); + getVolumeStub.returns({ uuid: UUID }); // trigger "del" event - oper.watcher.delObject(UUID); + oper.watcher.emit('del', volumeResource); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(destroyVolumeStub); sinon.assert.calledWith(destroyVolumeStub, UUID); }); it('should modify the volume upon "mod" event', async () => { + let stubs; const 
registry = new Registry(); const volumes = new Volumes(registry); const volume = new Volume(UUID, registry, defaultSpec); @@ -451,10 +453,14 @@ module.exports = function () { defaultStatus ); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(oldObj); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(oldObj); + }); // trigger "mod" event - oper.watcher.modObject(newObj); + oper.watcher.emit('mod', newObj); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.calledOnce(fsaStub); expect(volume.replicaCount).to.equal(3); @@ -465,6 +471,7 @@ module.exports = function () { }); it('should not crash if update volume fails upon "mod" event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const volume = new Volume(UUID, registry, defaultSpec); @@ -492,10 +499,14 @@ module.exports = function () { defaultStatus ); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(oldObj); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(oldObj); + }); // trigger "mod" event - oper.watcher.modObject(newObj); + oper.watcher.emit('mod', newObj); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); sinon.assert.notCalled(fsaStub); expect(volume.replicaCount).to.equal(1); @@ -504,6 +515,7 @@ module.exports = function () { }); it('should not do anything if volume params stay the same upon "mod" event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const volume = new Volume(UUID, registry, defaultSpec); @@ -520,16 +532,21 @@ module.exports = function () { // new specification of the object that is the same const newObj = createVolumeResource(UUID, defaultSpec, defaultStatus); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(oldObj); + oper = await 
createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(oldObj); + }); // trigger "mod" event - oper.watcher.modObject(newObj); + oper.watcher.emit('mod', newObj); + // give event callbacks time to propagate + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.notCalled(fsaStub); }); }); describe('volume events', () => { - var oper; // volume operator + let oper; // volume operator afterEach(async () => { if (oper) { @@ -539,8 +556,9 @@ module.exports = function () { }); it('should create a resource upon "new" volume event', async () => { + let stubs; const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, defaultSpec, 100); const volumes = new Volumes(registry); sinon .stub(volumes, 'get') @@ -549,53 +567,55 @@ module.exports = function () { .withArgs() .returns([volume]); - oper = await mockedVolumeOperator([], volumes); - - await sleep(20); - sinon.assert.calledOnce(postStub); - sinon.assert.calledWithMatch(postStub, { - body: { - metadata: { - name: UUID, - namespace: NAMESPACE - }, - spec: defaultSpec - } + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.onFirstCall().returns(); + stubs.get.onSecondCall().returns(volumeResource); + stubs.create.resolves(); + stubs.updateStatus.resolves(); }); - sinon.assert.calledOnce(putStatusStub); - sinon.assert.calledWithMatch(putStatusStub, { - body: { - status: { - node: '', - reason: '', - replicas: [], - size: 0, - state: 'pending' - } - } + + sinon.assert.calledOnce(stubs.create); + expect(stubs.create.args[0][4].metadata.name).to.equal(UUID); + expect(stubs.create.args[0][4].metadata.namespace).to.equal(NAMESPACE); + expect(stubs.create.args[0][4].spec).to.deep.equal(defaultSpec); + sinon.assert.calledOnce(stubs.updateStatus); + expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ + node: '', + replicas: [], + 
size: 100, + state: 'pending' }); }); it('should not crash if POST fails upon "new" volume event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const volume = new Volume(UUID, registry, defaultSpec); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - postStub.rejects(new Error('post failed')); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.onFirstCall().returns(); + stubs.get.onSecondCall().returns(volumeResource); + stubs.create.rejects(new Error('POST failed')); + stubs.updateStatus.resolves(); + }); + volumes.emit('volume', { eventType: 'new', object: volume }); - await sleep(10); - sinon.assert.calledOnce(postStub); - sinon.assert.notCalled(putStatusStub); + await sleep(EVENT_PROPAGATION_DELAY); + sinon.assert.calledOnce(stubs.create); + sinon.assert.notCalled(stubs.updateStatus); }); it('should update the resource upon "new" volume event if it exists', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); const newSpec = _.cloneDeep(defaultSpec); @@ -608,23 +628,25 @@ module.exports = function () { .withArgs() .returns([volume]); - oper = await mockedVolumeOperator([], volumes); - const obj = createVolumeResource(UUID, defaultSpec); - oper.watcher.injectObject(obj); - - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { spec: newSpec } + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.update.resolves(); + stubs.updateStatus.resolves(); }); - sinon.assert.calledOnce(putStatusStub); + + 
sinon.assert.notCalled(stubs.create); + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].spec).to.deep.equal(newSpec); + sinon.assert.calledOnce(stubs.updateStatus); }); it('should not update the resource upon "new" volume event if it is the same', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, defaultSpec, 100); sinon .stub(volumes, 'get') .withArgs(UUID) @@ -632,32 +654,37 @@ module.exports = function () { .withArgs() .returns([volume]); - oper = await mockedVolumeOperator([], volumes); - const obj = createVolumeResource(UUID, defaultSpec, { - size: 0, + const volumeResource = createVolumeResource(UUID, defaultSpec, { + size: 100, node: '', state: 'pending', - reason: '', replicas: [] }); - oper.watcher.injectObject(obj); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.update.resolves(); + stubs.updateStatus.resolves(); + }); - await sleep(10); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStatusStub); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); }); it('should update the resource upon "mod" volume event', async () => { - const obj = createVolumeResource(UUID, defaultSpec); + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.update.resolves(); + 
stubs.updateStatus.resolves(); + }); const newSpec = { replicaCount: 3, @@ -672,51 +699,51 @@ module.exports = function () { eventType: 'mod', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.calledOnce(putStub); - sinon.assert.calledWithMatch(putStub, { - body: { - metadata: defaultMeta(UUID), - spec: newSpec - } - }); - sinon.assert.calledOnce(putStatusStub); + sinon.assert.calledOnce(stubs.update); + expect(stubs.update.args[0][5].spec).to.deep.equal(newSpec); + sinon.assert.calledOnce(stubs.updateStatus); }); it('should update just the status if spec has not changed upon "mod" volume event', async () => { - const obj = createVolumeResource(UUID, defaultSpec); + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.update.resolves(); + stubs.updateStatus.resolves(); + }); const volume = new Volume(UUID, registry, defaultSpec); volumes.emit('volume', { eventType: 'mod', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.notCalled(putStub); - sinon.assert.calledOnce(putStatusStub); + sinon.assert.notCalled(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); }); it('should not crash if PUT fails upon "mod" volume event', async () => { - const obj = createVolumeResource(UUID, defaultSpec); + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); - putStub.rejects(new Error('put failed')); - // we have 
to sleep to give event stream chance to register its handlers - await sleep(10); + const volumeResource = createVolumeResource(UUID, defaultSpec); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.update.rejects(new Error('PUT failed')); + stubs.updateStatus.resolves(); + }); const newSpec = { replicaCount: 3, @@ -731,20 +758,22 @@ module.exports = function () { eventType: 'mod', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.calledOnce(putStub); - sinon.assert.notCalled(putStatusStub); + sinon.assert.calledTwice(stubs.update); + sinon.assert.calledOnce(stubs.updateStatus); }); it('should not crash if the resource does not exist upon "mod" volume event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(); + }); const newSpec = { replicaCount: 3, @@ -759,75 +788,81 @@ module.exports = function () { eventType: 'mod', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.notCalled(postStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(putStatusStub); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); + sinon.assert.notCalled(stubs.updateStatus); }); it('should delete the resource upon "del" volume event', async () => { - const obj = createVolumeResource(UUID, defaultSpec); + let stubs; + const volumeResource = createVolumeResource(UUID, defaultSpec); const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); - // we have 
to sleep to give event stream chance to register its handlers - await sleep(10); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.delete.resolves(); + }); const volume = new Volume(UUID, registry, defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.calledOnce(deleteStub); + sinon.assert.calledOnce(stubs.delete); }); it('should not crash if DELETE fails upon "del" volume event', async () => { - const obj = createVolumeResource(UUID, defaultSpec); + let stubs; + const volumeResource = createVolumeResource(UUID, defaultSpec); const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - oper.watcher.injectObject(obj); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(volumeResource); + stubs.delete.rejects(new Error('delete failed')); + }); - deleteStub.rejects(new Error('delete failed')); const volume = new Volume(UUID, registry, defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.calledOnce(deleteStub); + sinon.assert.calledOnce(stubs.delete); }); it('should not crash if the resource does not exist upon "del" volume event', async () => { + let stubs; const registry = new Registry(); const volumes = new Volumes(registry); sinon.stub(volumes, 'get').returns([]); - oper = await mockedVolumeOperator([], volumes); - // we have to sleep to give event stream chance to register its handlers - await sleep(10); + oper = await createVolumeOperator(volumes, (arg) => { + stubs = arg; + stubs.get.returns(); + stubs.delete.resolves(); + }); const volume = new Volume(UUID, registry, 
defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume }); + await sleep(EVENT_PROPAGATION_DELAY); - await sleep(10); - sinon.assert.notCalled(deleteStub); - sinon.assert.notCalled(putStub); - sinon.assert.notCalled(postStub); + sinon.assert.notCalled(stubs.delete); + sinon.assert.notCalled(stubs.create); + sinon.assert.notCalled(stubs.update); }); }); }; diff --git a/csi/moac/test/watcher_stub.js b/csi/moac/test/watcher_stub.js index b1ea6bf74..8dbbeeb49 100644 --- a/csi/moac/test/watcher_stub.js +++ b/csi/moac/test/watcher_stub.js @@ -1,79 +1,49 @@ -// Fake watcher which simulates the real one. +// Fake watcher that isolates the watcher from k8s api server using sinon stubs. 'use strict'; -const assert = require('assert'); -const EventEmitter = require('events'); - -// It can be used instead of real watcher in tests of other classes depending -// on the watcher. -class Watcher extends EventEmitter { - // Construct a watcher with initial set of objects passed in arg. - constructor (filterCb, objects) { - super(); - this.filterCb = filterCb; - this.objects = {}; - for (let i = 0; i < objects.length; i++) { - this.objects[objects[i].metadata.name] = objects[i]; - } - } - - injectObject (obj) { - this.objects[obj.metadata.name] = obj; - } - - newObject (obj) { - this.objects[obj.metadata.name] = obj; - this.emit('new', this.filterCb(obj)); - } - - delObject (name) { - var obj = this.objects[name]; - assert(obj); - delete this.objects[name]; - this.emit('del', this.filterCb(obj)); - } - - modObject (obj) { - this.objects[obj.metadata.name] = obj; - this.emit('mod', this.filterCb(obj)); - } - - async start () { - var self = this; - return new Promise((resolve, reject) => { - setTimeout(() => { - for (const name in self.objects) { - // real objects coming from GET method also don't have kind and - // apiVersion attrs so strip these props to mimic the real case. 
- delete self.objects[name].kind; - delete self.objects[name].apiVersion; - self.emit('new', self.filterCb(self.objects[name])); - } - resolve(); - }, 0); +const sinon = require('sinon'); + +// stubsCb callback can override default return values of k8s api calls +function mockCache (cache, stubsCb) { + // do not wait for confirming events from k8s + cache.eventTimeout = 0; + + // mock k8s api calls + cache.createStub = sinon.stub(cache.k8sApi, 'createNamespacedCustomObject'); + cache.updateStub = sinon.stub(cache.k8sApi, 'replaceNamespacedCustomObject'); + cache.updateStatusStub = sinon.stub(cache.k8sApi, 'replaceNamespacedCustomObjectStatus'); + cache.deleteStub = sinon.stub(cache.k8sApi, 'deleteNamespacedCustomObject'); + cache.getStub = sinon.stub(cache.listWatch, 'get'); + cache.listStub = sinon.stub(cache.listWatch, 'list'); + const stubs = { + create: cache.createStub, + update: cache.updateStub, + updateStatus: cache.updateStatusStub, + delete: cache.deleteStub, + get: cache.getStub, + list: cache.listStub + }; + stubs.create.resolves(); + stubs.update.resolves(); + stubs.updateStatus.resolves(); + stubs.delete.resolves(); + stubs.get.returns(); + stubs.list.returns([]); + if (stubsCb) stubsCb(stubs); + + // convenience function for emitting watcher events + stubs.emitKubeEvent = (ev, data) => { + cache.listWatch.callbackCache[ev].forEach((cb) => cb(data)); + }; + + // mock the watcher to start even without k8s + const startStub = sinon.stub(cache.listWatch, 'start'); + startStub.callsFake(async () => { + stubs.list().forEach((ent) => { + stubs.emitKubeEvent('add', ent); }); - } - - async stop () {} - - async getRawBypass (name) { - return this.getRaw(name); - } - - getRaw (name) { - const obj = this.objects[name]; - if (!obj) { - return null; - } else { - return JSON.parse(JSON.stringify(obj)); - } - } - - list () { - var self = this; - return Object.values(this.objects).map((ent) => self.filterCb(ent)); - } + }); } -module.exports = Watcher; 
+module.exports = { mockCache }; diff --git a/csi/moac/test/watcher_test.js b/csi/moac/test/watcher_test.js index 5c58658ea..0cc90a5f1 100644 --- a/csi/moac/test/watcher_test.js +++ b/csi/moac/test/watcher_test.js @@ -1,12 +1,34 @@ -// Unit tests for the watcher. -// -// We fake the k8s api watch and collection endpoints so that the tests are -// runable without k8s environment and let us test corner cases which would -// normally be impossible to test. +// Tests for the object cache (watcher). +const _ = require('lodash'); const expect = require('chai').expect; -const Watcher = require('../watcher'); -const Readable = require('stream').Readable; +const sinon = require('sinon'); +const sleep = require('sleep-promise'); +const { KubeConfig } = require('client-node-fixed-watcher'); +const { CustomResourceCache } = require('../watcher'); + +// slightly modified cache tunings not to wait too long when testing things +const IDLE_TIMEOUT_MS = 500; +const RESTART_DELAY_MS = 300; +const EVENT_TIMEOUT_MS = 200; +const EVENT_DELAY_MS = 100; +const EYE_BLINK_MS = 30; + +const fakeConfig = { + clusters: [ + { + name: 'cluster', + server: 'foo.company.com' + } + ], + contexts: [ + { + cluster: 'cluster', + user: 'user' + } + ], + users: [{ name: 'user' }] +}; // Create fake k8s object. Example of true k8s object follows: // @@ -36,490 +58,460 @@ const Readable = require('stream').Readable; // ... // } // } -function createObject (name, generation, val) { +function createApple (name, finalizers, spec) { return { - kind: 'mykind', apiVersion: 'my.group.io/v1alpha1', - metadata: { name, generation }, - spec: { val } + kind: 'apple', + metadata: { name, finalizers }, + spec }; } -// Simple filter that produces objects {name, val} from the objects -// created by the createObject() above and only objects with val > 100 -// pass through the filter. 
-function objectFilter (k8sObject) { - if (k8sObject.kind !== 'mykind') { - return null; - } - if (k8sObject.spec.val > 100) { - return { - name: k8sObject.metadata.name, - val: k8sObject.spec.val +// Test class +class Apple { + constructor (obj) { + this.metadata = { + name: obj.metadata.name }; - } else { - return null; - } -} - -// A stub for GET k8s API request returning a collection of k8s objects which -// were previously set by add() method. -class GetMock { - constructor (delay) { - this.delay = delay; - this.objects = {}; - this.statusCode = 200; - } - - setStatusCode (code) { - this.statusCode = code; - } - - add (obj) { - this.objects[obj.metadata.name] = obj; - } - - remove (name) { - delete this.objects[name]; - } - - reset () { - this.objects = {}; - } - - template () { - var gMock = this; - function template (name) { - return { get: async function () { return gMock.getForce(name); } }; - } - template.get = function () { return gMock.get(); }; - return template; - } - - async getForce (name) { - if (this.objects[name]) { - return { statusCode: this.statusCode, body: this.objects[name] }; - } - throw Object.assign( - new Error(`"${name}" not found`), - { code: 404 } - ); - } - - get () { - var self = this; - return new Promise((resolve, reject) => { - setTimeout(() => { - resolve({ - statusCode: 200, - body: { items: Object.values(self.objects) } - }); - }, self.delay || 0); - }); - } -} - -// A mock representing k8s watch stream. -// You can feed arbitrary objects to it and it will pass them to a consumer. -// Example of k8s watch stream event follows: -// -// { -// "type": "ADDED", -// "object": { -// ... 
(object as shown in GetMock example above) -// } -// } -class StreamMock extends Readable { - constructor () { - super({ autoDestroy: true, objectMode: true }); - this.feeds = []; - this.wantMore = false; - } - - _read (size) { - while (true) { - const obj = this.feeds.shift(); - if (obj === undefined) { - this.wantMore = true; - break; - } - this.push(obj); + if (obj.spec === 'invalid') { + throw new Error('Invalid object'); } - } - - feed (type, object) { - this.feeds.push({ - type, - object - }); - if (this.wantMore) { - this.wantMore = false; - this._read(); - } - } - - end () { - this.feeds.push(null); - if (this.wantMore) { - this.wantMore = false; - this._read(); - } - } - - getObjectStream () { - return this; + this.spec = obj.spec; } } -// This is for test cases where we need to test disconnected watch stream. -// In that case, the watcher will create a new instance of watch stream -// (by calling getObjectStream) and we need to keep track of latest created stream -// in order to be able to feed data to it etc. -class StreamMockTracker { - constructor () { - this.current = null; - } - - // create a new stream (mimics nodejs k8s client api) - getObjectStream () { - const s = new StreamMock(); - this.current = s; - return s; - } - - // get the most recently created underlaying stream - latest () { - return this.current; - } +// Create a cache with a listWatch object with fake start method that does +// nothing instead of connecting to k8s cluster. 
+function createMockedCache () { + const kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + const watcher = new CustomResourceCache('namespace', 'apple', kc, Apple, { + restartDelay: RESTART_DELAY_MS, + eventTimeout: EVENT_TIMEOUT_MS, + idleTimeout: IDLE_TIMEOUT_MS + }); + // convenience function for generating k8s watcher events + watcher.emitKubeEvent = (ev, data) => { + watcher.listWatch.callbackCache[ev].forEach((cb) => cb(data)); + }; + const startStub = sinon.stub(watcher.listWatch, 'start'); + startStub.onCall(0).resolves(); + return [watcher, startStub]; } module.exports = function () { - // Basic watcher operations grouped in describe to avoid repeating watcher - // initialization & tear down for each test case. - describe('watch events', () => { - var getMock = new GetMock(); - var streamMock = new StreamMock(); - var watcher; - var newList = []; - var modList = []; - var delList = []; - - before(() => { - watcher = new Watcher('test', getMock.template(), streamMock, objectFilter); - watcher.on('new', (obj) => newList.push(obj)); - watcher.on('mod', (obj) => modList.push(obj)); - watcher.on('del', (obj) => delList.push(obj)); - - getMock.add(createObject('valid-object', 1, 123)); - getMock.add(createObject('invalid-object', 1, 99)); - }); - - after(() => { - watcher.stop(); - streamMock.end(); + this.timeout(10000); + + it('should create a cache and block in start until connected', async () => { + const kc = new KubeConfig(); + Object.assign(kc, fakeConfig); + const watcher = new CustomResourceCache('namespace', 'apple', kc, Apple, { + restartDelay: RESTART_DELAY_MS, + eventTimeout: EVENT_TIMEOUT_MS }); + const startStub = sinon.stub(watcher.listWatch, 'start'); + startStub.onCall(0).rejects(); + startStub.onCall(1).rejects(); + startStub.onCall(2).resolves(); + const startTime = new Date(); + await watcher.start(); + const delta = new Date() - startTime; + sinon.assert.calledThrice(startStub); + expect(watcher.isConnected()).to.be.true(); + 
expect(delta).to.be.within(2 * RESTART_DELAY_MS, 3 * RESTART_DELAY_MS); + watcher.stop(); + }); - it('should init cache only with objects which pass through the filter', async () => { - await watcher.start(); + it('should reconnect watcher if it gets disconnected', async () => { + const [watcher, startStub] = createMockedCache(); + await watcher.start(); + sinon.assert.calledOnce(startStub); + expect(watcher.isConnected()).to.be.true(); + startStub.onCall(1).rejects(new Error('start failed')); + startStub.onCall(2).resolves(); + watcher.emitKubeEvent('error', new Error('got disconnected')); + await sleep(RESTART_DELAY_MS * 1.5); + sinon.assert.calledTwice(startStub); + expect(watcher.isConnected()).to.be.false(); + await sleep(RESTART_DELAY_MS); + sinon.assert.calledThrice(startStub); + expect(watcher.isConnected()).to.be.true(); + watcher.stop(); + }); - expect(modList).to.have.lengthOf(0); - expect(delList).to.have.lengthOf(0); - expect(newList).to.have.lengthOf(1); - expect(newList[0].name).to.equal('valid-object'); - expect(newList[0].val).to.equal(123); + it('should reset watcher if idle for too long', async () => { + const [watcher, startStub] = createMockedCache(); + await watcher.start(); + sinon.assert.calledOnce(startStub); + expect(watcher.isConnected()).to.be.true(); + startStub.onCall(1).resolves(); + await sleep(IDLE_TIMEOUT_MS * 1.5); + sinon.assert.calledTwice(startStub); + expect(watcher.isConnected()).to.be.true(); + watcher.stop(); + }); - const lst = watcher.list(); - expect(lst).to.have.lengthOf(1); - expect(lst[0]).to.have.all.keys('name', 'val'); - expect(lst[0].name).to.equal('valid-object'); - expect(lst[0].val).to.equal(123); + describe('methods', function () { + let watcher; + let timeout; - const rawObj = watcher.getRaw('valid-object'); - expect(rawObj).to.deep.equal(createObject('valid-object', 1, 123)); + beforeEach(async () => { + let startStub; + timeout = undefined; + [watcher, startStub] = createMockedCache(); + 
startStub.resolves(); + await watcher.start(); }); - it('should add object to the cache only if it passes through the filter', (done) => { - // invalid object should not be added - streamMock.feed('ADDED', createObject('add-invalid-object', 1, 90)); - // valid object should be added - streamMock.feed('ADDED', createObject('evented-object', 1, 155)); - - function check () { - expect(modList).to.have.lengthOf(0); - expect(delList).to.have.lengthOf(0); - expect(newList).to.have.lengthOf(2); - expect(newList[1].name).to.equal('evented-object'); - expect(newList[1].val).to.equal(155); - done(); + afterEach(() => { + if (watcher) { + watcher.stop(); + watcher = undefined; } - - // Use a trick to check 'new' event regardless if it has already arrived - // or will arrive yet. - if (newList.length > 1) { - check(); - } else { - watcher.once('new', () => process.nextTick(check)); + if (timeout) { + clearTimeout(timeout); } }); - it('should modify object in the cache if it passes through the filter', (done) => { - // new object should be added and new event emitted (not the mod event) - streamMock.feed('MODIFIED', createObject('new-object', 1, 160)); - // object with old generation number should be ignored - streamMock.feed('MODIFIED', createObject('evented-object', 1, 155)); - // object should be modified - streamMock.feed('MODIFIED', createObject('evented-object', 2, 156)); - // object should be modified (without gen number) - streamMock.feed( - 'MODIFIED', - createObject('evented-object', undefined, 157) - ); - - function check () { - expect(delList).to.have.lengthOf(0); - expect(modList).to.have.lengthOf(2); - expect(modList[0].name).to.equal('evented-object'); - expect(modList[0].val).to.equal(156); - expect(modList[1].name).to.equal('evented-object'); - expect(modList[1].val).to.equal(157); - expect(newList).to.have.lengthOf(3); - expect(newList[2].name).to.equal('new-object'); - expect(newList[2].val).to.equal(160); - done(); - } + function assertReplaceCalledWith 
(stub, name, obj, attrs) { + const newObj = _.cloneDeep(obj); + _.merge(newObj, attrs); + sinon.assert.calledOnce(stub); + sinon.assert.calledWith(stub, 'openebs.io', 'v1alpha1', 'namespace', + 'apples', name, newObj); + } - if (modList.length > 0) { - check(); - } else { - watcher.once('mod', () => process.nextTick(check)); - } + it('should list all objects', () => { + const listStub = sinon.stub(watcher.listWatch, 'list'); + listStub.returns([ + createApple('name1', [], 'valid'), + createApple('name2', [], 'invalid'), + createApple('name3', [], 'valid') + ]); + const objs = watcher.list(); + expect(objs).to.have.length(2); + expect(objs[0].metadata.name).to.equal('name1'); + expect(objs[1].metadata.name).to.equal('name3'); }); - it('should remove object from the cache if it exists', (done) => { - streamMock.feed('DELETED', createObject('unknown-object', 1, 160)); - streamMock.feed('DELETED', createObject('evented-object', 2, 156)); - - function check () { - expect(newList).to.have.lengthOf(3); - expect(modList).to.have.lengthOf(2); - expect(delList).to.have.lengthOf(1); - expect(delList[0].name).to.equal('evented-object'); - expect(delList[0].val).to.equal(156); - done(); - } - - if (delList.length > 0) { - check(); - } else { - watcher.once('del', () => process.nextTick(check)); - } + it('should get object by name', () => { + const getStub = sinon.stub(watcher.listWatch, 'get'); + getStub.returns(createApple('name1', [], 'valid')); + const obj = watcher.get('name1'); + expect(obj).to.be.an.instanceof(Apple); + expect(obj.metadata.name).to.equal('name1'); + sinon.assert.calledWith(getStub, 'name1'); }); - it('should not crash upon error watch event', () => { - streamMock.feed('ERROR', createObject('error-object', 1, 160)); + it('should get undefined if object does not exist', () => { + const getStub = sinon.stub(watcher.listWatch, 'get'); + getStub.returns(undefined); + const obj = watcher.get('name1'); + expect(obj).to.be.undefined(); + 
sinon.assert.calledWith(getStub, 'name1'); }); - it('should not crash upon unknown watch event', () => { - streamMock.feed('UNKNOWN', createObject('some-object', 1, 160)); + it('should create an object and wait for new event', async () => { + const createStub = sinon.stub(watcher.k8sApi, 'createNamespacedCustomObject'); + createStub.resolves(); + const apple = createApple('name1', [], 'valid'); + const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('add', apple), EVENT_DELAY_MS); + await watcher.create(apple); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + sinon.assert.calledOnce(createStub); }); - it('should bypass the watcher when using getRawBypass', async () => { - await watcher.start(); - - getMock.add(createObject('new-object', 1, 123)); - - var obj = watcher.getRaw('new-object'); - expect(obj).is.null(); - - obj = await watcher.getRawBypass('new-object'); - expect(obj).is.not.null(); - - // getRawBypass also adds the newly retrieved object to the watcher cache so we should now see it - obj = watcher.getRaw('new-object'); - expect(obj).is.not.null(); + it('should timeout when "add" event does not come after a create', async () => { + const createStub = sinon.stub(watcher.k8sApi, 'createNamespacedCustomObject'); + createStub.resolves(); + const apple = createApple('name1', [], 'valid'); + const startTime = new Date(); + await watcher.create(apple); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_TIMEOUT_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); + sinon.assert.calledOnce(createStub); }); - it('should fail gracefully when using getRawBypass', async () => { - await watcher.start(); - - var obj = await watcher.getRawBypass('new-object-2'); - expect(obj).is.null(); + it('should update object and wait for mod event', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const 
getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + const newApple = createApple('name1', [], 'also valid'); + getStub.returns(apple); + const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', newApple), EVENT_DELAY_MS); + await watcher.update('name1', (orig) => { + return createApple(orig.metadata.name, [], 'also valid'); + }); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + assertReplaceCalledWith(replaceStub, 'name1', apple, { + spec: 'also valid' + }); + }); - getMock.add(createObject('new-object-2', 1, 123)); + it('should not try to update object if it does not exist', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + getStub.returns(); + await watcher.update('name1', (orig) => { + return createApple(orig.metadata.name, [], 'also valid'); + }); + sinon.assert.notCalled(replaceStub); + }); - getMock.setStatusCode(408); + it('should timeout when "update" event does not come after an update', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + await watcher.update('name1', (orig) => { + return createApple(orig.metadata.name, [], 'also valid'); + }); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_TIMEOUT_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); + sinon.assert.calledOnce(replaceStub); + }); - obj = await watcher.getRawBypass('new-object-2'); - expect(obj).is.null(); + it('should retry update of an object if it fails', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + 
replaceStub.onCall(0).rejects(new Error('update failed')); + replaceStub.onCall(1).resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + await watcher.update('name1', (orig) => { + return createApple(orig.metadata.name, [], 'also valid'); + }); + sinon.assert.calledTwice(replaceStub); + }); - obj = watcher.getRaw('new-object-2'); - expect(obj).is.null(); + it('should update status of object', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + await watcher.updateStatus('name1', (orig) => { + return _.assign({}, apple, { + status: 'some-state' + }); + }); + assertReplaceCalledWith(replaceStub, 'name1', apple, { + status: 'some-state' + }); + }); - getMock.setStatusCode(200); + it('should not try to update status of object if it does not exist', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(); + await watcher.updateStatus('name1', (orig) => { + return _.assign({}, apple, { + status: 'some-state' + }); + }); + sinon.assert.notCalled(replaceStub); + }); - obj = await watcher.getRawBypass('new-object-2'); - expect(obj).is.not.null(); + it('should timeout when "update" event does not come after status update', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + await watcher.updateStatus('name1', (orig) => { + return 
_.assign({}, apple, { + status: 'some-state' + }); + }); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_TIMEOUT_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); + sinon.assert.calledOnce(replaceStub); }); - }); - it('should defer event processing when sync is in progress', async () => { - var getMock = new GetMock(); - var streamMock = new StreamMock(); - var watcher = new Watcher('test', getMock, streamMock, objectFilter); - var newCount = 0; - var modCount = 0; - - // Use trick of queueing event with newer generation # for an object which - // is returned by GET. If event processing is done after GET, then we will - // see one new and one mod event. If not then we will see only one new - // event. - getMock.add(createObject('object', 1, 155)); - streamMock.feed('MODIFIED', createObject('object', 2, 156)); - watcher.on('new', () => newCount++); - watcher.on('mod', () => modCount++); + it('should retry status update of an object if it fails', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); + replaceStub.onCall(0).rejects(new Error('update failed')); + replaceStub.onCall(1).resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + await watcher.updateStatus('name1', (orig) => { + return _.assign({}, apple, { + status: 'some-state' + }); + }); + sinon.assert.calledTwice(replaceStub); + }); - await watcher.start(); + it('should fail if status update fails twice', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObjectStatus'); + replaceStub.onCall(0).rejects(new Error('update failed first time')); + replaceStub.onCall(1).rejects(new Error('update failed second time')); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + let error; + try { + await watcher.updateStatus('name1', (orig) => { + 
return _.assign({}, apple, { + status: 'some-state' + }); + }); + } catch (err) { + error = err; + } + expect(error.message).to.equal('Status update of apple "name1" failed: update failed second time'); + sinon.assert.calledTwice(replaceStub); + }); - expect(newCount).to.equal(1); - expect(modCount).to.equal(1); + it('should delete the object and wait for "delete" event', async () => { + const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); + deleteStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('delete', apple), EVENT_DELAY_MS); + await watcher.delete('name1'); + const delta = new Date() - startTime; + sinon.assert.calledOnce(deleteStub); + sinon.assert.calledWith(deleteStub, 'openebs.io', 'v1alpha1', 'namespace', + 'apples', 'name1'); + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + }); - watcher.stop(); - streamMock.end(); - }); + it('should timeout when "delete" event does not come after a delete', async () => { + const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); + deleteStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + await watcher.delete('name1'); + const delta = new Date() - startTime; + sinon.assert.calledOnce(deleteStub); + expect(delta).to.be.within(EVENT_TIMEOUT_MS, EVENT_TIMEOUT_MS + EYE_BLINK_MS); + }); - it('should merge old and new objects upon resync', (done) => { - var getMock = new GetMock(); - var streamMockTracker = new StreamMockTracker(); - var watcher = new Watcher('test', getMock, streamMockTracker, objectFilter); - var newObjs = []; - var modObjs = []; - var delObjs = []; - - getMock.add(createObject('object-to-be-retained', 1, 155)); - 
getMock.add(createObject('object-to-be-modified', 1, 155)); - getMock.add(createObject('object-to-be-deleted', 1, 155)); - - watcher.on('new', (obj) => newObjs.push(obj)); - watcher.on('mod', (obj) => modObjs.push(obj)); - watcher.on('del', (obj) => delObjs.push(obj)); - - watcher.start().then(() => { - expect(newObjs).to.have.lengthOf(3); - expect(modObjs).to.have.lengthOf(0); - expect(delObjs).to.have.lengthOf(0); - - streamMockTracker - .latest() - .feed('MODIFIED', createObject('object-to-be-retained', 2, 156)); - getMock.reset(); - getMock.add(createObject('object-to-be-retained', 2, 156)); - getMock.add(createObject('object-to-be-modified', 2, 156)); - getMock.add(createObject('object-to-be-created', 1, 156)); - - streamMockTracker.latest().end(); - - watcher.once('sync', () => { - expect(newObjs).to.have.lengthOf(4); - expect(modObjs).to.have.lengthOf(2); - expect(delObjs).to.have.lengthOf(1); - expect(newObjs[3].name).to.equal('object-to-be-created'); - expect(modObjs[0].name).to.equal('object-to-be-retained'); - expect(modObjs[1].name).to.equal('object-to-be-modified'); - expect(delObjs[0].name).to.equal('object-to-be-deleted'); + it('should not try to delete object that does not exist', async () => { + const deleteStub = sinon.stub(watcher.k8sApi, 'deleteNamespacedCustomObject'); + deleteStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(); + timeout = setTimeout(() => watcher.emitKubeEvent('delete', apple), EVENT_DELAY_MS); + await watcher.delete('name1'); + sinon.assert.notCalled(deleteStub); + }); - watcher.stop(); - streamMockTracker.latest().end(); - done(); + it('should add finalizer to object without any', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(apple); + 
const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.addFinalizer('name1', 'test.finalizer.com'); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + assertReplaceCalledWith(replaceStub, 'name1', apple, { + metadata: { + finalizers: ['test.finalizer.com'] + } }); }); - }); - it('should recover when watch fails during the sync', async () => { - class BrokenStreamMock { - constructor () { - this.iter = 0; - this.current = null; - } - - // We will fail (end) the stream 3x and 4th attempt will succeed - getObjectStream () { - const s = new StreamMock(); - this.current = s; - if (this.iter < 3) { - s.end(); + it('should add another finalizer to object', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.addFinalizer('name1', 'new.finalizer.com'); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + assertReplaceCalledWith(replaceStub, 'name1', apple, { + metadata: { + finalizers: ['new.finalizer.com', 'test.finalizer.com', 'test2.finalizer.com'] } - this.iter++; - return s; - } - - // get the most recently created underlaying stream - latest () { - return this.current; - } - } - - var getMock = new GetMock(100); - var brokenStreamMock = new BrokenStreamMock(); - var watcher = new Watcher('test', getMock, brokenStreamMock, objectFilter); - - var start = Date.now(); - await watcher.start(); - var diff = (Date.now() - start) / 1000; + }); + }); - // three retries will accumulate 7 seconds 
(1, 2 and 4s) - expect(diff).to.be.at.least(6); - expect(diff).to.be.at.most(8); - watcher.stop(); - brokenStreamMock.latest().end(); - }).timeout(10000); - - it('should recover when GET fails during the sync', async () => { - class BrokenGetMock { - constructor (stream) { - this.stream = stream; - this.iter = 0; - } + it('should not add twice the same finalizer', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); + getStub.returns(apple); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.addFinalizer('name1', 'test.finalizer.com'); + sinon.assert.notCalled(replaceStub); + }); - get () { - var self = this; - return new Promise((resolve, reject) => { - setTimeout(() => { - if (self.iter++ < 3) { - const err = new Error('Not found'); - err.statusCode = 404; - err.body = {}; - reject(err); - // TODO: defect in current implementation of watcher is that - // it waits for end of watch connection even when GET fails - self.stream.latest().end(); - } else { - resolve({ - statusCode: 200, - body: { items: [] } - }); - } - }, 0); - }); - } - } + it('should not add the finalizer if object does not exist', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', [], 'valid'); + getStub.returns(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.addFinalizer('name1', 'test.finalizer.com'); + sinon.assert.notCalled(replaceStub); + }); - var streamMockTracker = new StreamMockTracker(); - var brokenGetMock = new BrokenGetMock(streamMockTracker); - var watcher = new Watcher( - 'test', - brokenGetMock, - 
streamMockTracker, - objectFilter - ); + it('should remove finalizer from object', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', ['test.finalizer.com', 'test2.finalizer.com'], 'valid'); + getStub.returns(apple); + const startTime = new Date(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.removeFinalizer('name1', 'test.finalizer.com'); + const delta = new Date() - startTime; + expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + sinon.assert.calledOnce(replaceStub); + assertReplaceCalledWith(replaceStub, 'name1', apple, { + metadata: { + finalizers: ['test2.finalizer.com'] + } + }); + }); - var start = Date.now(); - await watcher.start(); - var diff = (Date.now() - start) / 1000; + it('should not try to remove finalizer that does not exist', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', ['test2.finalizer.com'], 'valid'); + getStub.returns(apple); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.removeFinalizer('name1', 'test.finalizer.com'); + sinon.assert.notCalled(replaceStub); + }); - // three retries will accumulate 7 seconds (1, 2 and 4s) - expect(diff).to.be.at.least(6); - expect(diff).to.be.at.most(8); - watcher.stop(); - streamMockTracker.latest().end(); - }).timeout(10000); + it('should not try to remove finalizer if object does not exist', async () => { + const replaceStub = sinon.stub(watcher.k8sApi, 'replaceNamespacedCustomObject'); + replaceStub.resolves(); + const getStub = sinon.stub(watcher.listWatch, 'get'); + const apple = createApple('name1', ['test.finalizer.com'], 'valid'); + 
getStub.returns(); + timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); + await watcher.removeFinalizer('name1', 'test.finalizer.com'); + sinon.assert.notCalled(replaceStub); + }); + }); }; diff --git a/csi/moac/tsconfig.json b/csi/moac/tsconfig.json index d90c310b0..4e5658426 100644 --- a/csi/moac/tsconfig.json +++ b/csi/moac/tsconfig.json @@ -11,7 +11,7 @@ // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */ // "declaration": true, /* Generates corresponding '.d.ts' file. */ // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ - // "sourceMap": true, /* Generates corresponding '.map' file. */ + "sourceMap": true, /* Generates corresponding '.map' file. */ // "outFile": "./", /* Concatenate and emit output to single file. */ // "outDir": "./", /* Redirect output structure to the directory. */ // "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */ @@ -61,9 +61,12 @@ "resolveJsonModule": true /* allows for importing, extracting types from and generating .json files */ }, "files": [ + "watcher.ts", + "nexus.ts", + "node_operator.ts", "replica.ts", "pool.ts", - "nexus.ts", - "finalizer_helper.ts" + "pool_operator.ts", + "volume_operator.ts", ] } diff --git a/csi/moac/volume_operator.js b/csi/moac/volume_operator.js deleted file mode 100644 index 8810ebd05..000000000 --- a/csi/moac/volume_operator.js +++ /dev/null @@ -1,465 +0,0 @@ -// Volume operator managing volume k8s custom resources. -// -// Primary motivation for the resource is to provide information about -// existing volumes. 
Other actions and their consequences follow: -// -// * destroying the resource implies volume destruction (not advisable) -// * creating the resource implies volume creation (not advisable) -// * modification of "preferred nodes" property influences scheduling of new replicas -// * modification of "required nodes" property moves the volume to different nodes -// * modification of replica count property changes redundancy of the volume -// -// Volume operator stands between k8s custom resource (CR) describing desired -// state and volume manager reflecting the actual state. It gets new/mod/del -// events from both, from the world of ideas and from the world of material -// things. It's task which is not easy, is to restore harmony between them: -// -// +---------+ new/mod/del +----------+ new/mod/del +-----------+ -// | Volumes +--------------> Operator <---------------+ Watcher | -// +------^--+ ++--------++ +---^-------+ -// | | | | -// | | | | -// +------------------+ +--------------------+ -// create/modify/destroy create/modify/destroy -// -// -// real object event | CR exists | CR does not exist -// ------------------------------------------------------------ -// new | -- | create CR -// mod | modify CR | -- -// del | delete CR | -- -// -// -// CR event | volume exists | volume does not exist -// --------------------------------------------------------------- -// new | modify volume | create volume -// mod | modify volume | -- -// del | delete volume | -- -// - -'use strict'; - -const _ = require('lodash'); -const path = require('path'); -const assert = require('assert'); -const fs = require('fs'); -const yaml = require('js-yaml'); -const EventStream = require('./event_stream'); -const log = require('./logger').Logger('volume-operator'); -const Watcher = require('./watcher'); -const Workq = require('./workq'); - -const crdVolume = yaml.safeLoad( - fs.readFileSync(path.join(__dirname, '/crds/mayastorvolume.yaml'), 'utf8') -); -// lower-case letters uuid pattern 
-const uuidRegexp = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$/; - -// Volume operator managing volume k8s custom resources. -class VolumeOperator { - constructor (namespace) { - this.namespace = namespace; - this.k8sClient = null; // k8s client - this.volumes = null; // Volume manager - this.eventStream = null; // A stream of node, replica and nexus events. - this.watcher = null; // volume resource watcher. - this.createdBySelf = []; // UUIDs of volumes created by the operator itself - // Events from k8s are serialized so that we don't flood moac by - // concurrent changes to volumes. - this.workq = new Workq(); - } - - // Create volume CRD if it doesn't exist and augment client object so that CRD - // can be manipulated as any other standard k8s api object. - // - // @param {object} k8sClient Client for k8s api server. - // @param {object} volumes Volume manager. - // - async init (k8sClient, volumes) { - log.info('Initializing volume operator'); - - try { - await k8sClient.apis[ - 'apiextensions.k8s.io' - ].v1beta1.customresourcedefinitions.post({ body: crdVolume }); - log.info('Created CRD ' + crdVolume.spec.names.kind); - } catch (err) { - // API returns a 409 Conflict if CRD already exists. - if (err.statusCode !== 409) throw err; - } - k8sClient.addCustomResourceDefinition(crdVolume); - - this.k8sClient = k8sClient; - this.volumes = volumes; - - // Initialize watcher with all callbacks for new/mod/del events - this.watcher = new Watcher( - 'volume', - this.k8sClient.apis['openebs.io'].v1alpha1.namespaces( - this.namespace - ).mayastorvolumes, - this.k8sClient.apis['openebs.io'].v1alpha1.watch.namespaces( - this.namespace - ).mayastorvolumes, - this._filterMayastorVolume - ); - } - - // Normalize k8s mayastor volume resource. - // - // @param {object} msv MayaStor volume custom resource. - // @returns {object} Properties defining a volume. 
- // - _filterMayastorVolume (msv) { - // We should probably validate the whole record using json scheme or - // something like that, but for now do just the basic check. - if (!msv.metadata.name.match(uuidRegexp)) { - log.warn( - `Ignoring mayastor volume resource with invalid UUID: ${msv.metadata.name}` - ); - return null; - } - if (!msv.spec.requiredBytes) { - log.warn('Ignoring mayastor volume resource without requiredBytes'); - return null; - } - const props = { - // spec part - metadata: { name: msv.metadata.name }, - spec: { - replicaCount: msv.spec.replicaCount || 1, - preferredNodes: [].concat(msv.spec.preferredNodes || []).sort(), - requiredNodes: [].concat(msv.spec.requiredNodes || []).sort(), - requiredBytes: msv.spec.requiredBytes, - limitBytes: msv.spec.limitBytes || 0, - protocol: msv.spec.protocol - } - }; - // volatile part - const st = msv.status; - if (st) { - props.status = { - size: st.size, - state: st.state, - node: st.node, - // sort the replicas according to uri to have deterministic order - replicas: [].concat(st.replicas || []).sort((a, b) => { - if (a.uri < b.uri) return -1; - else if (a.uri > b.uri) return 1; - else return 0; - }) - }; - if (st.nexus) { - props.status.nexus = st.nexus; - } - } - - return props; - } - - // Start volume operator's watcher loop. - // - // NOTE: Not getting the start sequence right can have catastrophic - // consequence leading to unintended volume destruction and data loss. - // Therefore it's important not to call this function before volume - // manager and registry have been started up. - // - async start () { - var self = this; - - // install event handlers to follow changes to resources. - self._bindWatcher(self.watcher); - await self.watcher.start(); - - // This will start async processing of volume events. 
- self.eventStream = new EventStream({ volumes: self.volumes }); - self.eventStream.on('data', async (ev) => { - // the only kind of event that comes from the volumes source - assert(ev.kind === 'volume'); - - self.workq.push(ev, self._onVolumeEvent.bind(self)); - }); - } - - async _onVolumeEvent (ev) { - var self = this; - const uuid = ev.object.uuid; - - if (ev.eventType === 'new' || ev.eventType === 'mod') { - const k8sVolume = await self.watcher.getRawBypass(uuid); - const spec = self._volumeToSpec(ev.object); - const status = self._volumeToStatus(ev.object); - - if (k8sVolume) { - try { - await self._updateResource(uuid, k8sVolume, spec); - } catch (err) { - log.error(`Failed to update volume resource "${uuid}": ${err}`); - return; - } - } else if (ev.eventType === 'new') { - try { - await self._createResource(uuid, spec); - } catch (err) { - log.error(`Failed to create volume resource "${uuid}": ${err}`); - return; - } - // Note down that the volume existed so we don't try to create it - // again when handling watcher new event. - self.createdBySelf.push(uuid); - } - await this._updateStatus(uuid, status); - } else if (ev.eventType === 'del') { - await self._deleteResource(uuid); - } else { - assert(false); - } - } - - // Transform volume to spec properties used in k8s volume resource. - // - // @param {object} volume Volume object. - // @returns {object} Spec properties. - // - _volumeToSpec (volume) { - return { - replicaCount: volume.replicaCount, - preferredNodes: _.clone(volume.preferredNodes), - requiredNodes: _.clone(volume.requiredNodes), - requiredBytes: volume.requiredBytes, - limitBytes: volume.limitBytes, - protocol: volume.protocol - }; - } - - // Transform volume to status properties used in k8s volume resource. - // - // @param {object} volume Volume object. - // @returns {object} Status properties. 
- // - _volumeToStatus (volume) { - const st = { - size: volume.getSize(), - state: volume.state, - reason: '', - node: volume.getNodeName(), - replicas: Object.values(volume.replicas).map((r) => { - return { - node: r.pool.node.name, - pool: r.pool.name, - uri: r.uri, - offline: r.isOffline() - }; - }) - }; - if (volume.nexus) { - st.nexus = { - deviceUri: volume.nexus.deviceUri || '', - state: volume.nexus.state, - children: volume.nexus.children.map((ch) => { - return { - uri: ch.uri, - state: ch.state - }; - }) - }; - } - return st; - } - - // Create k8s CRD object. - // - // @param {string} uuid ID of the created volume. - // @param {object} spec New volume spec. - // - async _createResource (uuid, spec) { - log.info(`Creating volume resource "${uuid}"`); - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastorvolumes.post({ - body: { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: { - name: uuid, - namespace: this.namespace - }, - spec - } - }); - } - - // Update properties of k8s CRD object or create it if it does not exist. - // - // @param {string} uuid ID of the updated volume. - // @param {object} k8sVolume Existing k8s resource object. - // @param {object} spec New volume spec. - // - async _updateResource (uuid, k8sVolume, spec) { - // Update object only if it has really changed - if (!_.isEqual(k8sVolume.spec, spec)) { - log.info(`Updating spec of volume resource "${uuid}"`); - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastorvolumes(uuid) - .put({ - body: { - apiVersion: 'openebs.io/v1alpha1', - kind: 'MayastorVolume', - metadata: k8sVolume.metadata, - spec: _.assign(k8sVolume.spec, spec) - } - }); - } - } - - // Update state and reason of the resource. - // - // NOTE: This method does not throw if the operation fails as there is nothing - // we can do if it fails. Though we log an error message in such a case. 
- // - // @param {string} uuid UUID of the resource. - // @param {object} status Status properties. - // - async _updateStatus (uuid, status) { - var k8sVolume = await this.watcher.getRawBypass(uuid); - if (!k8sVolume) { - log.warn( - `Wanted to update state of volume resource "${uuid}" that disappeared` - ); - return; - } - if (!k8sVolume.status) { - k8sVolume.status = {}; - } - if (_.isEqual(k8sVolume.status, status)) { - // avoid unnecessary status updates - return; - } - log.debug(`Updating status of volume resource "${uuid}"`); - _.assign(k8sVolume.status, status); - try { - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastorvolumes(uuid) - .status.put({ body: k8sVolume }); - } catch (err) { - log.error(`Failed to update status of volume resource "${uuid}": ${err}`); - } - } - - // Delete volume resource with specified uuid. - // - // @param {string} uuid UUID of the volume resource to delete. - // - async _deleteResource (uuid) { - var k8sVolume = await this.watcher.getRawBypass(uuid); - if (k8sVolume) { - log.info(`Deleting volume resource "${uuid}"`); - try { - await this.k8sClient.apis['openebs.io'].v1alpha1 - .namespaces(this.namespace) - .mayastorvolumes(uuid) - .delete(); - } catch (err) { - log.error(`Failed to delete volume resource "${uuid}": ${err}`); - } - } - } - - // Stop listening for watcher and node events and reset the cache - async stop () { - this.watcher.removeAllListeners(); - await this.watcher.stop(); - this.eventStream.destroy(); - this.eventStream = null; - } - - // Bind watcher's new/mod/del events to volume operator's callbacks. - // - // @param {object} watcher k8s volume resource watcher. 
- // - _bindWatcher (watcher) { - var self = this; - watcher.on('new', (obj) => { - self.workq.push(obj, self._importVolume.bind(self)); - }); - watcher.on('mod', (obj) => { - self.workq.push(obj, self._modifyVolume.bind(self)); - }); - watcher.on('del', (obj) => { - self.workq.push(obj.metadata.name, self._destroyVolume.bind(self)); - }); - } - - // When moac restarts the volume manager does not know which volumes exist. - // We need to import volumes based on the k8s resources. - // - // @param {object} resource Volume resource properties. - // - async _importVolume (resource) { - const uuid = resource.metadata.name; - const createdIdx = this.createdBySelf.indexOf(uuid); - if (createdIdx >= 0) { - // don't react to self - this.createdBySelf.splice(createdIdx, 1); - return; - } - - log.debug(`Importing volume "${uuid}" in response to "new" resource event`); - try { - await this.volumes.importVolume(uuid, resource.spec, resource.status); - } catch (err) { - log.error( - `Failed to import volume "${uuid}" based on new resource: ${err}` - ); - await this._updateStatus(uuid, { - state: 'pending', - reason: err.toString() - }); - } - } - - // Modify volume according to the specification. - // - // @param {object} resource Volume resource properties. - // - async _modifyVolume (resource) { - const uuid = resource.metadata.name; - const volume = this.volumes.get(uuid); - - if (!volume) { - log.warn( - `Volume resource "${uuid}" was modified but the volume does not exist` - ); - return; - } - try { - if (volume.update(resource.spec)) { - log.debug( - `Updating volume "${uuid}" in response to "mod" resource event` - ); - volume.fsa(); - } - } catch (err) { - log.error(`Failed to update volume "${uuid}" based on resource: ${err}`); - } - } - - // Remove the volume from internal state and if it exists destroy it. - // - // @param {string} uuid ID of the volume to destroy. 
- // - async _destroyVolume (uuid) { - log.debug( - `Destroying volume "${uuid}" in response to "del" resource event` - ); - try { - await this.volumes.destroyVolume(uuid); - } catch (err) { - log.error(`Failed to destroy volume "${uuid}": ${err}`); - } - } -} - -module.exports = VolumeOperator; diff --git a/csi/moac/volume_operator.ts b/csi/moac/volume_operator.ts new file mode 100644 index 000000000..7d9fb6cf3 --- /dev/null +++ b/csi/moac/volume_operator.ts @@ -0,0 +1,544 @@ +// Volume operator managing volume k8s custom resources. +// +// Primary motivation for the resource is to provide information about +// existing volumes. Other actions and their consequences follow: +// +// * destroying the resource implies volume destruction (not advisable) +// * creating the resource implies volume import (not advisable) +// * modification of "preferred nodes" property influences scheduling of new replicas +// * modification of "required nodes" property moves the volume to different nodes +// * modification of replica count property changes redundancy of the volume +// +// Volume operator stands between k8s custom resource (CR) describing desired +// state and volume manager reflecting the actual state. It gets new/mod/del +// events from both, from the world of ideas and from the world of material +// things. 
It's task which is not easy, is to restore harmony between them: +// +// +---------+ new/mod/del +----------+ new/mod/del +-----------+ +// | Volumes +--------------> Operator <---------------+ Watcher | +// +------^--+ ++--------++ +---^-------+ +// | | | | +// | | | | +// +------------------+ +--------------------+ +// create/modify/destroy create/modify/destroy +// +// +// real object event | CR exists | CR does not exist +// ------------------------------------------------------------ +// new | -- | create CR +// mod | modify CR | -- +// del | delete CR | -- +// +// +// CR event | volume exists | volume does not exist +// --------------------------------------------------------------- +// new | modify volume | create volume +// mod | modify volume | -- +// del | delete volume | -- +// + +const yaml = require('js-yaml'); +const EventStream = require('./event_stream'); +const log = require('./logger').Logger('volume-operator'); +const Workq = require('./workq'); + +import assert from 'assert'; +import * as fs from 'fs'; +import * as _ from 'lodash'; +import * as path from 'path'; +import { + ApiextensionsV1Api, + KubeConfig, +} from 'client-node-fixed-watcher'; +import { + CustomResource, + CustomResourceCache, + CustomResourceMeta, +} from './watcher'; + +const RESOURCE_NAME: string = 'mayastorvolume'; +const crdVolume = yaml.safeLoad( + fs.readFileSync(path.join(__dirname, '/crds/mayastorvolume.yaml'), 'utf8') +); +// lower-case letters uuid pattern +const uuidRegexp = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$/; + +// Protocol used to export nexus (volume) +enum Protocol { + Unknown = 'unknown', + Nbd = 'nbd', + Iscsi = 'iscsi', + Nvmf = 'nvmf', +} + +function protocolFromString(val: string): Protocol { + if (val == Protocol.Nbd) { + return Protocol.Nbd; + } else if (val == Protocol.Iscsi) { + return Protocol.Iscsi; + } else if (val == Protocol.Nvmf) { + return Protocol.Nvmf; + } else { + return Protocol.Unknown; + } +} + +// 
State of the volume +enum State { + Unknown = 'unknown', + Healthy = 'healthy', + Degraded = 'degraded', + Faulted = 'faulted', + Pending = 'pending', + Offline = 'offline', + Error = 'error', +} + +function stateFromString(val: string): State { + if (val == State.Healthy) { + return State.Healthy; + } else if (val == State.Degraded) { + return State.Degraded; + } else if (val == State.Faulted) { + return State.Faulted; + } else if (val == State.Pending) { + return State.Pending; + } else if (val == State.Offline) { + return State.Offline; + } else if (val == State.Error) { + return State.Error; + } else { + return State.Unknown; + } +} + +// Spec part in volume resource +type VolumeSpec = { + replicaCount: number, + preferredNodes: string[], + requiredNodes: string[], + requiredBytes: number, + limitBytes: number, + protocol: Protocol, +}; + +// Optional status part in volume resource +type VolumeStatus = { + size: number, + state: State, + reason?: string, + node: string, + replicas: { + node: string, + pool: string, + uri: string, + offline: boolean, + }[], + nexus?: { + deviceUri?: string, + state: string, + children: { + uri: string, + state: string, + }[] + } +}; + +// Object defines properties of node resource. 
+export class VolumeResource extends CustomResource { + apiVersion?: string; + kind?: string; + metadata: CustomResourceMeta; + spec: VolumeSpec; + status?: VolumeStatus; + + constructor(cr: CustomResource) { + super(); + this.apiVersion = cr.apiVersion; + this.kind = cr.kind; + if (cr.metadata?.name === undefined) { + throw new Error('Missing name attribute'); + } + this.metadata = cr.metadata; + if (!cr.metadata.name.match(uuidRegexp)) { + throw new Error(`Invalid UUID`); + } + let spec = cr.spec as any; + if (spec === undefined) { + throw new Error('Missing spec section'); + } + if (!spec.requiredBytes) { + throw new Error('Missing requiredBytes'); + } + this.spec = { + replicaCount: spec.replicaCount || 1, + preferredNodes: [].concat(spec.preferredNodes || []).sort(), + requiredNodes: [].concat(spec.requiredNodes || []).sort(), + requiredBytes: spec.requiredBytes, + limitBytes: spec.limitBytes || 0, + protocol: protocolFromString(spec.protocol) + }; + let status = cr.status as any; + if (status !== undefined) { + this.status = { + size: status.size || 0, + state: stateFromString(status.state), + node: status.node, + // sort the replicas according to uri to have deterministic order + replicas: [].concat(status.replicas || []).sort((a: any, b: any) => { + if (a.uri < b.uri) return -1; + else if (a.uri > b.uri) return 1; + else return 0; + }), + } + if (status.nexus) { + this.status.nexus = status.nexus; + } + } + } + + getUuid(): string { + let uuid = this.metadata.name; + if (uuid === undefined) { + throw new Error('Volume resource without UUID'); + } else { + return uuid; + } + } +} + +// Volume operator managing volume k8s custom resources. +export class VolumeOperator { + namespace: string; + volumes: any; // Volume manager + eventStream: any; // A stream of node, replica and nexus events. + watcher: CustomResourceCache; // volume resource watcher. 
+ workq: any; // Events from k8s are serialized so that we don't flood moac by + // concurrent changes to volumes. + + // Create volume operator object. + // + // @param namespace Namespace the operator should operate on. + // @param kubeConfig KubeConfig. + // @param volumes Volume manager. + // @param [idleTimeout] Timeout for restarting watcher connection when idle. + constructor ( + namespace: string, + kubeConfig: KubeConfig, + volumes: any, + idleTimeout: number | undefined, + ) { + this.namespace = namespace; + this.volumes = volumes; + this.eventStream = null; + this.workq = new Workq(); + this.watcher = new CustomResourceCache( + this.namespace, + RESOURCE_NAME, + kubeConfig, + VolumeResource, + { idleTimeout } + ); + } + + // Create volume CRD if it doesn't exist. + // + // @param kubeConfig KubeConfig. + async init (kubeConfig: KubeConfig) { + log.info('Initializing volume operator'); + let k8sExtApi = kubeConfig.makeApiClient(ApiextensionsV1Api); + try { + await k8sExtApi.createCustomResourceDefinition(crdVolume); + log.info(`Created CRD ${RESOURCE_NAME}`); + } catch (err) { + // API returns a 409 Conflict if CRD already exists. + if (err.statusCode !== 409) throw err; + } + } + + // Start volume operator's watcher loop. + // + // NOTE: Not getting the start sequence right can have catastrophic + // consequence leading to unintended volume destruction and data loss. + // + async start () { + var self = this; + + // install event handlers to follow changes to resources. + this._bindWatcher(this.watcher); + await this.watcher.start(); + + // This will start async processing of volume events. 
+ this.eventStream = new EventStream({ volumes: this.volumes }); + this.eventStream.on('data', async (ev: any) => { + // the only kind of event that comes from the volumes source + assert(ev.kind === 'volume'); + self.workq.push(ev, self._onVolumeEvent.bind(self)); + }); + } + + async _onVolumeEvent (ev: any) { + const uuid = ev.object.uuid; + + if (ev.eventType === 'new' || ev.eventType === 'mod') { + const origObj = this.watcher.get(uuid); + const spec = { + replicaCount: ev.object.replicaCount, + preferredNodes: _.clone(ev.object.preferredNodes), + requiredNodes: _.clone(ev.object.requiredNodes), + requiredBytes: ev.object.requiredBytes, + limitBytes: ev.object.limitBytes, + protocol: protocolFromString(ev.object.protocol) + }; + const status = this._volumeToStatus(ev.object); + + if (origObj !== undefined) { + await this._updateSpec(uuid, origObj, spec); + } else if (ev.eventType === 'new') { + try { + await this._createResource(uuid, spec); + } catch (err) { + log.error(`Failed to create volume resource "${uuid}": ${err}`); + return; + } + } + await this._updateStatus(uuid, status); + } else if (ev.eventType === 'del') { + await this._deleteResource(uuid); + } else { + assert(false); + } + } + + // Transform volume to status properties used in k8s volume resource. + // + // @param volume Volume object. + // @returns Status properties. + // + _volumeToStatus (volume: any): VolumeStatus { + const st: VolumeStatus = { + size: volume.getSize(), + state: stateFromString(volume.state), + node: volume.getNodeName(), + replicas: Object.values(volume.replicas).map((r: any) => { + return { + node: r.pool.node.name, + pool: r.pool.name, + uri: r.uri, + offline: r.isOffline() + }; + }) + }; + if (volume.nexus) { + st.nexus = { + deviceUri: volume.nexus.deviceUri || '', + state: volume.nexus.state, + children: volume.nexus.children.map((ch: any) => { + return { + uri: ch.uri, + state: ch.state + }; + }) + }; + } + return st; + } + + // Create k8s CRD object. 
+  //
+  // @param uuid ID of the created volume.
+  // @param spec New volume spec.
+  //
+  async _createResource (uuid: string, spec: VolumeSpec) {
+    await this.watcher.create({
+      apiVersion: 'openebs.io/v1alpha1',
+      kind: 'MayastorVolume',
+      metadata: {
+        name: uuid,
+        namespace: this.namespace
+      },
+      spec
+    });
+  }
+
+  // Update properties of k8s CRD object or create it if it does not exist.
+  //
+  // @param uuid ID of the updated volume.
+  // @param origObj Existing k8s resource object.
+  // @param spec New volume spec.
+  //
+  async _updateSpec (uuid: string, origObj: VolumeResource, spec: VolumeSpec) {
+    try {
+      await this.watcher.update(uuid, (orig: VolumeResource) => {
+        // Update object only if it has really changed
+        if (_.isEqual(origObj.spec, spec)) {
+          return;
+        }
+        log.info(`Updating spec of volume resource "${uuid}"`);
+        return {
+          apiVersion: 'openebs.io/v1alpha1',
+          kind: 'MayastorVolume',
+          metadata: orig.metadata,
+          spec,
+        };
+      });
+    } catch (err) {
+      log.error(`Failed to update volume resource "${uuid}": ${err}`);
+      return;
+    }
+  }
+
+  // Update status of the volume based on real data obtained from storage node.
+  //
+  // @param uuid UUID of the resource.
+  // @param status Status properties.
+  //
+  async _updateStatus (uuid: string, status: VolumeStatus) {
+    try {
+      await this.watcher.updateStatus(uuid, (orig: VolumeResource) => {
+        if (_.isEqual(orig.status, status)) {
+          // avoid unnecessary status updates
+          return;
+        }
+        log.debug(`Updating status of volume resource "${uuid}"`);
+        // merge old and new properties
+        return {
+          apiVersion: 'openebs.io/v1alpha1',
+          kind: 'MayastorVolume',
+          metadata: orig.metadata,
+          spec: orig.spec,
+          status,
+        };
+      });
+    } catch (err) {
+      log.error(`Failed to update status of volume resource "${uuid}": ${err}`);
+    }
+  }
+
+  // Set state and reason not touching the other status fields.
+  async _updateState (uuid: string, state: State, reason: string) {
+    try {
+      await this.watcher.updateStatus(uuid, (orig: VolumeResource) => {
+        if (orig.status?.state === state && orig.status?.reason === reason) {
+          // avoid unnecessary status updates
+          return;
+        }
+        log.debug(`Updating state of volume resource "${uuid}"`);
+        // merge old and new properties
+        let newStatus = _.assign({}, orig.status, { state, reason });
+        return {
+          apiVersion: 'openebs.io/v1alpha1',
+          kind: 'MayastorVolume',
+          metadata: orig.metadata,
+          spec: orig.spec,
+          status: newStatus,
+        };
+      });
+    } catch (err) {
+      log.error(`Failed to update status of volume resource "${uuid}": ${err}`);
+    }
+  }
+
+  // Delete volume resource with specified uuid.
+  //
+  // @param uuid UUID of the volume resource to delete.
+  //
+  async _deleteResource (uuid: string) {
+    try {
+      log.info(`Deleting volume resource "${uuid}"`);
+      await this.watcher.delete(uuid);
+    } catch (err) {
+      log.error(`Failed to delete volume resource "${uuid}": ${err}`);
+    }
+  }
+
+  // Stop listening for watcher and node events and reset the cache
+  async stop () {
+    this.watcher.stop();
+    this.watcher.removeAllListeners();
+    if (this.eventStream) {
+      this.eventStream.destroy();
+      this.eventStream = null;
+    }
+  }
+
+  // Bind watcher's new/mod/del events to volume operator's callbacks.
+  //
+  // @param watcher k8s volume resource cache.
+  //
+  _bindWatcher (watcher: CustomResourceCache) {
+    watcher.on('new', (obj: VolumeResource) => {
+      this.workq.push(obj, this._importVolume.bind(this));
+    });
+    watcher.on('mod', (obj: VolumeResource) => {
+      this.workq.push(obj, this._modifyVolume.bind(this));
+    });
+    watcher.on('del', (obj: VolumeResource) => {
+      // most likely it was not user but us (the operator) who deleted
+      // the resource. So check if it really exists first.
+ if (this.volumes.get(obj.metadata.name)) { + this.workq.push(obj.metadata.name, this._destroyVolume.bind(this)); + } + }); + } + + // When moac restarts the volume manager does not know which volumes exist. + // We need to import volumes based on the k8s resources. + // + // @param resource Volume resource properties. + // + async _importVolume (resource: VolumeResource) { + const uuid = resource.getUuid(); + + log.debug(`Importing volume "${uuid}" in response to "new" resource event`); + try { + await this.volumes.importVolume(uuid, resource.spec, resource.status); + } catch (err) { + log.error( + `Failed to import volume "${uuid}" based on new resource: ${err}` + ); + await this._updateState(uuid, State.Error, err.toString()); + } + } + + // Modify volume according to the specification. + // + // @param resource Volume resource properties. + // + async _modifyVolume (resource: VolumeResource) { + const uuid = resource.getUuid(); + const volume = this.volumes.get(uuid); + + if (!volume) { + log.warn( + `Volume resource "${uuid}" was modified but the volume does not exist` + ); + return; + } + try { + if (volume.update(resource.spec)) { + log.debug( + `Updating volume "${uuid}" in response to "mod" resource event` + ); + volume.fsa(); + } + } catch (err) { + log.error(`Failed to update volume "${uuid}" based on resource: ${err}`); + } + } + + // Remove the volume from internal state and if it exists destroy it. + // + // @param uuid ID of the volume to destroy. 
+ // + async _destroyVolume (uuid: string) { + log.debug( + `Destroying volume "${uuid}" in response to "del" resource event` + ); + try { + await this.volumes.destroyVolume(uuid); + } catch (err) { + log.error(`Failed to destroy volume "${uuid}": ${err}`); + } + } +} diff --git a/csi/moac/watcher.js b/csi/moac/watcher.js deleted file mode 100644 index 869c268b6..000000000 --- a/csi/moac/watcher.js +++ /dev/null @@ -1,318 +0,0 @@ -'use strict'; - -const assert = require('assert'); -const EventEmitter = require('events'); -const log = require('./logger').Logger('watcher'); - -// in case of permanent k8s api server failure we retry with max interval -// of this # of seconds -const MAX_RECONNECT_DELAY = 30; - -// This is a classic operator loop design as seen in i.e. operator-sdk (golang) -// to watch a k8s resource. We combine http GET with watcher events to do -// it in an efficient way. First we do GET to populate the cache and then -// maintain it using watch events. When the watch connection is closed by -// the server (happens every minute or so), we do GET and continue watch again. -// -// It is a general implementation of watcher which can be used for any resource -// operator. The operator should subscribe to "new", "mod" and "del" events -// which all pass object parameter and are triggered when a resource is -// added, modified or deleted. -// -class Watcher extends EventEmitter { - // Construct a watcher for resource. - // name: name of the watched resource - // getEp: k8s api endpoint with .get() method to get the objects - // streamEp: k8s api endpoint with .getObjectStream() method to obtain - // stream of watch events - // filterCb: converts k8s object to representation understood by the - // operator. Or returns null if object should be ignored. 
- constructor (name, getEp, streamEp, filterCb) { - super(); - this.name = name; - this.getEp = getEp; - this.streamEp = streamEp; - this.filterCb = filterCb; - this.objects = null; // the cache of objects being watched - this.noRestart = false; // do not renew watcher connection - this.startResolve = null; // start promise in case of delayed start due - // to an error - this.objectStream = null; // non-null if watch connection is active - this.getInProg = false; // true if GET objects query is in progress - this.reconnectDelay = 0; // Exponential backoff in case of api server - // failures (in secs) - this.pendingEvents = null; // watch events while sync is in progress - // (if not null -> GET is in progress) - } - - // Start asynchronously the watcher - async start () { - var self = this; - self.objectStream = await self.streamEp.getObjectStream(); - - // TODO: missing upper bound on exponential backoff - self.reconnectDelay = Math.min( - Math.max(2 * self.reconnectDelay, 1), - MAX_RECONNECT_DELAY - ); - self.pendingEvents = []; - assert(!self.getInProg); - self.getInProg = true; - // start the stream of events before GET query so that we don't miss any - // event while performing the GET. - self.objectStream.on('data', (ev) => { - log.trace( - `Event ${ev.type} in ${self.name} watcher: ${JSON.stringify(ev.object)}` - ); - - // if update of the node list is in progress, queue the event for later - if (self.pendingEvents != null) { - log.trace(`Event deferred until ${self.name} watcher is synced`); - self.pendingEvents.push(ev); - return; - } - - self._processEvent(ev); - }); - - self.objectStream.on('error', (err) => { - log.error(`stream error in ${self.name} watcher: ${err}`); - }); - - // k8s api server disconnects watcher after a timeout. If that happens - // reconnect and start again. 
- self.objectStream.once('end', () => { - self.objectStream = null; - if (self.getInProg) { - // if watcher disconnected before we finished syncing, we have - // to wait for the GET request to finish and then start over - log.error(`${self.name} watch stream closed before the sync completed`); - } else { - // reconnect and start watching again - log.debug(`${self.name} watch stream disconnected`); - } - self.scheduleRestart(); - }); - - var items; - try { - const res = await self.getEp.get(); - items = res.body.items; - } catch (err) { - log.error( - `Failed to get list of ${self.name} objects: HTTP ${err.statusCode}` - ); - self.getInProg = false; - self.scheduleRestart(); - return self.delayedStart(); - } - - // if watcher did end before we retrieved list of objects then start over - self.getInProg = false; - if (!self.objectStream) { - self.scheduleRestart(); - return self.delayedStart(); - } - - log.trace(`List of watched ${self.name} objects: ${JSON.stringify(items)}`); - - // filter the obtained objects - var objects = {}; - for (let i = 0; i < items.length; i++) { - const obj = this.filterCb(items[i]); - if (obj != null) { - objects[items[i].metadata.name] = { - object: obj, - k8sObject: items[i] - }; - } - } - - const origObjects = self.objects; - self.objects = {}; - - if (origObjects == null) { - // the first time all objects appear to be new - for (const name in objects) { - self.objects[name] = objects[name].k8sObject; - self.emit('new', objects[name].object); - } - } else { - // Merge old node list with the new node list - // First delete objects which no longer exist - for (const name in origObjects) { - if (!(name in objects)) { - self.emit('del', self.filterCb(origObjects[name])); - } - } - // Second detect new objects and modified objects - for (const name in objects) { - const k8sObj = objects[name].k8sObject; - const obj = objects[name].object; - const origObj = origObjects[name]; - - self.objects[name] = k8sObj; - - if (origObj) { - const 
generation = k8sObj.metadata.generation; - // Some objects don't have generation # - if (!generation || generation > origObj.metadata.generation) { - self.emit('mod', obj); - } - } else { - self.emit('new', obj); - } - } - } - - var ev; - while ((ev = self.pendingEvents.pop())) { - self._processEvent(ev); - } - self.pendingEvents = null; - self.reconnectDelay = 0; - - log.info(`${self.name} watcher sync completed`); - - // if the start was delayed, then resolve the promise now - if (self.startResolve) { - self.startResolve(); - self.startResolve = null; - } - - // this event is for test cases - self.emit('sync'); - } - - // Stop does not mean stopping watcher immediately, but rather not restarting - // it again when watcher connection is closed. - // TODO: find out how to reset the watcher connection - async stop () { - this.noRestart = true; - } - - // Return k8s object(s) from the cache or null if it does not exist. - getRaw (name) { - var obj = this.objects[name]; - if (!obj) { - return null; - } else { - return JSON.parse(JSON.stringify(obj)); - } - } - - // Fetches the latest object(s) from k8s and updates the cache; then it - // returns the k8s object(s) from the cache or null if it does not exist. - async getRawBypass (name) { - var getObj = null; - - try { - getObj = await this.getEp(name).get(); - } catch (err) { - if (err.code !== 404) { - log.error(`Failed to fetch latest "${name}" from k8s, error: "${err}". 
Will only use the cached values instead.`); - } - } - - if (getObj) { - if (getObj.statusCode === 200) { - const k8sObj = getObj.body; - const cachedObj = this.objects[name]; - - if (!cachedObj) { - // we still haven't processed the "ADDED" event so add it now - this._processEvent({ - type: 'ADDED', - object: k8sObj - }); - } else if (!k8sObj.metadata.generation || cachedObj.metadata.generation < k8sObj.metadata.generation) { - // the object already exists so modify it - this._processEvent({ - type: 'MODIFIED', - object: k8sObj - }); - } - } else { - const code = getObj.statusCode; - log.error(`Failed to fetch latest "${name}" from k8s, code: "${code}". Will only use the cached values instead.`); - } - } - - return this.getRaw(name); - } - - // Return the collection of objects - list () { - return Object.values(this.objects).map((ent) => this.filterCb(ent)); - } - - delayedStart () { - var self = this; - - if (self.startResolve) { - return self.startResolve; - } else { - return new Promise((resolve, reject) => { - self.startResolve = resolve; - }); - } - } - - // Restart the watching process after a timeout - scheduleRestart () { - // We cannot restart while either watcher connection or GET query is still - // in progress. We will get called again when either of them terminates. - // TODO: How to terminate the watcher connection? - // Now we simply rely on server to close the conn after timeout - if (!this.objectStream && !this.getInProg) { - if (!this.noRestart) { - setTimeout(this.start.bind(this), 1000 * this.reconnectDelay); - } - } - } - - // Invoked when there is a watch event (a resource has changed). 
- _processEvent (ev) { - const k8sObj = ev.object; - const name = k8sObj.metadata.name; - const generation = k8sObj.metadata.generation; - const type = ev.type; - - const obj = this.filterCb(k8sObj); - if (obj == null) { - return; // not interested in this object - } - const oldObj = this.objects[name]; - - if (type === 'ADDED' || type === 'MODIFIED') { - this.objects[name] = k8sObj; - if (!oldObj) { - // it is a new object with no previous history - this.emit('new', obj); - // Some objects don't have generation # - } else if (!generation || oldObj.metadata.generation < generation) { - // we assume that if generation # remained the same => no change - // TODO: add 64-bit integer overflow protection - this.emit('mod', obj); - } else if (oldObj.metadata.generation === generation) { - log.trace(`Status of ${this.name} object changed`); - } else { - log.warn(`Ignoring stale ${this.name} object event`); - } - - // TODO: subtle race condition when delete event is related to object which - // existed before we populated the cache.. - } else if (type === 'DELETED') { - if (oldObj) { - delete this.objects[name]; - this.emit('del', obj); - } - } else if (type === 'ERROR') { - log.error(`Error event in ${this.name} watcher: ${JSON.stringify(ev)}`); - } else { - log.error(`Unknown event in ${this.name} watcher: ${JSON.stringify(ev)}`); - } - } -} - -module.exports = Watcher; diff --git a/csi/moac/watcher.ts b/csi/moac/watcher.ts new file mode 100644 index 000000000..b13b54ecf --- /dev/null +++ b/csi/moac/watcher.ts @@ -0,0 +1,555 @@ +// Implementation of a cache for arbitrary k8s custom resource in openebs.io +// api with v1alpha1 version. 
+ +import * as _ from 'lodash'; +import { + CustomObjectsApi, + HttpError, + KubeConfig, + KubernetesObject, + KubernetesListObject, + ListWatch, + V1ListMeta, + Watch, +} from 'client-node-fixed-watcher'; + +const EventEmitter = require('events'); +const log = require('./logger').Logger('watcher'); + +// If listWatch errors out then we restart it after this many msecs. +const RESTART_DELAY: number = 3000; +// We wait this many msecs for an event confirming operation done previously. +const EVENT_TIMEOUT: number = 5000; +const GROUP: string = 'openebs.io'; +const VERSION: string = 'v1alpha1'; + +// Errors generated by api requests are hopelessly useless. We need to add +// a text from http body to them. +function bodyError(prefix: string, error: any): any { + if (error instanceof HttpError) { + error.message = prefix + ': ' + error.body.message; + } else { + error.message = prefix + ': ' + error.message; + } + return error; +} + +// Commonly used metadata attributes. +export class CustomResourceMeta extends V1ListMeta { + name?: string; + namespace?: string; + generation?: number; + finalizers?: string[]; +} + +// Properties of custom resources (all optional so that we can do easy +// conversion from "object" type) +export class CustomResource implements KubernetesObject { + apiVersion?: string; + kind?: string; + metadata?: CustomResourceMeta; + spec?: object; + status?: any; +} + +class TimeoutError extends Error { + constructor() { + super(); + } +} + +// Utility class for wrapping asynchronous operations that once done, need to be +// confirmed by something from outside (i.e. watcher event). If confirmation does +// not arrive on time, then end the operation regardless and let user know. 
+class ConfirmOp { + private id: string; + private timer: NodeJS.Timeout | null; + private timeout: number; + private since: number; + private confirmed: boolean; + private done: boolean; + private resolve?: () => void; + private reject?: (err: any) => void; + + constructor(id: string, timeout: number) { + this.id = id; + this.timeout = timeout; + this.since = 0; + this.timer = null; + this.confirmed = false; + this.done = false; + } + + run(action: () => Promise): Promise { + this.since = (new Date()).getTime(); + if (this.timeout <= 0) { + this.confirmed = true; + } + return new Promise((resolve, reject) => { + this.resolve = resolve; + this.reject = reject; + action() + .then(() => { + this.done = true; + if (!this.confirmed) { + this.timer = setTimeout(() => { + const delta = (new Date()).getTime() - this.since; + log.warn(`Timed out waiting for watcher event on "${this.id}" (${delta}ms)`); + this.timer = null; + reject(new TimeoutError()); + }, this.timeout); + } else { + this._complete(); + } + }) + .catch((err) => { + this.done = true; + this._complete(err); + }); + }); + } + + // Beware that confirm can come before the operation done callback! + confirm() { + this.confirmed = true; + if (this.timeout > 0) { + this._complete(); + } + } + + _complete(err?: any) { + if (!err && (!this.confirmed || !this.done)) return; + + const delta = (new Date()).getTime() - this.since; + log.trace(`The operation on "${this.id}" took ${delta}ms`); + if (this.timer) { + clearTimeout(this.timer); + } + if (err) { + this.reject!(err); + } else { + this.resolve!(); + } + } +} + +// Resource cache keeps track of a k8s custom resource and exposes methods +// for modifying the cache content. +// +// It is a classic operator loop design as seen in i.e. operator-sdk (golang) +// to watch a k8s resource. We utilize k8s client library to take care of low +// level details. +// +// It is a general implementation of watcher which can be used for any resource +// operator. 
The operator should subscribe to "new", "mod" and "del" events that +// are triggered when a resource is added, modified or deleted. +export class CustomResourceCache extends EventEmitter { + name: string; + plural: string; + namespace: string; + waiting: Record; + k8sApi: CustomObjectsApi; + listWatch: ListWatch; + creator: new (obj: CustomResource) => T; + eventHandlers: Record void>; + connected: boolean; + restartDelay: number; + idleTimeout: number; + eventTimeout: number; + timer: any; + + // Create the cache for given namespace and resource name. + // + // @param namespace Namespace of custom resource. + // @param name Name of the resource. + // @param kubeConfig Kube config object. + // @param creator Constructor of the object from custom resource object. + // @param opts Cache/watcher options. + constructor( + namespace: string, + name: string, + kubeConfig: KubeConfig, + creator: new (obj: CustomResource) => T, + opts?: { + restartDelay?: number, + eventTimeout?: number, + idleTimeout?: number + } + ) { + super(); + this.k8sApi = kubeConfig.makeApiClient(CustomObjectsApi); + this.name = name; + this.plural = name + 's'; + this.namespace = namespace; + this.creator = creator; + this.waiting = {}; + this.connected = false; + this.restartDelay = opts?.restartDelay || RESTART_DELAY; + this.eventTimeout = opts?.eventTimeout || EVENT_TIMEOUT; + this.idleTimeout = opts?.idleTimeout || 0; + this.eventHandlers = { + add: this._onEvent.bind(this, 'new'), + update: this._onEvent.bind(this, 'mod'), + delete: this._onEvent.bind(this, 'del'), + }; + + const watch = new Watch(kubeConfig); + this.listWatch = new ListWatch( + `/apis/${GROUP}/${VERSION}/namespaces/${this.namespace}/${this.plural}`, + watch, + async () => { + var resp = await this.k8sApi.listNamespacedCustomObject( + GROUP, + VERSION, + this.namespace, + this.plural); + return { + response: resp.response, + body: resp.body as KubernetesListObject, + }; + }, + false + ); + } + + // Clear idle/restart timer. 
+ _clearTimer() { + if (this.timer) { + clearTimeout(this.timer); + this.timer = undefined; + } + } + // Install a timer that restarts watcher if idle for more than x seconds. + // On Azure AKS we have observed watcher connections that don't get any + // events after some time when idle. + _setIdleTimeout() { + if (this.idleTimeout > 0) { + this._clearTimer(); + this.timer = setTimeout(() => { + this.stop(); + this.start(); + }, this.idleTimeout); + } + } + + // Called upon a watcher event. It unblocks create or update operation if any + // is waiting for the event and propagates the event further. + _onEvent(event: string, cr: CustomResource) { + let name = cr.metadata?.name; + if (name === undefined) { + log.error(`Ignoring event ${event} with object without a name`); + return; + } + log.trace(`Received watcher event ${event} for ${this.name} "${name}"`); + this._setIdleTimeout(); + let confirmOp = this.waiting[name]; + if (confirmOp) { + confirmOp.confirm(); + } + this._doWithObject(cr, (obj) => this.emit(event, obj)); + } + + // Convert custom resource object to desired object swallowing exceptions + // and call callback with the new object. + _doWithObject(obj: CustomResource | undefined, cb: (obj: T) => void): void { + if (obj === undefined) return; + + try { + var newObj = new this.creator(obj); + } catch (e) { + log.error(`Ignoring invalid ${this.name} custom resource: ${e}`); + return; + } + cb(newObj); + } + + // This method does not return until the cache is successfully populated. + // That means that the promise eventually always fulfills (resolves). 
+ start(): Promise { + this.listWatch.on('error', this._onError.bind(this)); + for (let evName in this.eventHandlers) { + this.listWatch.on(evName, this.eventHandlers[evName]); + } + return this.listWatch.start() + .then(() => { + this.connected = true; + log.debug(`${this.name} watcher was started`); + log.trace(`Initial content of the "${this.name}" cache: ` + + this.listWatch.list().map((i: CustomResource) => i.metadata?.name)); + this._setIdleTimeout(); + }) + .catch((err) => { + log.error(`Failed to start ${this.name} watcher: ${err}`) + this.stop(); + log.info(`Restart ${this.name} watcher after ${this.restartDelay}ms...`); + return new Promise((resolve, reject) => { + this.timer = setTimeout(() => { + this.start().then(resolve, reject); + }, this.restartDelay); + }); + }); + } + + // Called when the connection breaks. + _onError(err: any) { + log.error(`Watcher error: ${err}`); + this.stop(); + log.info(`Restarting ${this.name} watcher after ${this.restartDelay}ms...`); + this.timer = setTimeout(() => this.start(), this.restartDelay); + } + + // Deregister all internal event handlers on the watcher. + stop() { + this._clearTimer(); + this.connected = false; + log.debug(`Deregistering "${this.name}" cache event handlers`); + this.listWatch.off('error', this._onError); + for (let evName in this.eventHandlers) { + this.listWatch.off(evName, this.eventHandlers[evName]); + } + this.listWatch.stop(); + } + + isConnected(): boolean { + // should we propagate event to consumers about the reset? + return this.connected; + } + + // Get all objects from the cache. + list(): T[] { + let list: T[] = []; + this.listWatch.list().forEach((item) => { + this._doWithObject(item, (obj) => list.push(obj)); + }); + return list; + } + + // Get object with given name (ID). 
+ get(name: string): T | undefined { + var result; + this._doWithObject(this.listWatch.get(name), (obj) => result = obj); + return result; + } + + // Execute the action and do not return until we receive an event from watcher. + // Otherwise the object in the cache might be stale when we do the next + // modification to it. Set timeout for the case when we never receive the + // event and restart the watcher to get fresh content in that case. + async _waitForEvent(name: string, action: () => Promise) { + this.waiting[name] = new ConfirmOp(name, this.eventTimeout); + try { + await this.waiting[name].run(action); + } catch (err) { + delete this.waiting[name]; + if (err instanceof TimeoutError) { + // restart the cache + this.stop(); + await this.start(); + } else { + throw err; + } + } + } + + // Create the resource and wait for it to be created. + async create(obj: CustomResource) { + let name: string = obj.metadata?.name || ''; + if (!name) { + throw Error("Object does not have a name"); + } + log.trace(`Creating new "${this.name}" resource: ${JSON.stringify(obj)}`); + await this._waitForEvent( + name, + async () => { + try { + await this.k8sApi.createNamespacedCustomObject( + GROUP, + VERSION, + this.namespace, + this.plural, + obj + ); + } catch (err) { + throw bodyError(`Delete of ${this.name} "${name}" failed`, err); + } + } + ); + } + + // Update the resource. The merge callback takes the original version from + // the cache, modifies it and returns the new version of object. The reason + // for this is that sometimes we get stale errors and we must repeat + // the operation with an updated version of the original object. 
+ async update(name: string, merge: (orig: T) => CustomResource | undefined) { + await this._update(name, () => { + let orig = this.get(name); + if (orig === undefined) { + log.warn(`Tried to update ${this.name} "${name}" that does not exist`); + return; + } + return merge(orig); + }); + } + + // Same as above but works with custom resource type rather than user + // defined object. + async _updateCustomResource(name: string, merge: (orig: CustomResource) => CustomResource | undefined) { + await this._update(name, () => { + let orig = this.listWatch.get(name); + if (orig === undefined) { + log.warn(`Tried to update ${this.name} "${name}" that does not exist`); + return; + } + return merge(orig); + }); + } + + // Update the resource and wait for mod event. If update fails due to an error + // we restart the watcher and retry the operation. If event does not come, + // we restart the watcher. + async _update( + name: string, + getAndMerge: () => CustomResource | undefined, + ) { + for (let retries = 1; retries >= 0; retries -= 1) { + let obj = getAndMerge(); + if (obj === undefined) { + // likely means that the props are the same - nothing to do + return; + } + log.trace(`Updating ${this.name} "${name}": ${JSON.stringify(obj)}`); + try { + await this._waitForEvent( + name, + async () => { + await this.k8sApi.replaceNamespacedCustomObject( + GROUP, + VERSION, + this.namespace, + this.plural, + name, + obj! + ); + } + ); + break; + } catch (err) { + err = bodyError(`Update of ${this.name} "${name}" failed`, err); + if (retries == 0) { + throw err; + } + log.warn(`${err} (retrying ...)`); + this.stop(); + await this.start(); + } + } + } + + // Update status of the resource. Unlike in case create/update we don't have + // to wait for confirming event because generation number is not incremented + // upon status change. 
+ async updateStatus(name: string, merge: (orig: T) => CustomResource | undefined) { + for (let retries = 1; retries >= 0; retries -= 1) { + let orig = this.get(name); + if (orig === undefined) { + log.warn(`Tried to update status of ${this.name} "${name}" but it is gone`); + return; + } + let obj = merge(orig); + if (obj === undefined) { + // likely means that the props are the same - nothing to do + return; + } + log.trace(`Updating status of ${this.name} "${name}": ${JSON.stringify(obj.status)}`); + try { + await this._waitForEvent( + name, + async () => { + await this.k8sApi.replaceNamespacedCustomObjectStatus( + GROUP, + VERSION, + this.namespace, + this.plural, + name, + obj! + ); + } + ); + break; + } catch (err) { + err = bodyError(`Status update of ${this.name} "${name}" failed`, err); + if (retries == 0) { + throw err; + } + log.warn(`${err} (retrying ...)`); + this.stop(); + await this.start(); + } + } + } + + // Delete the resource. + async delete(name: string) { + let orig = this.get(name); + if (orig === undefined) { + log.warn(`Tried to delete ${this.name} "${name}" that does not exist`); + return new Promise((resolve) => resolve()); + } + log.trace(`Deleting ${this.name} "${name}"`); + await this._waitForEvent( + name, + async () => { + try { + this.k8sApi.deleteNamespacedCustomObject( + GROUP, + VERSION, + this.namespace, + this.plural, + name + ); + } catch (err) { + throw bodyError(`Delete of ${this.name} "${name}" failed`, err); + } + } + ); + } + + // Add finalizer to given resource if not already there. 
+ async addFinalizer(name: string, finalizer: string) { + await this._updateCustomResource(name, (orig) => { + let finalizers = orig.metadata?.finalizers; + let newFinalizers = finalizers || []; + if (newFinalizers.indexOf(finalizer) >= 0) { + // it's already there + return; + } + newFinalizers = [finalizer].concat(newFinalizers); + let obj = _.cloneDeep(orig); + if (obj.metadata === undefined) { + throw new Error(`Resource ${this.name} "${name}" without metadata`) + } + obj.metadata.finalizers = newFinalizers; + return obj; + }); + } + + // Remove finalizer from the resource in case it's there. + async removeFinalizer(name: string, finalizer: string) { + await this._updateCustomResource(name, (orig) => { + let finalizers = orig.metadata?.finalizers; + let newFinalizers = finalizers || []; + let idx = newFinalizers.indexOf(finalizer); + if (idx < 0) { + // it's not there + return; + } + newFinalizers.splice(idx, 1); + let obj = _.cloneDeep(orig); + if (obj.metadata === undefined) { + throw new Error(`Resource ${this.name} "${name}" without metadata`) + } + obj.metadata.finalizers = newFinalizers; + return obj; + }); + } +} From 876d49d97cf99ae37c639a8f5e05d66956672cd3 Mon Sep 17 00:00:00 2001 From: Antonin Kral Date: Thu, 19 Nov 2020 15:44:51 +0100 Subject: [PATCH 61/92] Point the build icon to `master` Using `develop` will result in sometimes showing "aborted" which is sort of misleading. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index db728a6f7..271c7ebb4 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # MayaStor [![Releases](https://img.shields.io/github/release/openebs/Mayastor/all.svg?style=flat-square)](https://github.com/openebs/Mayastor/releases) -[![CI-basic](https://mayastor-ci.mayadata.io/buildStatus/icon?job=Mayastor%2Fdevelop)](https://mayastor-ci.mayadata.io/blue/organizations/jenkins/Mayastor/activity/) +[![CI-basic](https://mayastor-ci.mayadata.io/buildStatus/icon?job=Mayastor%2Fmaster)](https://mayastor-ci.mayadata.io/blue/organizations/jenkins/Mayastor/activity/) [![Slack](https://img.shields.io/badge/JOIN-SLACK-blue)](https://kubernetes.slack.com/messages/openebs) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor?ref=badge_shield) [![built with nix](https://builtwithnix.org/badge.svg)](https://builtwithnix.org) From 563eb6190bad941c8dba0fbea1cef61abd766a33 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Thu, 12 Nov 2020 16:53:45 +0000 Subject: [PATCH 62/92] Add e2e pvc stress test. 
CAS-500 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do { Scenario: A Mayastor deployment should respond correctly to new Mayastor PVC declarations Given: Mayastor is deployed on a Kubernetes cluster And: A StorageClass resource is defined for the Mayastor CSI plugin provisioner When: A new, valid PVC resource which references that StorageClass is declared via a k8s client Then: A corresponding PV should be dynamically provisioned And: The reported status of the PVC and PV resources should become ‘Bound’ And: A corresponding MayastorVoume CR should be created And: The reported status of the MayastorVolume should become 'healthy' Scenario: A Mayastor deployment should respond correctly to the deletion of PVC resources Given: A Mayastor deployment with PVs which have been dynamically provisioned by the Mayastor CSI plugin When: An existing PVC resource is deleted via a k8s client And: The PVC is not mounted by a pod And: The PVC references a StorageClass which is provisioned by Mayastor Then: The PVC and its corresponding PV should be removed And: The MayastorVolume CR should be removed } While (<100 cycles) --- mayastor-test/e2e/nightly/README.md | 33 ++ .../e2e/nightly/pvc_stress/pvc_stress_test.go | 391 ++++++++++++++++++ mayastor-test/e2e/nightly/test.sh | 12 + 3 files changed, 436 insertions(+) create mode 100644 mayastor-test/e2e/nightly/README.md create mode 100644 mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go create mode 100755 mayastor-test/e2e/nightly/test.sh diff --git a/mayastor-test/e2e/nightly/README.md b/mayastor-test/e2e/nightly/README.md new file mode 100644 index 000000000..513072921 --- /dev/null +++ b/mayastor-test/e2e/nightly/README.md @@ -0,0 +1,33 @@ +## About +Long running stress e2e tests for mayastor + +To run the tests use the `test.sh` file. + +When adding a test make sure to bump the timeout value suitably. 
+ +## Tests +### pvc_stress +``` +Do { + +Scenario: A Mayastor deployment should respond correctly to new Mayastor PVC declarations + Given: Mayastor is deployed on a Kubernetes cluster + And: A StorageClass resource is defined for the Mayastor CSI plugin provisioner + When: A new, valid PVC resource which references that StorageClass is declared via a k8s client + Then: A corresponding PV should be dynamically provisioned + And: The reported status of the PVC and PV resources should become ‘Bound’ + And: A corresponding MayastorVoume CR should be created + And: The reported status of the MayastorVolume should become 'healthy' + +Scenario: A Mayastor deployment should respond correctly to the deletion of PVC resources + +Given: A Mayastor deployment with PVs which have been dynamically provisioned by the Mayastor CSI plugin +When: An existing PVC resource is deleted via a k8s client + And: The PVC is not mounted by a pod + And: The PVC references a StorageClass which is provisioned by Mayastor +Then: The PVC and its corresponding PV should be removed + And: x The MayastorVolume CR should be removed + +} While (<100 cycles) +``` + diff --git a/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go b/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go new file mode 100644 index 000000000..bcab85213 --- /dev/null +++ b/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go @@ -0,0 +1,391 @@ +// JIRA: CAS-500 +package pvc_stress_test + +import ( + "context" + "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/deprecated/scheme" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "reflect" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var cfg *rest.Config +var k8sClient client.Client +var kubeInt kubernetes.Interface +var k8sManager ctrl.Manager +var testEnv *envtest.Environment +var dynamicClient dynamic.Interface +var defTimeoutSecs = "30s" + +// Status part of the mayastor volume CRD +type mayastorVolStatus struct { + state string + reason string + node string + /* Not required for now. + nexus struct { + children [ ]map[string]string + deviceUri string + state string + } + replicas []map[string]string + */ +} + +func getMSV(uuid string) *mayastorVolStatus { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + msv, err := dynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + if err != nil { + fmt.Println(err) + return nil + } + + if msv == nil { + return nil + } + + status, found, err := unstructured.NestedFieldCopy(msv.Object, "status") + if err != nil { + fmt.Println(err) + return nil + } + + if !found { + return nil + } + + msVol := mayastorVolStatus{} + v := reflect.ValueOf(status) + if v.Kind() == reflect.Map { + for _, key := range v.MapKeys() { + sKey := key.Interface().(string) + val := v.MapIndex(key) + switch sKey { + case "state": + msVol.state = val.Interface().(string) + break + case "reason": + msVol.reason = val.Interface().(string) + break + case "node": + msVol.node = val.Interface().(string) + break + } + } + } + return &msVol +} + +// Check for a deleted Mayastor Volume, +// the object does not exist if deleted 
+func isMSVDeleted(uuid string) bool { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + msv, err := dynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "mayastorvolumes.openebs.io") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + + Expect(err).To(BeNil()) + Expect(msv).ToNot(BeNil()) + return false +} + +// Check for a deleted Persistent Volume Claim, +// either the object does not exist +// or the status phase is invalid. +func isPVCDeleted(volName string) bool { + pvc, err := kubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{}) + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "persistentvolumeclaims") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + // After the PVC has been deleted it may still accessible, but status phase will be invalid + Expect(err).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + switch pvc.Status.Phase { + case + corev1.ClaimBound, + corev1.ClaimPending, + corev1.ClaimLost: + return false + default: + return true + } +} + +// Check for a deleted Persistent Volume, +// either the object does not exist +// or the status phase is invalid. 
+func isPVDeleted(volName string) bool { + pv, err := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{}) + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "persistentvolumes") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + // After the PV has been deleted it may still accessible, but status phase will be invalid + Expect(err).To(BeNil()) + Expect(pv).ToNot(BeNil()) + switch pv.Status.Phase { + case + corev1.VolumeBound, + corev1.VolumeAvailable, + corev1.VolumeFailed, + corev1.VolumePending, + corev1.VolumeReleased: + return false + default: + return true + } +} + +// Retrieve status phase of a Persistent Volume Claim +func getPvcClaimStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) { + pvc, getPvcErr := kubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + return pvc.Status.Phase +} + +// Retrieve status phase of a Persistent Volume +func getPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) { + pv, getPvErr := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{}) + Expect(getPvErr).To(BeNil()) + Expect(pv).ToNot(BeNil()) + return pv.Status.Phase +} + +// Retrieve the state of a Mayastor Volume +func getMsvState(uuid string) (state string) { + msv := getMSV(uuid) + Expect(msv).ToNot(BeNil()) + return msv.state +} + +// Create a PVC and verify that +// 1. The PVC status transitions to bound, +// 2. The associated PV is created and its status transitions bound +// 3. The associated MV is created and has a State "healthy" +// then Delete the PVC and verify that +// 1. The PVC is deleted +// 2. The associated PV is deleted +// 3. 
The associated MV is deleted +func testPVC(volName string, scName string) { + fmt.Printf("%s, %s\n", volName, scName) + // PVC create options + createOpts := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: volName, + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("64Mi"), + }, + }, + }, + } + + // Create the PVC. + PVCApi := kubeInt.CoreV1().PersistentVolumeClaims + _, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{}) + Expect(createErr).To(BeNil()) + + // Confirm the PVC has been created. + pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PVC to be bound. + Eventually(func() corev1.PersistentVolumeClaimPhase { + return getPvcClaimStatusPhase(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(corev1.ClaimBound)) + + // Refresh the PVC contents, so that we can get the PV name. + pvc, getPvcErr = PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PV to be provisioned + Eventually(func() *corev1.PersistentVolume { + pv, getPvErr := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if getPvErr != nil { + return nil + } + return pv + + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Not(BeNil())) + + // Wait for the PV to be bound. 
+ Eventually(func() corev1.PersistentVolumePhase { + return getPvStatusPhase(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(corev1.VolumeBound)) + + msv := getMSV(string(pvc.ObjectMeta.UID)) + Expect(msv).ToNot(BeNil()) + Expect(msv.state).Should(Equal("healthy")) + + // Wait for the MSV to be healthy + Eventually(func() string { + return getMsvState(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal("healthy")) + + // Delete the PVC + deleteErr := PVCApi("default").Delete(context.TODO(), volName, metav1.DeleteOptions{}) + Expect(deleteErr).To(BeNil()) + + // Wait for the PVC to be deleted. + Eventually(func() bool { + return isPVCDeleted(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the PV to be deleted. + Eventually(func() bool { + return isPVDeleted(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the MSV to be deleted. + Eventually(func() bool { + return isMSVDeleted(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) +} + +func stressTestPVC() { + for ix := 0; ix < 100; ix++ { + testPVC(fmt.Sprintf("stress-pvc-nvmf-%d", ix), "mayastor-nvmf") + testPVC(fmt.Sprintf("stress-pvc-iscsi-%d", ix), "mayastor-iscsi") + // FIXME: Without this delay getPvcClaimStatusPhase returns Pending + // even though kubectl shows that the pvc is Bound. 
+ //pause() + } +} + +func TestPVCStress(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "PVC Stress Test Suite") +} + +var _ = Describe("Mayastor PVC Stress test", func() { + It("should stress test use of PVCs provisioned over iSCSI and NVMe-of", func() { + stressTestPVC() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + useCluster := true + testEnv = &envtest.Environment{ + UseExistingCluster: &useCluster, + AttachControlPlaneOutput: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + go func() { + err = k8sManager.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred()) + }() + + mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mgrSyncCtxCancel() + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + fmt.Println("Failed to sync") + } + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + restConfig := config.GetConfigOrDie() + Expect(restConfig).ToNot(BeNil()) + + kubeInt = kubernetes.NewForConfigOrDie(restConfig) + Expect(kubeInt).ToNot(BeNil()) + + dynamicClient = dynamic.NewForConfigOrDie(restConfig) + Expect(dynamicClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).ToNot(HaveOccurred())
+})
diff --git a/mayastor-test/e2e/nightly/test.sh b/mayastor-test/e2e/nightly/test.sh
new file mode 100755
index 000000000..70eeca678
--- /dev/null
+++ b/mayastor-test/e2e/nightly/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# For stress tests the default go test timeout of 10 minutes may be
+# insufficient.
+# We start with a timeout value of 0 and bump up the value by adding
+# the number of seconds for each test.
+timeout=0
+#pvc_stress run duration is around 7 minutes, add 10 minutes to handle
+#unexpected delays.
+timeout=$(( timeout + 600 ))
+
+go test ./... --timeout "${timeout}s"

From 4f92eebab82985639f16eb8388fc918ae4b0bc25 Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Thu, 19 Nov 2020 17:02:18 +0000
Subject: [PATCH 63/92] Enabled missed tests from control plane

---
 rest/tests/v0_test.rs      | 15 +++++++++------
 scripts/cargo-test.sh      |  6 +++++-
 services/common/src/lib.rs |  4 ++--
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs
index cbdd62317..06f0d6bb0 100644
--- a/rest/tests/v0_test.rs
+++ b/rest/tests/v0_test.rs
@@ -51,7 +51,7 @@ async fn client() -> Result<(), Box<dyn std::error::Error>> {
         .add_container_spec(
             ContainerSpec::new(
                 "rest",
-                Binary::from_nix("rest").with_args(nats_arg.clone()),
+                Binary::from_dbg("rest").with_args(nats_arg.clone()),
             )
             .with_portmap("8080", "8080"),
         )
@@ -65,12 +65,13 @@ async fn client() -> Result<(), Box<dyn std::error::Error>> {
         .build_only()
         .await?;
 
-    orderly_start(&test).await?;
-
-    client_test(mayastor, &test).await?;
+    let result = client_test(mayastor, &test).await;
 
     // run with --nocapture to see all the logs
     test.logs_all().await?;
+
+    result?;
+
     Ok(())
 }
 
@@ -78,8 +79,10 @@ async fn client_test(
     mayastor: &str,
     test: &ComposeTest,
 ) -> Result<(), Box<dyn std::error::Error>> {
-    let client = ActixRestClient::new("https://localhost:8080").unwrap().v0();
-    let nodes = client.get_nodes().await.unwrap();
orderly_start(&test).await?; + + let client = ActixRestClient::new("https://localhost:8080")?.v0(); + let nodes = client.get_nodes().await?; assert_eq!(nodes.len(), 1); assert_eq!( nodes.first().unwrap(), diff --git a/scripts/cargo-test.sh b/scripts/cargo-test.sh index c9ee912eb..ac04402bb 100755 --- a/scripts/cargo-test.sh +++ b/scripts/cargo-test.sh @@ -2,5 +2,9 @@ set -euxo pipefail export PATH=$PATH:${HOME}/.cargo/bin ( cd jsonrpc && cargo test ) -( cd mayastor && cargo test -- --test-threads=1 ) +# test dependencies +cargo build --bins +for test in composer mayastor services rest; do + ( cd ${test} && cargo test -- --test-threads=1 ) +done ( cd nvmeadm && cargo test ) diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index e0969a084..b15f26302 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -142,11 +142,11 @@ impl Service { /// /// Example: /// # async fn main() { - /// # Service::builder(cli_args.url, Channel::Registry) + /// Service::builder(cli_args.url, Channel::Registry) /// .with_shared_state(NodeStore::default()) /// .with_shared_state(More {}) /// .with_subscription(ServiceHandler::::default()) - /// # .run().await; + /// .run().await; /// /// # async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { /// let store: &NodeStore = args.context.get_state(); From 1e3636e092c63308645773ddf8ff59b829fa1226 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 19 Nov 2020 14:23:24 +0000 Subject: [PATCH 64/92] Add back logs from test code Previous change set the test logs to mayastor=DEBUG to silence the h2 logs but this completely removes logs from other components including from the test components. Now set mayastor to DEBUG, but everything else to info. 
--- mayastor/tests/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index c63b5f817..5bbf29a5b 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -134,7 +134,7 @@ pub fn mayastor_test_init() { panic!("binary: {} not present in path", binary); } }); - logger::init("mayastor=DEBUG"); + logger::init("info,mayastor=DEBUG"); mayastor::CPS_INIT!(); } From ae6810503df965e86b5d4fbf4eac960bba810892 Mon Sep 17 00:00:00 2001 From: Antonin Kral Date: Fri, 20 Nov 2020 13:02:15 +0100 Subject: [PATCH 65/92] Explicitly pin targets to different cores to improve stability of tests --- mayastor/tests/io_job.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs index e96ea5038..6c0522435 100644 --- a/mayastor/tests/io_job.rs +++ b/mayastor/tests/io_job.rs @@ -10,7 +10,7 @@ use mayastor::{ use rpc::mayastor::{BdevShareRequest, BdevUri}; pub mod common; -use common::compose::{self, ComposeTest, MayastorTest}; +use common::compose::{self, Binary, ComposeTest, MayastorTest}; use mayastor::core::io_driver::JobQueue; static DOCKER_COMPOSE: OnceCell = OnceCell::new(); @@ -137,11 +137,19 @@ async fn io_driver() { let queue = Arc::new(JobQueue::new()); // create the docker containers + // we are pinning them to 3rd and 4th core respectively to improve stability + // of the test. Be aware that default docker container cpuset is 0-3! 
let compose = compose::Builder::new() .name("cargo-test") .network("10.1.0.0/16") - .add_container("nvmf-target1") - .add_container("nvmf-target2") + .add_container_bin( + "nvmf-target1", + Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), + ) + .add_container_bin( + "nvmf-target2", + Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), + ) .with_clean(true) .build() .await From bf34a81fcc61f507cd9eceec62c1ac721eb07199 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Thu, 12 Nov 2020 21:08:18 +0100 Subject: [PATCH 66/92] test: add IO failure test to validate admin queue timeouts --- composer/src/lib.rs | 36 +- csi/src/dev/nvmf.rs | 19 +- mayastor/src/bdev/mod.rs | 1 + mayastor/src/bdev/nexus/nexus_bdev.rs | 60 +++- .../src/bdev/nexus/nexus_bdev_children.rs | 12 +- mayastor/src/bdev/nexus/nexus_child.rs | 19 +- .../src/bdev/nexus/nexus_child_error_store.rs | 152 ++++---- mayastor/src/bdev/nexus/nexus_fn_table.rs | 43 +-- mayastor/src/bdev/nexus/nexus_io.rs | 260 +++++++++++--- mayastor/src/bdev/nexus/nexus_module.rs | 2 +- mayastor/src/core/bdev.rs | 6 +- mayastor/src/core/descriptor.rs | 16 +- mayastor/src/core/io_driver.rs | 13 +- mayastor/src/core/mod.rs | 5 +- mayastor/src/core/nvme.rs | 113 ++++++ mayastor/src/logger.rs | 4 +- mayastor/src/lvs/lvs_pool.rs | 15 +- mayastor/src/subsys/config/mod.rs | 2 +- mayastor/src/subsys/config/opts.rs | 32 +- mayastor/src/subsys/mod.rs | 2 +- mayastor/src/subsys/nvmf/subsystem.rs | 9 +- mayastor/tests/bdev_test.rs | 337 ++++++++++++++++++ mayastor/tests/common/compose.rs | 12 +- mayastor/tests/error_count.rs | 2 +- mayastor/tests/error_count_retry.rs | 2 +- mayastor/tests/error_fault_child.rs | 2 +- mayastor/tests/error_store.rs | 276 +++++++------- mayastor/tests/nexus_add_remove.rs | 250 +++++++++++++ spdk-sys/build.sh | 2 +- 29 files changed, 1324 insertions(+), 380 deletions(-) create mode 100644 mayastor/src/core/nvme.rs create mode 100644 mayastor/tests/bdev_test.rs create mode 100644 
mayastor/tests/nexus_add_remove.rs diff --git a/composer/src/lib.rs b/composer/src/lib.rs index 7c40ef45c..d861c4061 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -230,6 +230,8 @@ pub struct Builder { network: String, /// delete the container and network when dropped clean: bool, + /// destroy existing containers if any + prune: bool, } impl Default for Builder { @@ -246,6 +248,7 @@ impl Builder { containers: Default::default(), network: "10.1.0.0".to_string(), clean: true, + prune: true, } } @@ -285,6 +288,10 @@ impl Builder { self } + pub fn with_prune(mut self, enable: bool) -> Builder { + self.prune = enable; + self + } /// build the config and start the containers pub async fn build( self, @@ -326,6 +333,7 @@ impl Builder { ipam, label: format!("io.mayastor.test.{}", self.name), clean: self.clean, + prune: self.prune, }; compose.network_id = @@ -375,6 +383,7 @@ pub struct ComposeTest { label: String, /// automatically clean up the things we have created for this test clean: bool, + pub prune: bool, } impl Drop for ComposeTest { @@ -519,6 +528,29 @@ impl ComposeTest { spec: &ContainerSpec, ipv4: &str, ) -> Result<(), Error> { + if self.prune { + let _ = self + .docker + .stop_container( + &spec.name, + Some(StopContainerOptions { + t: 0, + }), + ) + .await; + let _ = self + .docker + .remove_container( + &spec.name, + Some(RemoveContainerOptions { + v: false, + force: false, + link: false, + }), + ) + .await; + } + let host_config = HostConfig { binds: Some(vec![ format!("{}:{}", self.srcdir, self.srcdir), @@ -739,7 +771,7 @@ impl ComposeTest { handles.push( RpcHandle::connect( v.0.clone(), - format!("{}:10124", v.1.1).parse::().unwrap(), + format!("{}:10124", v.1 .1).parse::().unwrap(), ) .await?, ); @@ -753,7 +785,7 @@ impl ComposeTest { match self.containers.iter().find(|&c| c.0 == name) { Some(container) => Ok(RpcHandle::connect( container.0.clone(), - format!("{}:10124", container.1.1) + format!("{}:10124", container.1 .1) .parse::() 
.unwrap(), ) diff --git a/csi/src/dev/nvmf.rs b/csi/src/dev/nvmf.rs index 449a92159..7f88b553c 100644 --- a/csi/src/dev/nvmf.rs +++ b/csi/src/dev/nvmf.rs @@ -76,15 +76,12 @@ impl Attach for NvmfAttach { if let Err(error) = nvmeadm::nvmf_discovery::connect(&self.host, self.port, &self.nqn) { - match (error) { - nvmeadm::error::NvmeError::ConnectInProgress => return Ok(()), + return match error { + nvmeadm::error::NvmeError::ConnectInProgress => Ok(()), _ => { - return Err(DeviceError::from(format!( - "connect failed: {}", - error - ))) + Err(DeviceError::from(format!("connect failed: {}", error))) } - } + }; } Ok(()) @@ -124,10 +121,6 @@ impl NvmfDetach { #[tonic::async_trait] impl Detach for NvmfDetach { - fn devname(&self) -> DeviceName { - self.name.clone() - } - async fn detach(&self) -> Result<(), DeviceError> { if nvmeadm::nvmf_discovery::disconnect(&self.nqn)? == 0 { return Err(DeviceError::from(format!( @@ -138,4 +131,8 @@ impl Detach for NvmfDetach { Ok(()) } + + fn devname(&self) -> DeviceName { + self.name.clone() + } } diff --git a/mayastor/src/bdev/mod.rs b/mayastor/src/bdev/mod.rs index 648ca8a9a..442a90709 100644 --- a/mayastor/src/bdev/mod.rs +++ b/mayastor/src/bdev/mod.rs @@ -12,6 +12,7 @@ pub use nexus::{ nexus_child::{ChildState, Reason}, nexus_child_error_store::{ActionType, NexusErrStore, QueryType}, nexus_child_status_config, + nexus_io::Bio, nexus_label::{GPTHeader, GptEntry}, nexus_metadata_content::{ NexusConfig, diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 09c377c64..f12e98b52 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -42,18 +42,18 @@ use crate::{ instances, nexus_channel::{DREvent, NexusChannel, NexusChannelInner}, nexus_child::{ChildError, ChildState, NexusChild}, - nexus_io::{io_status, nvme_admin_opc, Bio}, + nexus_io::{nvme_admin_opc, Bio, IoStatus, IoType}, nexus_label::LabelError, nexus_nbd::{NbdDisk, NbdError}, }, }, - 
core::{Bdev, CoreError, DmaError, Reactor, Share}, + core::{Bdev, CoreError, DmaError, Protocol, Reactor, Share}, ffihelper::errno_result_from_i32, lvs::Lvol, nexus_uri::{bdev_destroy, NexusBdevError}, rebuild::RebuildError, subsys, - subsys::Config, + subsys::{Config, NvmfSubsystem}, }; /// Obtain the full error chain @@ -308,7 +308,7 @@ pub struct Nexus { /// raw pointer to bdev (to destruct it later using Box::from_raw()) bdev_raw: *mut spdk_bdev, /// represents the current state of the Nexus - pub(super) state: NexusState, + pub(super) state: std::sync::Mutex, /// Dynamic Reconfigure event pub dr_complete_notify: Option>, /// the offset in num blocks where the data partition starts @@ -391,6 +391,7 @@ impl Nexus { child_bdevs: Option<&[String]>, ) -> Box { let mut b = Box::new(spdk_bdev::default()); + b.name = c_str!(name); b.product_name = c_str!(NEXUS_PRODUCT_ID); b.fn_table = nexus::fn_table().unwrap(); @@ -406,7 +407,7 @@ impl Nexus { child_count: 0, children: Vec::new(), bdev: Bdev::from(&*b as *const _ as *mut spdk_bdev), - state: NexusState::Init, + state: std::sync::Mutex::new(NexusState::Init), bdev_raw: Box::into_raw(b), dr_complete_notify: None, data_ent_offset: 0, @@ -439,7 +440,7 @@ impl Nexus { "{} Transitioned state from {:?} to {:?}", self.name, self.state, state ); - self.state = state; + *self.state.lock().unwrap() = state; state } /// returns the size in bytes of the nexus instance @@ -502,9 +503,9 @@ impl Nexus { pub(crate) fn destruct(&mut self) -> NexusState { // a closed operation might already be in progress calling unregister // will trip an assertion within the external libraries - if self.state == NexusState::Closed { + if *self.state.lock().unwrap() == NexusState::Closed { trace!("{}: already closed", self.name); - return self.state; + return NexusState::Closed; } trace!("{}: closing, from state: {:?} ", self.name, self.state); @@ -597,11 +598,40 @@ impl Nexus { } } + /// resume IO to the bdev + pub(crate) async fn resume(&self) -> 
Result<(), Error> { + match self.shared() { + Some(Protocol::Nvmf) => { + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + subsystem.resume().await.unwrap(); + } + } + _ => {} + } + + Ok(()) + } + + /// suspend any incoming IO to the bdev pausing the controller allows us to + /// handle internal events and which is a protocol feature. + pub(crate) async fn pause(&self) -> Result<(), Error> { + match self.shared() { + Some(Protocol::Nvmf) => { + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + subsystem.pause().await.unwrap(); + } + } + _ => {} + } + + Ok(()) + } + /// register the bdev with SPDK and set the callbacks for io channel /// creation. Once this function is called, the device is visible and can /// be used for IO. pub(crate) async fn register(&mut self) -> Result<(), Error> { - assert_eq!(self.state, NexusState::Init); + assert_eq!(*self.state.lock().unwrap(), NexusState::Init); unsafe { spdk_io_device_register( @@ -657,7 +687,7 @@ impl Nexus { /// determine if any of the children do not support the requested /// io type. Break the loop on first occurrence. 
/// TODO: optionally add this check during nexus creation - pub fn io_is_supported(&self, io_type: u32) -> bool { + pub fn io_is_supported(&self, io_type: IoType) -> bool { self.children .iter() .filter_map(|e| e.bdev.as_ref()) @@ -676,13 +706,13 @@ impl Nexus { // if any child IO has failed record this within the io context if !success { trace!( - "child IO {:?} ({}) of parent {:?} failed", + "child IO {:?} ({:#?}) of parent {:?} failed", chio, chio.io_type(), pio ); - pio.ctx_as_mut_ref().status = io_status::FAILED; + pio.ctx_as_mut_ref().status = IoStatus::Failed.into(); } pio.assess(&mut chio, success); // always free the child IO @@ -695,7 +725,7 @@ impl Nexus { let pio_ctx = pio.ctx_as_mut_ref(); if !success { - pio_ctx.status = io_status::FAILED; + pio_ctx.status = IoStatus::Failed.into(); } // As there is no child IO, perform the IO accounting that Bio::assess @@ -704,7 +734,7 @@ impl Nexus { debug_assert!(pio_ctx.in_flight >= 0); if pio_ctx.in_flight == 0 { - if pio_ctx.status == io_status::FAILED { + if IoStatus::from(pio_ctx.status) == IoStatus::Failed { pio_ctx.io_attempts -= 1; if pio_ctx.io_attempts == 0 { pio.fail(); @@ -976,7 +1006,7 @@ impl Nexus { /// No child is online so the nexus is faulted /// This may be made more configurable in the future pub fn status(&self) -> NexusStatus { - match self.state { + match *self.state.lock().unwrap() { NexusState::Init => NexusStatus::Degraded, NexusState::Closed => NexusStatus::Faulted, NexusState::Open => { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index 6cdc7ff3b..4bc74c49d 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -58,7 +58,7 @@ impl Nexus { /// register children with the nexus, only allowed during the nexus init /// phase pub fn register_children(&mut self, dev_name: &[String]) { - assert_eq!(self.state, NexusState::Init); + assert_eq!(*self.state.lock().unwrap(), 
NexusState::Init); self.child_count = dev_name.len() as u32; dev_name .iter() @@ -79,7 +79,7 @@ impl Nexus { &mut self, uri: &str, ) -> Result<(), NexusBdevError> { - assert_eq!(self.state, NexusState::Init); + assert_eq!(*self.state.lock().unwrap(), NexusState::Init); let name = bdev_create(&uri).await?; self.children.push(NexusChild::new( uri.to_string(), @@ -550,6 +550,14 @@ impl Nexus { blockcnt } + /// lookup a child by its name + pub fn child_lookup(&self, name: &str) -> Option<&NexusChild> { + self.children + .iter() + .filter(|c| c.bdev.as_ref().is_some()) + .find(|c| c.bdev.as_ref().unwrap().name() == name) + } + pub fn get_child_by_name( &mut self, name: &str, diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 70469c216..49b77a2b3 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -20,6 +20,7 @@ use crate::{ rebuild::{ClientOperations, RebuildJob}, subsys::Config, }; +use crossbeam::atomic::AtomicCell; #[derive(Debug, Snafu)] pub enum ChildError { @@ -54,7 +55,7 @@ pub enum ChildError { }, } -#[derive(Debug, Serialize, PartialEq, Deserialize, Copy, Clone)] +#[derive(Debug, Serialize, PartialEq, Deserialize, Eq, Copy, Clone)] pub enum Reason { /// no particular reason for the child to be in this state /// this is typically the init state @@ -88,7 +89,7 @@ impl Display for Reason { } } -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq)] pub enum ChildState { /// child has not been opened, but we are in the process of opening it Init, @@ -128,7 +129,7 @@ pub struct NexusChild { pub(crate) desc: Option>, /// current state of the child #[serde(skip_serializing)] - state: ChildState, + pub state: AtomicCell, /// record of most-recent IO errors #[serde(skip_serializing)] pub(crate) err_store: Option, @@ -153,16 +154,16 @@ impl Display for NexusChild { } impl NexusChild { - pub(crate) 
fn set_state(&mut self, state: ChildState) { + pub(crate) fn set_state(&self, state: ChildState) { trace!( "{}: child {}: state change from {} to {}", self.parent, self.name, - self.state.to_string(), + self.state.load().to_string(), state.to_string(), ); - self.state = state; + self.state.store(state); } /// Open the child in RW mode and claim the device to be ours. If the child @@ -280,7 +281,7 @@ impl NexusChild { ) -> Result { // Only online a child if it was previously set offline. Check for a // "Closed" state as that is what offlining a child will set it to. - match self.state { + match self.state.load() { ChildState::Closed => { // Re-create the bdev as it will have been previously destroyed. let name = @@ -307,7 +308,7 @@ impl NexusChild { /// returns the state of the child pub fn state(&self) -> ChildState { - self.state + self.state.load() } pub(crate) fn rebuilding(&self) -> bool { @@ -372,7 +373,7 @@ impl NexusChild { bdev, parent, desc: None, - state: ChildState::Init, + state: AtomicCell::new(ChildState::Init), err_store: None, } } diff --git a/mayastor/src/bdev/nexus/nexus_child_error_store.rs b/mayastor/src/bdev/nexus/nexus_child_error_store.rs index a02113ff1..a00ef43f5 100644 --- a/mayastor/src/bdev/nexus/nexus_child_error_store.rs +++ b/mayastor/src/bdev/nexus/nexus_child_error_store.rs @@ -5,7 +5,7 @@ use std::{ use serde::export::{fmt::Error, Formatter}; -use spdk_sys::{spdk_bdev, spdk_bdev_io_type}; +use spdk_sys::spdk_bdev; use crate::{ bdev::{ @@ -17,7 +17,7 @@ use crate::{ Nexus, }, nexus_child::{ChildState, NexusChild}, - nexus_io::{io_status, io_type}, + nexus_io::{IoStatus, IoType}, }, Reason, }, @@ -30,15 +30,15 @@ pub struct NexusChildErrorRecord { io_offset: u64, io_num_blocks: u64, timestamp: Instant, - io_error: i32, - io_op: spdk_bdev_io_type, + io_error: IoStatus, + io_op: IoType, } impl Default for NexusChildErrorRecord { fn default() -> Self { Self { - io_op: 0, - io_error: 0, + io_op: IoType::Invalid, + io_error: 
IoStatus::Failed, io_offset: 0, io_num_blocks: 0, timestamp: Instant::now(), // for lack of another suitable default @@ -67,22 +67,22 @@ pub enum ActionType { } impl NexusErrStore { - pub const READ_FLAG: u32 = 1 << (io_type::READ - 1); - pub const WRITE_FLAG: u32 = 1 << (io_type::WRITE - 1); - pub const UNMAP_FLAG: u32 = 1 << (io_type::UNMAP - 1); - pub const FLUSH_FLAG: u32 = 1 << (io_type::FLUSH - 1); - pub const RESET_FLAG: u32 = 1 << (io_type::RESET - 1); + pub const READ_FLAG: u32 = 1 << (IoType::Read as u32 - 1); + pub const WRITE_FLAG: u32 = 1 << (IoType::Write as u32 - 1); + pub const UNMAP_FLAG: u32 = 1 << (IoType::Unmap as u32 - 1); + pub const FLUSH_FLAG: u32 = 1 << (IoType::Flush as u32 - 1); + pub const RESET_FLAG: u32 = 1 << (IoType::Reset as u32 - 1); pub const IO_FAILED_FLAG: u32 = 1; // the following definitions are for the error_store unit test - pub const IO_TYPE_READ: u32 = io_type::READ; - pub const IO_TYPE_WRITE: u32 = io_type::WRITE; - pub const IO_TYPE_UNMAP: u32 = io_type::UNMAP; - pub const IO_TYPE_FLUSH: u32 = io_type::FLUSH; - pub const IO_TYPE_RESET: u32 = io_type::RESET; + pub const IO_TYPE_READ: u32 = IoType::Read as u32; + pub const IO_TYPE_WRITE: u32 = IoType::Write as u32; + pub const IO_TYPE_UNMAP: u32 = IoType::Unmap as u32; + pub const IO_TYPE_FLUSH: u32 = IoType::Flush as u32; + pub const IO_TYPE_RESET: u32 = IoType::Reset as u32; - pub const IO_FAILED: i32 = io_status::FAILED; + pub const IO_FAILED: i32 = IoStatus::Failed as i32; pub fn new(max_records: usize) -> Self { Self { @@ -94,8 +94,8 @@ impl NexusErrStore { pub fn add_record( &mut self, - io_op: spdk_bdev_io_type, - io_error: i32, + io_op: IoType, + io_error: IoStatus, io_offset: u64, io_num_blocks: u64, timestamp: Instant, @@ -126,20 +126,20 @@ impl NexusErrStore { target_timestamp: Option, ) -> bool { match record.io_op { - io_type::READ if (io_op_flags & NexusErrStore::READ_FLAG) != 0 => {} - io_type::WRITE - if (io_op_flags & NexusErrStore::WRITE_FLAG) != 0 => {} 
- io_type::UNMAP - if (io_op_flags & NexusErrStore::UNMAP_FLAG) != 0 => {} - io_type::FLUSH - if (io_op_flags & NexusErrStore::FLUSH_FLAG) != 0 => {} - io_type::RESET - if (io_op_flags & NexusErrStore::RESET_FLAG) != 0 => {} + IoType::Read if (io_op_flags & NexusErrStore::READ_FLAG) != 0 => {} + IoType::Write if (io_op_flags & NexusErrStore::WRITE_FLAG) != 0 => { + } + IoType::Unmap if (io_op_flags & NexusErrStore::UNMAP_FLAG) != 0 => { + } + IoType::Flush if (io_op_flags & NexusErrStore::FLUSH_FLAG) != 0 => { + } + IoType::Reset if (io_op_flags & NexusErrStore::RESET_FLAG) != 0 => { + } _ => return false, }; match record.io_error { - io_status::FAILED + IoStatus::Failed if (io_error_flags & NexusErrStore::IO_FAILED_FLAG) != 0 => {} _ => return false, }; @@ -205,7 +205,7 @@ impl NexusErrStore { } write!( f, - "\n {}: timestamp:{:?} op:{} error:{} offset:{} blocks:{}", + "\n {}: timestamp:{:?} op:{:?} error:{:?} offset:{} blocks:{}", n, self.records[idx].timestamp, self.records[idx].io_op, @@ -235,15 +235,15 @@ impl Nexus { pub fn error_record_add( &self, bdev: *const spdk_bdev, - io_op_type: spdk_bdev_io_type, - io_error_type: i32, + io_op_type: IoType, + io_error_type: IoStatus, io_offset: u64, io_num_blocks: u64, ) { let now = Instant::now(); let cfg = Config::get(); if cfg.err_store_opts.enable_err_store - && (io_op_type == io_type::READ || io_op_type == io_type::WRITE) + && (io_op_type == IoType::Read || io_op_type == IoType::Write) { let nexus_name = self.name.clone(); // dispatch message to management core to do this @@ -266,8 +266,8 @@ impl Nexus { async fn future_error_record_add( name: String, bdev: *const spdk_bdev, - io_op_type: spdk_bdev_io_type, - io_error_type: i32, + io_op_type: IoType, + io_error_type: IoStatus, io_offset: u64, io_num_blocks: u64, now: Instant, @@ -279,55 +279,57 @@ impl Nexus { return; } }; - trace!("Adding error record {} bdev {:?}", io_op_type, bdev); + trace!("Adding error record {:?} bdev {:?}", io_op_type, bdev); for child in 
nexus.children.iter_mut() { - if child.bdev.as_ref().unwrap().as_ptr() as *const _ == bdev { - if child.state() == ChildState::Open { - if child.err_store.is_some() { - child.err_store.as_mut().unwrap().add_record( - io_op_type, - io_error_type, - io_offset, - io_num_blocks, - now, - ); - let cfg = Config::get(); - if cfg.err_store_opts.action == ActionType::Fault - && !Self::assess_child( - &child, - cfg.err_store_opts.max_errors, - cfg.err_store_opts.retention_ns, - QueryType::Total, - ) - { - let child_name = child.name.clone(); - info!("Faulting child {}", child_name); - if nexus - .fault_child(&child_name, Reason::IoError) - .await - .is_err() + if let Some(bdev) = child.bdev.as_ref() { + if bdev.as_ptr() as *const _ == bdev { + if child.state() == ChildState::Open { + if child.err_store.is_some() { + child.err_store.as_mut().unwrap().add_record( + io_op_type, + io_error_type, + io_offset, + io_num_blocks, + now, + ); + let cfg = Config::get(); + if cfg.err_store_opts.action == ActionType::Fault + && !Self::assess_child( + &child, + cfg.err_store_opts.max_errors, + cfg.err_store_opts.retention_ns, + QueryType::Total, + ) { - error!( - "Failed to fault the child {}", - child_name, - ); + let child_name = child.name.clone(); + info!("Faulting child {}", child_name); + if nexus + .fault_child(&child_name, Reason::IoError) + .await + .is_err() + { + error!( + "Failed to fault the child {}", + child_name, + ); + } } + } else { + let child_name = child.name.clone(); + error!( + "Failed to record error - no error store in child {}", + child_name, + ); } - } else { - let child_name = child.name.clone(); - error!( - "Failed to record error - no error store in child {}", - child_name, - ); + return; } + let child_name = child.name.clone(); + trace!("Ignoring error response sent to non-open child {}, state {:?}", child_name, child.state()); return; } - let child_name = child.name.clone(); - trace!("Ignoring error response sent to non-open child {}, state {:?}", 
child_name, child.state()); - return; } + //error!("Failed to record error - could not find child"); } - error!("Failed to record error - could not find child"); } pub fn error_record_query( diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index 059bf3413..3935cf8ee 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -18,7 +18,7 @@ use crate::bdev::nexus::{ instances, nexus_bdev::Nexus, nexus_channel::NexusChannel, - nexus_io::{io_type, Bio}, + nexus_io::{Bio, IoType}, }; static NEXUS_FN_TBL: Lazy = Lazy::new(NexusFnTable::new); @@ -59,28 +59,29 @@ impl NexusFnTable { io_type: spdk_bdev_io_type, ) -> bool { let nexus = unsafe { Nexus::from_raw(ctx) }; - match io_type { + let _io_type = IoType::from(io_type); + match _io_type { // we always assume the device supports read/write commands // allow NVMe Admin as it is needed for local replicas - io_type::READ | io_type::WRITE | io_type::NVME_ADMIN => true, - io_type::FLUSH - | io_type::RESET - | io_type::UNMAP - | io_type::WRITE_ZEROES => { - let supported = nexus.io_is_supported(io_type); + IoType::Read | IoType::Write | IoType::NvmeAdmin => true, + IoType::Flush + | IoType::Reset + | IoType::Unmap + | IoType::WriteZeros => { + let supported = nexus.io_is_supported(_io_type); if !supported { trace!( "IO type {:?} not supported for {}", - io_type, + _io_type, nexus.bdev.name() ); } supported } _ => { - trace!( - "un matched IO type {} not supported for {}", - io_type, + debug!( + "un matched IO type {:#?} not supported for {}", + _io_type, nexus.bdev.name() ); false @@ -109,7 +110,7 @@ impl NexusFnTable { let mut ch = NexusChannel::inner_from_channel(channel); // set the fields that need to be (re)set per-attempt - if nio.io_type() == io_type::READ { + if nio.io_type() == IoType::Read { // set that we only need to read from one child // before we complete the IO to the callee. 
nio.reset(1); @@ -120,36 +121,36 @@ impl NexusFnTable { let nexus = nio.nexus_as_ref(); let io_type = nio.io_type(); match io_type { - io_type::READ => nexus.readv(&nio, &mut ch), - io_type::WRITE => nexus.writev(&nio, &ch), - io_type::RESET => { + IoType::Read => nexus.readv(&nio, &mut ch), + IoType::Write => nexus.writev(&nio, &ch), + IoType::Reset => { trace!("{}: Dispatching RESET", nexus.bdev.name()); nexus.reset(&nio, &ch) } - io_type::UNMAP => { + IoType::Unmap => { if nexus.io_is_supported(io_type) { nexus.unmap(&nio, &ch) } else { nio.fail(); } } - io_type::FLUSH => { + IoType::Flush => { // our replica's are attached to as nvme controllers // who always support flush. This can be troublesome // so we complete the IO directly. nio.reset(0); nio.ok(); } - io_type::WRITE_ZEROES => { + IoType::WriteZeros => { if nexus.io_is_supported(io_type) { nexus.write_zeroes(&nio, &ch) } else { nio.fail() } } - io_type::NVME_ADMIN => nexus.nvme_admin(&nio, &ch), + IoType::NvmeAdmin => nexus.nvme_admin(&nio, &ch), _ => panic!( - "{} Received unsupported IO! type {}", + "{} Received unsupported IO! 
type {:#?}", nexus.name, io_type ), }; diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index eeb935f93..f5e5c53e6 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -1,5 +1,8 @@ use core::fmt; -use std::fmt::{Debug, Formatter}; +use std::{ + fmt::{Debug, Formatter}, + ptr::NonNull, +}; use libc::c_void; @@ -12,13 +15,20 @@ use spdk_sys::{ }; use crate::{ - bdev::nexus::{ - nexus_bdev::{Nexus, NEXUS_PRODUCT_ID}, - nexus_fn_table::NexusFnTable, + bdev::{ + nexus::{ + nexus_bdev::{Nexus, NEXUS_PRODUCT_ID}, + nexus_channel::DREvent, + nexus_fn_table::NexusFnTable, + }, + nexus_lookup, + ChildState, + NexusStatus, + Reason, }, - core::Bdev, + core::{Bdev, Cores, Mthread, NvmeStatus, Reactors}, + nexus_uri::bdev_destroy, }; -use std::ptr::NonNull; /// NioCtx provides context on a per IO basis #[derive(Debug, Clone)] @@ -53,7 +63,7 @@ pub struct NioCtx { /// 2. The IO pointers are never accessed from any other thread /// and care must be taken that you never pass an IO ptr to another core #[derive(Clone)] -pub(crate) struct Bio(NonNull); +pub struct Bio(NonNull); impl From<*mut c_void> for Bio { fn from(io: *mut c_void) -> Self { @@ -67,39 +77,135 @@ impl From<*mut spdk_bdev_io> for Bio { } } -/// redefinition of IO types to make them (a) shorter and (b) get rid of the -/// enum conversion bloat. -/// -/// The commented types are currently not used in our code base, uncomment as -/// needed. 
-pub mod io_type { - pub const READ: u32 = 1; - pub const WRITE: u32 = 2; - pub const UNMAP: u32 = 3; - // pub const INVALID: u32 = 0; - pub const FLUSH: u32 = 4; - pub const RESET: u32 = 5; - pub const NVME_ADMIN: u32 = 6; - // pub const NVME_IO: u32 = 7; - // pub const NVME_IO_MD: u32 = 8; - pub const WRITE_ZEROES: u32 = 9; - // pub const ZCOPY: u32 = 10; - // pub const GET_ZONE_INFO: u32 = 11; - // pub const ZONE_MANAGMENT: u32 = 12; - // pub const ZONE_APPEND: u32 = 13; - // pub const IO_NUM_TYPES: u32 = 14; +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq, Eq)] +pub enum IoType { + Invalid, + Read, + Write, + Unmap, + Flush, + Reset, + NvmeAdmin, + NvmeIO, + NvmeIOMD, + WriteZeros, + ZeroCopy, + ZoneInfo, + ZoneManagement, + ZoneAppend, + Compare, + CompareAndWrite, + Abort, + IoNumTypes, +} + +impl From for u32 { + fn from(t: IoType) -> Self { + match t { + IoType::Invalid => 0, + IoType::Read => 1, + IoType::Write => 2, + IoType::Unmap => 3, + IoType::Flush => 4, + IoType::Reset => 5, + IoType::NvmeAdmin => 6, + IoType::NvmeIO => 7, + IoType::NvmeIOMD => 8, + IoType::WriteZeros => 9, + IoType::ZeroCopy => 10, + IoType::ZoneInfo => 11, + IoType::ZoneManagement => 12, + IoType::ZoneAppend => 13, + IoType::Compare => 14, + IoType::CompareAndWrite => 15, + IoType::Abort => 16, + IoType::IoNumTypes => 17, + } + } +} + +impl From for IoType { + fn from(u: u32) -> Self { + match u { + 0 => Self::Invalid, + 1 => Self::Read, + 2 => Self::Write, + 3 => Self::Unmap, + 4 => Self::Flush, + 5 => Self::Reset, + 6 => Self::NvmeAdmin, + 7 => Self::NvmeIO, + 8 => Self::NvmeIOMD, + 9 => Self::WriteZeros, + 10 => Self::ZeroCopy, + 11 => Self::ZoneInfo, + 12 => Self::ZoneManagement, + 13 => Self::ZoneAppend, + 14 => Self::Compare, + 15 => Self::CompareAndWrite, + 16 => Self::Abort, + 17 => Self::IoNumTypes, + _ => { + panic!("invalid IO type") + } + } + } +} + +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq, Eq)] +#[non_exhaustive] +pub enum IoStatus { + Aborted, + 
FirstFusedFailed, + MisCompared, + NoMemory, + ScsiError, + NvmeError, + Failed, + Pending, + Success, +} + +impl From for IoStatus { + fn from(status: i32) -> Self { + match status { + -7 => Self::Aborted, + -6 => Self::FirstFusedFailed, + -5 => Self::MisCompared, + -4 => Self::NoMemory, + -3 => Self::ScsiError, + -2 => Self::NvmeError, + -1 => Self::Failed, + 0 => Self::Pending, + 1 => Self::Success, + _ => { + panic!("invalid status code") + } + } + } } -/// the status of an IO - note: values copied from spdk bdev_module.h -pub mod io_status { - //pub const NOMEM: i32 = -4; - //pub const SCSI_ERROR: i32 = -3; - //pub const NVME_ERROR: i32 = -2; - pub const FAILED: i32 = -1; - //pub const PENDING: i32 = 0; - pub const SUCCESS: i32 = 1; +impl From for i32 { + fn from(i: IoStatus) -> Self { + match i { + IoStatus::Aborted => -7, + IoStatus::FirstFusedFailed => -6, + IoStatus::MisCompared => -5, + IoStatus::NoMemory => -4, + IoStatus::ScsiError => -3, + IoStatus::NvmeError => -2, + IoStatus::Failed => -1, + IoStatus::Pending => 0, + IoStatus::Success => 1, + } + } } +impl From for IoStatus { + fn from(status: i8) -> Self { + (status as i32).into() + } +} /// NVMe Admin opcode, from nvme_spec.h pub mod nvme_admin_opc { // pub const GET_LOG_PAGE: u8 = 0x02; @@ -129,7 +235,7 @@ impl Bio { /// reset the ctx fields of an spdk_bdev_io to submit or resubmit an IO pub fn reset(&mut self, in_flight: usize) { self.ctx_as_mut_ref().in_flight = in_flight as i8; - self.ctx_as_mut_ref().status = io_status::SUCCESS; + self.ctx_as_mut_ref().status = IoStatus::Success.into(); } /// complete an IO for the nexus. 
In the IO completion routine in @@ -149,40 +255,78 @@ impl Bio { } } unsafe { - spdk_bdev_io_complete(self.0.as_ptr(), io_status::SUCCESS); + spdk_bdev_io_complete(self.0.as_ptr(), IoStatus::Success.into()) } } /// mark the IO as failed #[inline] pub(crate) fn fail(&self) { unsafe { - spdk_bdev_io_complete(self.0.as_ptr(), io_status::FAILED); + spdk_bdev_io_complete(self.0.as_ptr(), IoStatus::Failed.into()) } } /// assess the IO if we need to mark it failed or ok. #[inline] pub(crate) fn assess(&mut self, child_io: &mut Bio, success: bool) { - self.ctx_as_mut_ref().in_flight -= 1; + { + let pio_ctx = self.ctx_as_mut_ref(); + pio_ctx.in_flight -= 1; - debug_assert!(self.ctx_as_mut_ref().in_flight >= 0); + debug_assert!(pio_ctx.in_flight >= 0); + } if !success { - let io_offset = self.offset(); - let io_num_blocks = self.num_blocks(); - self.nexus_as_ref().error_record_add( - child_io.bdev_as_ref().as_ptr(), - self.io_type(), - io_status::FAILED, - io_offset, - io_num_blocks, - ); + // note although this is not the hot path, with a sufficiently high + // queue depth it can turn whitehot rather quickly + error!("{:#?}", NvmeStatus::from(child_io.clone())); + let child = child_io.bdev_as_ref(); + let n = self.nexus_as_ref(); + + if let Some(child) = n.child_lookup(&child.name()) { + let current_state = child.state.compare_and_swap( + ChildState::Open, + ChildState::Faulted(Reason::IoError), + ); + + if current_state == ChildState::Open { + warn!( + "core {} thread {:?}, faulting child {}", + Cores::current(), + Mthread::current(), + child + ); + + let name = n.name.clone(); + let uri = child.name.clone(); + + let fut = async move { + if let Some(nexus) = nexus_lookup(&name) { + nexus.pause().await.unwrap(); + nexus.reconfigure(DREvent::ChildFault).await; + bdev_destroy(&uri).await.unwrap(); + if nexus.status() != NexusStatus::Faulted { + nexus.resume().await.unwrap(); + } else { + error!(":{} has no children left... 
", nexus); + } + } + }; + + Reactors::master().send_future(fut); + } + } else { + debug!("core {} thread {:?}, not faulting child {} as its already being removed", + Cores::current(), Mthread::current(), child); + } } - if self.ctx_as_mut_ref().in_flight == 0 { - if self.ctx_as_mut_ref().status == io_status::FAILED { - self.ctx_as_mut_ref().io_attempts -= 1; - if self.ctx_as_mut_ref().io_attempts > 0 { + let pio_ctx = self.ctx_as_mut_ref(); + + if pio_ctx.in_flight == 0 { + if IoStatus::from(pio_ctx.status) == IoStatus::Failed { + pio_ctx.io_attempts -= 1; + if pio_ctx.io_attempts > 0 { NexusFnTable::io_submit_or_resubmit( self.io_channel(), &mut self.clone(), @@ -261,8 +405,8 @@ impl Bio { /// determine the type of this IO #[inline] - pub(crate) fn io_type(&self) -> u32 { - unsafe { self.0.as_ref().type_ as u32 } + pub(crate) fn io_type(&self) -> IoType { + unsafe { self.0.as_ref().type_ as u32 }.into() } /// get the block length of this IO @@ -271,8 +415,8 @@ impl Bio { self.bdev_as_ref().block_len() as u64 } #[inline] - pub(crate) fn status(&self) -> i8 { - unsafe { self.0.as_ref().internal.status } + pub(crate) fn status(&self) -> IoStatus { + unsafe { self.0.as_ref().internal.status }.into() } /// determine if the IO needs an indirect buffer this can happen for example diff --git a/mayastor/src/bdev/nexus/nexus_module.rs b/mayastor/src/bdev/nexus/nexus_module.rs index e786456a3..92c6ed2d0 100644 --- a/mayastor/src/bdev/nexus/nexus_module.rs +++ b/mayastor/src/bdev/nexus/nexus_module.rs @@ -123,7 +123,7 @@ impl NexusModule { instances .iter_mut() - .filter(|nexus| nexus.state == NexusState::Init) + .filter(|nexus| *nexus.state.lock().unwrap() == NexusState::Init) .any(|nexus| { if nexus.examine_child(&name) { info!( diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 1c7993192..930a1ec3a 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -32,7 +32,7 @@ use spdk_sys::{ }; use crate::{ - bdev::nexus::instances, + 
bdev::nexus::{instances, nexus_io::IoType}, core::{ share::{Protocol, Share}, uuid::Uuid, @@ -348,8 +348,8 @@ impl Bdev { } /// returns whenever the bdev supports the requested IO type - pub fn io_type_supported(&self, io_type: u32) -> bool { - unsafe { spdk_bdev_io_type_supported(self.0.as_ptr(), io_type) } + pub fn io_type_supported(&self, io_type: IoType) -> bool { + unsafe { spdk_bdev_io_type_supported(self.0.as_ptr(), io_type.into()) } } /// returns the bdev as a ptr diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs index 7373091f6..2c424d382 100644 --- a/mayastor/src/core/descriptor.rs +++ b/mayastor/src/core/descriptor.rs @@ -192,12 +192,16 @@ impl Drop for Descriptor { impl Debug for Descriptor { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - write!( - f, - "Descriptor {:p} for bdev: {}", - self.as_ptr(), - self.get_bdev().name() - ) + if self.0 != std::ptr::null_mut() { + write!( + f, + "Descriptor {:p} for bdev: {}", + self.as_ptr(), + self.get_bdev().name() + ) + } else { + write!(f, "not alloacted") + } } } diff --git a/mayastor/src/core/io_driver.rs b/mayastor/src/core/io_driver.rs index 9029c8e5c..bb8d7fb64 100644 --- a/mayastor/src/core/io_driver.rs +++ b/mayastor/src/core/io_driver.rs @@ -188,7 +188,18 @@ impl Job { let job = unsafe { ioq.job.as_mut() }; if !success { - error!("{}: {:#?}", job.thread.as_ref().unwrap().name(), bdev_io); + // trace!( + // "core: {} mthread: {:?}{}: {:#?}", + // Cores::current(), + // Mthread::current().unwrap(), + // job.thread.as_ref().unwrap().name(), + // bdev_io + // ); + + // let bio = Bio::from(bdev_io); + // dbg!(&bio); + // + // dbg!(NvmeStatus::from(bio)); } assert_eq!(Cores::current(), job.core); diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 2096901e6..e497ba542 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -15,12 +15,15 @@ pub use env::{ MayastorCliArgs, MayastorEnvironment, GLOBAL_RC, + SIG_RECEIVED, }; pub use 
handle::BdevHandle; pub use reactor::{Reactor, ReactorState, Reactors, REACTOR_LIST}; pub use share::{Protocol, Share}; pub use thread::Mthread; +pub use nvme::{NvmeStatus,GenericStatusCode}; + mod bdev; mod channel; @@ -30,11 +33,11 @@ mod dma; mod env; mod handle; pub mod io_driver; +mod nvme; mod reactor; mod share; pub(crate) mod thread; mod uuid; - #[derive(Debug, Snafu, Clone)] #[snafu(visibility = "pub")] pub enum CoreError { diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs new file mode 100644 index 000000000..2b49916d6 --- /dev/null +++ b/mayastor/src/core/nvme.rs @@ -0,0 +1,113 @@ +use crate::bdev::Bio; +use spdk_sys::spdk_bdev_io_get_nvme_status; + +#[derive(Debug, Copy, Clone, Eq, PartialOrd, PartialEq)] +pub enum GenericStatusCode { + Success, + InvalidOPCode, + InternalDeviceError, + AbortedRequested, + Reserved, + AbortedSubmissionQueueDeleted, +} + +impl From for GenericStatusCode { + fn from(i: i32) -> Self { + match i { + 0x00 => Self::Success, + 0x1 => Self::InvalidOPCode, + 0x06 => Self::InternalDeviceError, + 0x07 => Self::AbortedRequested, + 0x08 => Self::AbortedSubmissionQueueDeleted, + _ => { + error!("unknown code {}", i); + Self::Reserved + } + } + } +} + +#[derive(Debug)] +pub struct NvmeStatus { + /// NVMe completion queue entry + cdw0: u32, + /// NVMe status code type + sct: i32, + /// NVMe status code + sc: GenericStatusCode, +} + +impl NvmeStatus { + pub fn status_code(&self) -> GenericStatusCode { + self.sc + } + // todo make enums +} + +impl From for NvmeStatus { + fn from(b: Bio) -> Self { + let mut cdw0: u32 = 0; + let mut sct: i32 = 0; + let mut sc: i32 = 0; + + unsafe { + spdk_bdev_io_get_nvme_status( + b.as_ptr(), + &mut cdw0, + &mut sct, + &mut sc, + ) + } + + Self { + cdw0, + sct, + sc: GenericStatusCode::from(sc), + } + } +} + +impl From<&mut Bio> for NvmeStatus { + fn from(b: &mut Bio) -> Self { + let mut cdw0: u32 = 0; + let mut sct: i32 = 0; + let mut sc: i32 = 0; + + unsafe { + 
spdk_bdev_io_get_nvme_status( + b.as_ptr(), + &mut cdw0, + &mut sct, + &mut sc, + ) + } + + Self { + cdw0, + sct, + sc: GenericStatusCode::from(sc), + } + } +} +impl From<&Bio> for NvmeStatus { + fn from(b: &Bio) -> Self { + let mut cdw0: u32 = 0; + let mut sct: i32 = 0; + let mut sc: i32 = 0; + + unsafe { + spdk_bdev_io_get_nvme_status( + b.as_ptr(), + &mut cdw0, + &mut sct, + &mut sc, + ) + } + + Self { + cdw0, + sct, + sc: GenericStatusCode::from(sc), + } + } +} diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index 3793cee22..94eac6a36 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -211,7 +211,7 @@ impl std::fmt::Display for Location<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(file) = self.meta.file() { if let Some(line) = self.meta.line() { - write!(f, "({}:{}) ", basename(file), line)?; + write!(f, "{}:{}] ", basename(file), line)?; } } Ok(()) @@ -240,7 +240,7 @@ where write!( writer, - "[{} {} {}] ", + "[{} {} {}:", chrono::Local::now().format("%FT%T%.9f%Z"), FormatLevel::new(meta.level(), self.ansi), meta.target() diff --git a/mayastor/src/lvs/lvs_pool.rs b/mayastor/src/lvs/lvs_pool.rs index 26d30f80d..a2cf343b3 100644 --- a/mayastor/src/lvs/lvs_pool.rs +++ b/mayastor/src/lvs/lvs_pool.rs @@ -25,12 +25,11 @@ use spdk_sys::{ LVOL_CLEAR_WITH_UNMAP, LVOL_CLEAR_WITH_WRITE_ZEROES, LVS_CLEAR_WITH_NONE, - SPDK_BDEV_IO_TYPE_UNMAP, }; use url::Url; use crate::{ - bdev::{util::uring, Uri}, + bdev::{nexus::nexus_io::IoType, util::uring, Uri}, core::{Bdev, Share, Uuid}, ffihelper::{cb_arg, pair, AsStr, ErrnoResult, FfiResult, IntoCString}, lvs::{Error, Lvol, PropName, PropValue}, @@ -522,12 +521,12 @@ impl Lvs { size: u64, thin: bool, ) -> Result { - let clear_method = - if self.base_bdev().io_type_supported(SPDK_BDEV_IO_TYPE_UNMAP) { - LVOL_CLEAR_WITH_UNMAP - } else { - LVOL_CLEAR_WITH_WRITE_ZEROES - }; + let clear_method = if self.base_bdev().io_type_supported(IoType::Unmap) + { + 
LVOL_CLEAR_WITH_UNMAP + } else { + LVOL_CLEAR_WITH_WRITE_ZEROES + }; if Bdev::lookup_by_name(name).is_some() { return Err(Error::RepExists { diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs index b2dc68195..47386d17d 100644 --- a/mayastor/src/subsys/config/mod.rs +++ b/mayastor/src/subsys/config/mod.rs @@ -353,7 +353,7 @@ impl Config { pub fn apply(&self) { info!("Applying Mayastor configuration settings"); // note: nvmf target does not have a set method - self.nvme_bdev_opts.set(); + assert_eq!(self.nvme_bdev_opts.set(), true); self.bdev_opts.set(); self.iscsi_tgt_conf.set(); } diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index 3a1baaf16..29b400283 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -16,6 +16,7 @@ use spdk_sys::{ spdk_iscsi_opts, spdk_nvmf_target_opts, spdk_nvmf_transport_opts, + SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, }; use crate::bdev::ActionType; @@ -202,27 +203,27 @@ impl From for spdk_nvmf_transport_opts { #[serde(default, deny_unknown_fields)] pub struct NvmeBdevOpts { /// action take on timeout - action_on_timeout: u32, + pub action_on_timeout: u32, /// timeout for each command - timeout_us: u64, + pub timeout_us: u64, /// retry count - retry_count: u32, + pub retry_count: u32, /// TODO - arbitration_burst: u32, + pub arbitration_burst: u32, /// max number of low priority cmds a controller may launch at one time - low_priority_weight: u32, + pub low_priority_weight: u32, /// max number of medium priority cmds a controller may launch at one time - medium_priority_weight: u32, + pub medium_priority_weight: u32, /// max number of high priority cmds a controller may launch at one time - high_priority_weight: u32, + pub high_priority_weight: u32, /// admin queue polling period - nvme_adminq_poll_period_us: u64, + pub nvme_adminq_poll_period_us: u64, /// ioq polling period - nvme_ioq_poll_period_us: u64, + pub nvme_ioq_poll_period_us: 
u64, /// number of requests per nvme IO queue - io_queue_requests: u32, + pub io_queue_requests: u32, /// allow for batching of commands - delay_cmd_submit: bool, + pub delay_cmd_submit: bool, } impl GetOpts for NvmeBdevOpts { @@ -236,6 +237,7 @@ impl GetOpts for NvmeBdevOpts { fn set(&self) -> bool { let opts = Box::new(self.into()); + debug!("{:?}", &opts); if unsafe { bdev_nvme_set_opts(Box::into_raw(opts)) } != 0 { return false; } @@ -246,14 +248,14 @@ impl GetOpts for NvmeBdevOpts { impl Default for NvmeBdevOpts { fn default() -> Self { Self { - action_on_timeout: 1, - timeout_us: 2_000_000, - retry_count: 5, + action_on_timeout: SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, + timeout_us: 30_000_000, + retry_count: 3, arbitration_burst: 0, low_priority_weight: 0, medium_priority_weight: 0, high_priority_weight: 0, - nvme_adminq_poll_period_us: 10_000, + nvme_adminq_poll_period_us: 100, nvme_ioq_poll_period_us: 0, io_queue_requests: 0, delay_cmd_submit: true, diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs index 34af5c67e..e2709902c 100644 --- a/mayastor/src/subsys/mod.rs +++ b/mayastor/src/subsys/mod.rs @@ -2,7 +2,7 @@ //! 
Main file to register additional subsystems pub use config::{ - opts::NexusOpts, + opts::{NexusOpts, NvmeBdevOpts}, BaseBdev, Config, ConfigSubsystem, diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 53ce533ed..e6ea6883c 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -372,8 +372,8 @@ impl NvmfSubsystem { /// we are not making use of pause and resume yet but this will be needed /// when we start to move things around - #[allow(dead_code)] - async fn pause(&self) -> Result<(), Error> { + #[instrument(level = "debug", err)] + pub async fn pause(&self) -> Result<(), Error> { extern "C" fn pause_cb( ss: *mut spdk_nvmf_subsystem, arg: *mut c_void, @@ -413,9 +413,8 @@ impl NvmfSubsystem { msg: "failed to pause the subsystem".to_string(), }) } - - #[allow(dead_code)] - async fn resume(&self) -> Result<(), Error> { + #[instrument(level = "debug", err)] + pub async fn resume(&self) -> Result<(), Error> { extern "C" fn resume_cb( ss: *mut spdk_nvmf_subsystem, arg: *mut c_void, diff --git a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs new file mode 100644 index 000000000..39e8d2520 --- /dev/null +++ b/mayastor/tests/bdev_test.rs @@ -0,0 +1,337 @@ +//! +//! At a high level this is what is tested during +//! this run. For each core we are assigned we will +//! start a job +//! +//! +//! +------------+ +-------------------------+ +//! | | | | +//! | job | | +--nvmf----> MS1 | +//! | | | | | +//! +------------+ +-------------------------+ +//! | +//! +------------+ +-------------------------+ +//! | | | | | +//! | nvmf | | +--nvmf----> MS2 | +// | | | | | +// +------------+ +-------------------------+ +//! | | +//! | +-------------------------+ +//! | | | +//! | | | | +//! +-+nvmf------>+ nexus +--loop----> MS3 | +//! | | +//! +-------------------------+ +//! +//! +//! The idea is that we then "hot remove" targets while +//! the nexus is still able to process IO. +//! 
+//! +//! When we encounter an IO problem, we must reconfigure all cores, (unless we +//! use single cores of course) and this multi core reconfiguration is what we +//! are trying to test here, and so we require a certain amount of cores to test +//! this to begin with. Also, typically, no more than one mayastor instance will +//! be bound to a particular core. As such we "spread" out cores as much as +//! possible. +use std::{ + sync::{atomic::Ordering, Arc}, + time::Duration, +}; + +use once_cell::sync::OnceCell; + +const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; +//const NEXUS_NAME: &str = "nexus-00000000-0000-0000-0000-000000000001"; + +use common::compose::{Builder, ComposeTest, MayastorTest}; +use mayastor::{ + core::{ + io_driver, + io_driver::JobQueue, + Bdev, + Cores, + MayastorCliArgs, + SIG_RECEIVED, + }, + nexus_uri::bdev_create, +}; +use rpc::mayastor::{ + BdevShareRequest, + BdevUri, + CreateNexusRequest, + CreateReply, + ListNexusReply, + Null, + PublishNexusRequest, +}; + +use composer::Binary; +use mayastor::subsys::{Config, NvmeBdevOpts}; +use tokio::time::interval; + +pub mod common; + +static MAYASTOR: OnceCell = OnceCell::new(); +static DOCKER_COMPOSE: OnceCell = OnceCell::new(); + +/// create a malloc bdev and export them over nvmf, returns the URI of the +/// constructed target. +async fn create_target(container: &str) -> String { + let mut h = DOCKER_COMPOSE + .get() + .unwrap() + .grpc_handle(container) + .await + .unwrap(); + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + // share it over nvmf + let ep = h + .bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap(); + + ep.into_inner().uri +} + +/// create a local malloc bdev, and then use it to create a nexus with the +/// remote targets added. 
This reflects the current approach where we have +/// children as: bdev:/// and nvmf:// we really should get rid of this +/// asymmetrical composition if we can. +async fn create_nexus(container: &str, mut kiddos: Vec) -> String { + let mut h = DOCKER_COMPOSE + .get() + .unwrap() + .grpc_handle(container) + .await + .unwrap(); + + let bdev = h + .bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + + kiddos.push(format!("bdev:///{}", bdev.into_inner().name)); + + h.mayastor + .create_nexus(CreateNexusRequest { + uuid: NEXUS_UUID.to_string(), + size: 96 * 1024 * 1024, + children: kiddos, + }) + .await + .unwrap(); + + let endpoint = h + .mayastor + .publish_nexus(PublishNexusRequest { + uuid: NEXUS_UUID.into(), + share: 1, + ..Default::default() + }) + .await + .unwrap(); + + endpoint.into_inner().device_uri +} + +/// create the work -- which means the nexus, replica's and the jobs. on return +/// IO flows through mayastorTest to all 3 containers +async fn create_topology(queue: Arc) { + let r1 = create_target("ms1").await; + let r2 = create_target("ms2").await; + let endpoint = create_nexus("ms3", vec![r1, r2]).await; + + // the nexus is running on ms3 we will use a 4th instance of mayastor to + // create a nvmf bdev and push IO to it. 
+ + let ms = MAYASTOR.get().unwrap(); + let bdev = ms + .spawn(async move { + let bdev = bdev_create(&endpoint).await.unwrap(); + bdev + }) + .await; + + // start the workload by running a job on each core, this simulates the way + // the targets use multiple cores + ms.spawn(async move { + for c in Cores::count() { + let bdev = Bdev::lookup_by_name(&bdev).unwrap(); + let job = io_driver::Builder::new() + .core(c) + .bdev(bdev) + .qd(64) + .io_size(512) + .build() + .await; + + queue.start(job); + } + }) + .await; +} + +async fn check_nexus(checker: F) { + let mut ms3 = DOCKER_COMPOSE + .get() + .unwrap() + .grpc_handle("ms3") + .await + .unwrap(); + let list = ms3.mayastor.list_nexus(Null {}).await.unwrap().into_inner(); + checker(list) +} + +/// kill replica issues an unshare to the container which more or less amounts +/// to the same thing as killing the container. +async fn kill_replica(container: &str) { + let t = DOCKER_COMPOSE.get().unwrap(); + let mut hdl = t.grpc_handle(container).await.unwrap(); + + hdl.bdev + .unshare(CreateReply { + name: "disk0".to_string(), + }) + .await + .unwrap(); +} + +#[allow(dead_code)] +async fn pause_replica(container: &str) { + let t = DOCKER_COMPOSE.get().unwrap(); + t.pause(container).await.unwrap(); +} + +#[allow(dead_code)] +async fn unpause_replica(container: &str) { + let t = DOCKER_COMPOSE.get().unwrap(); + t.thaw(container).await.unwrap(); +} + +#[allow(dead_code)] +async fn kill_local(container: &str) { + let t = DOCKER_COMPOSE.get().unwrap(); + let mut hdl = t.grpc_handle(container).await.unwrap(); + hdl.bdev + .destroy(BdevUri { + uri: "malloc:///disk0".into(), + }) + .await + .unwrap(); +} + +async fn list_bdevs(container: &str) { + let mut h = DOCKER_COMPOSE + .get() + .unwrap() + .grpc_handle(container) + .await + .unwrap(); + dbg!(h.bdev.list(Null {}).await.unwrap()); +} + +#[tokio::test] +async fn nvmf_bdev_test() { + let queue = Arc::new(JobQueue::new()); + + Config::get_or_init(|| Config { + nvme_bdev_opts: 
NvmeBdevOpts { + action_on_timeout: 2, + timeout_us: 10_000_000, + retry_count: 5, + ..Default::default() + }, + ..Default::default() + }) + .apply(); + + // create the docker containers each container started with two adjacent CPU + // cores. ms1 will have core mask 0x3, ms3 will have core mask 0xc and so + // on. the justification for this enormous core spreading is we want to + // test and ensure that things do not interfere with one and other and + // yet, still have at least more than one core such that we mimic + // production workloads. + // + + let compose = Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container_bin( + "ms1", + Binary::from_dbg("mayastor").with_args(vec!["-l", "1"]), + ) + .add_container_bin( + "ms2", + Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), + ) + .add_container_bin( + "ms3", + Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), + ) + .with_clean(true) + .with_prune(true) + .build() + .await + .unwrap(); + + DOCKER_COMPOSE.set(compose).unwrap(); + // this is based on the number of containers above. + let mask = format!("{:#01x}", (1 << 4) | (1 << 5)); + let ms = MayastorTest::new(MayastorCliArgs { + reactor_mask: mask, + no_pci: true, + grpc_endpoint: "0.0.0.0".to_string(), + ..Default::default() + }); + + let ms = MAYASTOR.get_or_init(|| ms); + + let mut ticker = interval(Duration::from_millis(1000)); + create_topology(Arc::clone(&queue)).await; + + list_bdevs("ms3").await; + + for i in 1 .. 
10 { + ticker.tick().await; + if i == 5 { + kill_replica("ms1").await; + } + + ms.spawn(async { + let bdev = Bdev::bdev_first().unwrap(); + dbg!(bdev.stats().await.unwrap()); + }) + .await; + // ctrl was hit so exit the loop here + if SIG_RECEIVED.load(Ordering::Relaxed) { + break; + } + } + + check_nexus(|n| { + n.nexus_list.iter().for_each(|n| { + dbg!(n); + }); + }) + .await; + + list_bdevs("ms3").await; + DOCKER_COMPOSE.get().unwrap().logs("ms3").await.unwrap(); + + queue.stop_all().await; + ms.stop().await; + DOCKER_COMPOSE.get().unwrap().down().await; +} diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index ad6c26bd2..33514e6e7 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -11,7 +11,9 @@ use mayastor::core::{ MayastorEnvironment, Reactor, Reactors, + GLOBAL_RC, }; +use std::time::Duration; /// Mayastor test structure that simplifies sending futures. Mayastor has /// its own reactor, which is not tokio based, so we need to handle properly @@ -86,10 +88,14 @@ impl<'a> MayastorTest<'a> { } /// explicitly stop mayastor - pub async fn stop(mut self) { + pub async fn stop(&self) { self.spawn(async { mayastor_env_stop(0) }).await; - let hdl = self.thdl.take().unwrap(); - hdl.join().unwrap() + loop { + if *GLOBAL_RC.lock().unwrap() == 0 { + break; + } + tokio::time::delay_for(Duration::from_millis(500)).await; + } } } diff --git a/mayastor/tests/error_count.rs b/mayastor/tests/error_count.rs index 332d66d06..0a2afaefa 100644 --- a/mayastor/tests/error_count.rs +++ b/mayastor/tests/error_count.rs @@ -27,7 +27,7 @@ static EE_ERROR_DEVICE: &str = "EE_error_device"; static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_count_test_nexus.yaml"; - +#[ignore] #[tokio::test] async fn nexus_error_count_test() { common::truncate_file(DISKNAME1, 64 * 1024); diff --git a/mayastor/tests/error_count_retry.rs b/mayastor/tests/error_count_retry.rs index 
cb8e9ec2a..fc4eaedf8 100644 --- a/mayastor/tests/error_count_retry.rs +++ b/mayastor/tests/error_count_retry.rs @@ -24,7 +24,7 @@ static EE_ERROR_DEVICE: &str = "EE_error_retry_device"; // The prefix is added b static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_retry_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_count_retry_nexus.yaml"; - +#[ignore] #[tokio::test] async fn nexus_error_count_retry_test() { common::truncate_file(DISKNAME1, 64 * 1024); diff --git a/mayastor/tests/error_fault_child.rs b/mayastor/tests/error_fault_child.rs index 73325f1a0..176683e3f 100644 --- a/mayastor/tests/error_fault_child.rs +++ b/mayastor/tests/error_fault_child.rs @@ -26,7 +26,7 @@ static EE_ERROR_DEVICE: &str = "EE_error_device"; static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_device"; static YAML_CONFIG_FILE: &str = "/tmp/error_fault_child_test_nexus.yaml"; - +#[ignore] #[tokio::test] async fn nexus_fault_child_test() { common::truncate_file(DISKNAME1, 64 * 1024); diff --git a/mayastor/tests/error_store.rs b/mayastor/tests/error_store.rs index bc9930301..179c6ed99 100644 --- a/mayastor/tests/error_store.rs +++ b/mayastor/tests/error_store.rs @@ -1,136 +1,140 @@ -pub mod common; -use mayastor::bdev::{NexusErrStore, QueryType}; -use std::time::{Duration, Instant}; - -const ALL_FLAGS: u32 = 0xffff_ffff; - -#[test] -fn nexus_child_error_store_test() { - let mut es = NexusErrStore::new(15); - let start_inst = Instant::now(); - - let mut errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 0); - - add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 5); - add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 10); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 2); - - add_records(&mut es, 2, NexusErrStore::IO_TYPE_WRITE, start_inst, 11); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 4); - - add_records(&mut es, 3, 
NexusErrStore::IO_TYPE_UNMAP, start_inst, 12); - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 7); - - add_records(&mut es, 4, NexusErrStore::IO_TYPE_FLUSH, start_inst, 13); - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 11); - - add_records(&mut es, 5, NexusErrStore::IO_TYPE_RESET, start_inst, 14); - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - // last record over-writes the first, hence 15 not 16 - assert_eq!(errors, 15); - - /////////////////// filter by time //////////////////////////// - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); - assert_eq!(errors, 15); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 15); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 11); - assert_eq!(errors, 14); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 12); - assert_eq!(errors, 12); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 13); - assert_eq!(errors, 9); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 14); - assert_eq!(errors, 5); - - errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 15); - assert_eq!(errors, 0); - - errors = es.query(ALL_FLAGS, ALL_FLAGS, None, QueryType::Total); // no time specified - assert_eq!(errors, 15); - - /////////////////////// filter by op //////////////////////// - errors = do_query(&es, NexusErrStore::READ_FLAG, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 1); - - errors = - do_query(&es, NexusErrStore::WRITE_FLAG, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 2); - - errors = - do_query(&es, NexusErrStore::UNMAP_FLAG, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 3); - - errors = - do_query(&es, NexusErrStore::FLUSH_FLAG, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 4); - - errors = - do_query(&es, NexusErrStore::RESET_FLAG, ALL_FLAGS, start_inst, 10); - assert_eq!(errors, 5); - - errors = do_query(&es, 0, ALL_FLAGS, 
start_inst, 10); - assert_eq!(errors, 0); - - ////////////////////// filter by failure ////////////////////////// - - errors = do_query( - &es, - ALL_FLAGS, - NexusErrStore::IO_FAILED_FLAG, - start_inst, - 10, - ); - assert_eq!(errors, 15); - - errors = do_query(&es, ALL_FLAGS, 0, start_inst, 10); - assert_eq!(errors, 0); -} - -fn add_records( - es: &mut NexusErrStore, - how_many: usize, - op: u32, - start_inst: Instant, - when: u64, -) { - let offset: u64 = 0; - let num_of_blocks: u64 = 1; - for _ in 0 .. how_many { - es.add_record( - op, - NexusErrStore::IO_FAILED, - offset, - num_of_blocks, - start_inst + Duration::from_nanos(when), - ); - } -} - -fn do_query( - es: &NexusErrStore, - op_flags: u32, - err_flags: u32, - start_inst: Instant, - when: u64, -) -> u32 { - es.query( - op_flags, - err_flags, - Some(start_inst + Duration::from_nanos(when)), - QueryType::Total, - ) -} +// pub mod common; +// use mayastor::{ +// bdev::{NexusErrStore, QueryType}, +// }; +// use std::time::{Duration, Instant}; +// use crate::mayastor::bdev::nexus::nexus_io::{IoStatus, IoType}; +// use mayastor::bdev::nexus::nexus_io::{IoType, IoStatus}; +// +// const ALL_FLAGS: u32 = 0xffff_ffff; +// #[skip] +// #[test] +// fn nexus_child_error_store_test() { +// let mut es = NexusErrStore::new(15); +// let start_inst = Instant::now(); +// +// let mut errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// assert_eq!(errors, 0); +// +// add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 5); +// add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 10); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// assert_eq!(errors, 2); +// +// add_records(&mut es, 2, NexusErrStore::IO_TYPE_WRITE, start_inst, 11); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// assert_eq!(errors, 4); +// +// add_records(&mut es, 3, NexusErrStore::IO_TYPE_UNMAP, start_inst, 12); +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); 
+// assert_eq!(errors, 7); +// +// add_records(&mut es, 4, NexusErrStore::IO_TYPE_FLUSH, start_inst, 13); +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// assert_eq!(errors, 11); +// +// add_records(&mut es, 5, NexusErrStore::IO_TYPE_RESET, start_inst, 14); +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// // last record over-writes the first, hence 15 not 16 +// assert_eq!(errors, 15); +// +// /////////////////// filter by time //////////////////////////// +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); +// assert_eq!(errors, 15); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 10); +// assert_eq!(errors, 15); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 11); +// assert_eq!(errors, 14); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 12); +// assert_eq!(errors, 12); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 13); +// assert_eq!(errors, 9); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 14); +// assert_eq!(errors, 5); +// +// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 15); +// assert_eq!(errors, 0); +// +// errors = es.query(ALL_FLAGS, ALL_FLAGS, None, QueryType::Total); // no +// time specified assert_eq!(errors, 15); +// +// /////////////////////// filter by op //////////////////////// +// errors = do_query(&es, NexusErrStore::READ_FLAG, ALL_FLAGS, start_inst, +// 10); assert_eq!(errors, 1); +// +// errors = +// do_query(&es, NexusErrStore::WRITE_FLAG, ALL_FLAGS, start_inst, 10); +// assert_eq!(errors, 2); +// +// errors = +// do_query(&es, NexusErrStore::UNMAP_FLAG, ALL_FLAGS, start_inst, 10); +// assert_eq!(errors, 3); +// +// errors = +// do_query(&es, NexusErrStore::FLUSH_FLAG, ALL_FLAGS, start_inst, 10); +// assert_eq!(errors, 4); +// +// errors = +// do_query(&es, NexusErrStore::RESET_FLAG, ALL_FLAGS, start_inst, 10); +// assert_eq!(errors, 5); +// +// errors = do_query(&es, 0, ALL_FLAGS, 
start_inst, 10); +// assert_eq!(errors, 0); +// +// ////////////////////// filter by failure ////////////////////////// +// +// errors = do_query( +// &es, +// ALL_FLAGS, +// NexusErrStore::IO_FAILED_FLAG, +// start_inst, +// 10, +// ); +// assert_eq!(errors, 15); +// +// errors = do_query(&es, ALL_FLAGS, 0, start_inst, 10); +// assert_eq!(errors, 0); +// } +// +// fn add_records( +// es: &mut NexusErrStore, +// how_many: usize, +// op: IoType, +// start_inst: Instant, +// when: u64, +// ) { +// let offset: u64 = 0; +// let num_of_blocks: u64 = 1; +// for _ in 0 .. how_many { +// es.add_record( +// op, +// IoStatus::Failed, +// offset, +// num_of_blocks, +// start_inst + Duration::from_nanos(when), +// ); +// } +// } +// +// fn do_query( +// es: &NexusErrStore, +// op_flags: u32, +// err_flags: u32, +// start_inst: Instant, +// when: u64, +// ) -> u32 { +// es.query( +// op_flags, +// err_flags, +// Some(start_inst + Duration::from_nanos(when)), +// QueryType::Total, +// ) +//} diff --git a/mayastor/tests/nexus_add_remove.rs b/mayastor/tests/nexus_add_remove.rs new file mode 100644 index 000000000..d8461dd67 --- /dev/null +++ b/mayastor/tests/nexus_add_remove.rs @@ -0,0 +1,250 @@ +use common::compose::{Builder, ComposeTest, MayastorTest}; +use mayastor::{ + bdev::{nexus_create, nexus_lookup}, + core::{MayastorCliArgs, Share}, + nexus_uri::bdev_destroy, +}; +use once_cell::sync::OnceCell; +use rpc::mayastor::{BdevShareRequest, BdevUri}; + +pub mod common; + +static MAYASTOR: OnceCell = OnceCell::new(); +static DOCKER_COMPOSE: OnceCell = OnceCell::new(); + +async fn nexus_3_way_create() { + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + + MAYASTOR + .get() + .unwrap() + .spawn(async move { + nexus_create( + "nexus0", + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + ), + format!( + 
"nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[2].endpoint.ip() + ), + ], + ) + .await + .unwrap(); + + let n = nexus_lookup("nexus0").unwrap(); + n.share_nvmf().await.unwrap(); + }) + .await; +} + +async fn nexus_destroy() { + MAYASTOR + .get() + .unwrap() + .spawn(async move { + nexus_lookup("nexus0").unwrap().destroy().await.unwrap(); + }) + .await; +} +async fn nexus_share() { + let n = nexus_lookup("nexus0").unwrap(); + n.share_nvmf().await.unwrap(); +} + +async fn nexus_create_2_way_add_one() { + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + MAYASTOR + .get() + .unwrap() + .spawn(async move { + nexus_create( + "nexus0", + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + ), + ], + ) + .await + .unwrap(); + }) + .await; + + // MAYASTOR + // .get() + // .unwrap() + // .spawn(async move { nexus_share().await }) + // .await; + + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + MAYASTOR + .get() + .unwrap() + .spawn(async move { + let n = nexus_lookup("nexus0").unwrap(); + + assert_eq!(n.children.len(), 2); + n.add_child( + &format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[2].endpoint.ip() + ), + true, + ) + .await + .unwrap(); + assert_eq!(n.children.len(), 3); + }) + .await; + + MAYASTOR + .get() + .unwrap() + .spawn(async move { nexus_share().await }) + .await; +} + +async fn nexus_2_way_destroy_destroy_child() { + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + MAYASTOR + .get() + .unwrap() + .spawn(async move { + nexus_create( + "nexus0", + 1024 * 1024 * 50, + None, + &[ + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + ), + format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[1].endpoint.ip() + ), + ], + ) + .await + .unwrap(); + + nexus_share().await; + }) 
+ .await; + + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + MAYASTOR + .get() + .unwrap() + .spawn(async move { + let n = nexus_lookup("nexus0").unwrap(); + + assert_eq!(n.children.len(), 2); + n.add_child( + &format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[2].endpoint.ip() + ), + true, + ) + .await + .unwrap(); + assert_eq!(n.children.len(), 3); + }) + .await; + + let hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + MAYASTOR + .get() + .unwrap() + .spawn(async move { + bdev_destroy(&format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + )) + .await + .unwrap(); + }) + .await; +} + +async fn create_targets() { + let mut hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); + + // for each grpc client, invoke these methods. + for h in &mut hdls { + // create the bdev + h.bdev + .create(BdevUri { + uri: "malloc:///disk0?size_mb=100".into(), + }) + .await + .unwrap(); + // share it over nvmf + h.bdev + .share(BdevShareRequest { + name: "disk0".into(), + proto: "nvmf".into(), + }) + .await + .unwrap(); + } +} + +#[tokio::test] +async fn nexus_add_remove() { + // create the docker containers + let compose = Builder::new() + .name("cargo-test") + .network("10.1.0.0/16") + .add_container("ms1") + .add_container("ms2") + .add_container("ms3") + .with_clean(true) + .with_prune(true) + .build() + .await + .unwrap(); + + // create the mayastor test instance + let ms = MayastorTest::new(MayastorCliArgs { + log_components: vec!["all".into()], + reactor_mask: "0x3".to_string(), + no_pci: true, + grpc_endpoint: "0.0.0.0".to_string(), + ..Default::default() + }); + + DOCKER_COMPOSE.set(compose).unwrap(); + + let ms = MAYASTOR.get_or_init(|| ms); + + create_targets().await; + nexus_3_way_create().await; + nexus_destroy().await; + + nexus_create_2_way_add_one().await; + nexus_destroy().await; + + nexus_2_way_destroy_destroy_child().await; + ms.stop().await; + + 
DOCKER_COMPOSE.get().unwrap().down().await; +} diff --git a/spdk-sys/build.sh b/spdk-sys/build.sh index ffb95d0ca..a64eeb08a 100755 --- a/spdk-sys/build.sh +++ b/spdk-sys/build.sh @@ -5,7 +5,7 @@ # pushd spdk || { echo "Can not find spdk directory"; exit; } - +rm libspdk.so [ ! -d dpdk/.git ] || { echo "Submodules not checked out?"; exit; } From 1da52b88f7e99fe198d9778337feba6a9a66e0f2 Mon Sep 17 00:00:00 2001 From: Antonin Kral Date: Fri, 20 Nov 2020 14:58:40 +0100 Subject: [PATCH 67/92] Remove all containers after premature end of the tests This is considered a workaround (aka ugly hack) for CAS-524. As it kills all containers, it doesn't play nicely with other tests being executed on the same machine. Also explicitly switching to `bash` as `sh` doesn't implement `pipefail`. --- scripts/cargo-test.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/scripts/cargo-test.sh b/scripts/cargo-test.sh index c9ee912eb..649d276d9 100755 --- a/scripts/cargo-test.sh +++ b/scripts/cargo-test.sh @@ -1,4 +1,14 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash + +cleanup_handler() { + for c in $(docker ps -a --format '{{.ID}}') ; do + docker kill "$c" || true + docker rm "$c" || true + done +} + +trap cleanup_handler ERR INT QUIT TERM HUP + set -euxo pipefail export PATH=$PATH:${HOME}/.cargo/bin ( cd jsonrpc && cargo test ) From c117116fcb35bd7853230268bc84e0f6e3837259 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 20 Nov 2020 15:15:55 +0100 Subject: [PATCH 68/92] nexus: multi core reconfigure looses callback When using multiple cores reconfiguration happens concurrently. This result in reconfiguration to not work properly. This fix adds a per reconfiguration context. 
--- mayastor/src/bdev/nexus/nexus_bdev.rs | 22 +++++++----- mayastor/src/bdev/nexus/nexus_channel.rs | 46 +++++++++++++++++++----- mayastor/src/bdev/nexus/nexus_io.rs | 8 ++--- mayastor/src/bdev/nexus/nexus_nbd.rs | 1 - mayastor/src/core/mod.rs | 3 +- mayastor/tests/replica_snapshot.rs | 2 +- mayastor/tests/yaml_config.rs | 2 +- 7 files changed, 57 insertions(+), 27 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index f12e98b52..7ca2a7656 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -40,7 +40,12 @@ use crate::{ nexus, nexus::{ instances, - nexus_channel::{DREvent, NexusChannel, NexusChannelInner}, + nexus_channel::{ + DREvent, + NexusChannel, + NexusChannelInner, + ReconfigureCtx, + }, nexus_child::{ChildError, ChildState, NexusChild}, nexus_io::{nvme_admin_opc, Bio, IoStatus, IoType}, nexus_label::LabelError, @@ -55,6 +60,7 @@ use crate::{ subsys, subsys::{Config, NvmfSubsystem}, }; +use std::ptr::NonNull; /// Obtain the full error chain pub trait VerboseError { @@ -309,8 +315,6 @@ pub struct Nexus { bdev_raw: *mut spdk_bdev, /// represents the current state of the Nexus pub(super) state: std::sync::Mutex, - /// Dynamic Reconfigure event - pub dr_complete_notify: Option>, /// the offset in num blocks where the data partition starts pub data_ent_offset: u64, /// the handle to be used when sharing the nexus, this allows for the bdev @@ -409,7 +413,6 @@ impl Nexus { bdev: Bdev::from(&*b as *const _ as *mut spdk_bdev), state: std::sync::Mutex::new(NexusState::Init), bdev_raw: Box::into_raw(b), - dr_complete_notify: None, data_ent_offset: 0, share_handle: None, size, @@ -452,17 +455,20 @@ impl Nexus { pub(crate) async fn reconfigure(&mut self, event: DREvent) { let _var = self.reconfigure_mutex.lock().await; let (s, r) = oneshot::channel::(); - assert!(self.dr_complete_notify.is_none()); - self.dr_complete_notify = Some(s); info!( "{}: Dynamic reconfiguration 
event: {:?} started", self.name, event ); - NexusChannel::reconfigure(self.as_ptr(), &event); + let ctx = Box::new(ReconfigureCtx::new( + s, + NonNull::new(self.as_ptr()).unwrap(), + )); + + NexusChannel::reconfigure(self.as_ptr(), ctx, &event); - let result = r.await; + let result = r.await.expect("reconfigure sender already dropped"); info!( "{}: Dynamic reconfiguration event: {:?} completed {:?}", diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index c5afe19fe..ed6c6bcbe 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -8,6 +8,7 @@ use spdk_sys::{ spdk_io_channel, spdk_io_channel_iter, spdk_io_channel_iter_get_channel, + spdk_io_channel_iter_get_ctx, spdk_io_channel_iter_get_io_device, }; @@ -15,6 +16,8 @@ use crate::{ bdev::{nexus::nexus_child::ChildState, Nexus}, core::BdevHandle, }; +use futures::channel::oneshot; +use std::ptr::NonNull; /// io channel, per core #[repr(C)] @@ -32,6 +35,27 @@ pub(crate) struct NexusChannelInner { device: *mut c_void, } +#[derive(Debug)] +/// reconfigure context holding among others +/// the completion channel. +pub struct ReconfigureCtx { + /// channel to send completion on. 
+ sender: oneshot::Sender, + device: NonNull, +} + +impl ReconfigureCtx { + pub(crate) fn new( + sender: oneshot::Sender, + device: NonNull, + ) -> Self { + Self { + sender, + device, + } + } +} + #[derive(Debug)] /// Dynamic Reconfiguration Events occur when a child is added or removed pub enum DREvent { @@ -173,7 +197,11 @@ impl NexusChannel { } /// function called when we receive a Dynamic Reconfigure event (DR) - pub extern "C" fn reconfigure(device: *mut c_void, event: &DREvent) { + pub extern "C" fn reconfigure( + device: *mut c_void, + ctx: Box, + event: &DREvent, + ) { match event { DREvent::ChildOffline | DREvent::ChildOnline @@ -184,7 +212,7 @@ impl NexusChannel { spdk_for_each_channel( device, Some(NexusChannel::refresh_io_channels), - std::ptr::null_mut(), + Box::into_raw(ctx).cast(), Some(Self::reconfigure_completed), ); }, @@ -200,12 +228,14 @@ impl NexusChannel { Nexus::from_raw(spdk_io_channel_iter_get_io_device(ch_iter)) }; - trace!("{}: Reconfigure completed", nexus.name); - if let Some(sender) = nexus.dr_complete_notify.take() { - sender.send(status).expect("reconfigure channel gone"); - } else { - error!("DR error"); - } + let ctx: Box = unsafe { + Box::from_raw( + spdk_io_channel_iter_get_ctx(ch_iter) as *mut ReconfigureCtx + ) + }; + + info!("{}: Reconfigure completed", nexus.name); + ctx.sender.send(status).expect("reconfigure channel gone"); } /// Refresh the IO channels of the underlying children. 
Typically, this is diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index f5e5c53e6..6d4c916ea 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -145,9 +145,7 @@ impl From for IoType { 15 => Self::CompareAndWrite, 16 => Self::Abort, 17 => Self::IoNumTypes, - _ => { - panic!("invalid IO type") - } + _ => panic!("invalid IO type"), } } } @@ -178,9 +176,7 @@ impl From for IoStatus { -1 => Self::Failed, 0 => Self::Pending, 1 => Self::Success, - _ => { - panic!("invalid status code") - } + _ => panic!("invalid status code"), } } } diff --git a/mayastor/src/bdev/nexus/nexus_nbd.rs b/mayastor/src/bdev/nexus/nexus_nbd.rs index c00dec72f..54e18d7cf 100644 --- a/mayastor/src/bdev/nexus/nexus_nbd.rs +++ b/mayastor/src/bdev/nexus/nexus_nbd.rs @@ -41,7 +41,6 @@ pub enum NbdError { } extern "C" { - //TODO this is defined in nbd_internal.h but is not part of our bindings fn nbd_disconnect(nbd: *mut spdk_nbd_disk); } diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index e497ba542..095a83f03 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -19,11 +19,10 @@ pub use env::{ }; pub use handle::BdevHandle; +pub use nvme::{GenericStatusCode, NvmeStatus}; pub use reactor::{Reactor, ReactorState, Reactors, REACTOR_LIST}; pub use share::{Protocol, Share}; pub use thread::Mthread; -pub use nvme::{NvmeStatus,GenericStatusCode}; - mod bdev; mod channel; diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs index 6d2a2263e..120b52ae1 100644 --- a/mayastor/tests/replica_snapshot.rs +++ b/mayastor/tests/replica_snapshot.rs @@ -103,7 +103,7 @@ fn share_snapshot(t: u64) { "nvmf", ]); } - +#[ignore] #[test] fn replica_snapshot() { generate_config(); diff --git a/mayastor/tests/yaml_config.rs b/mayastor/tests/yaml_config.rs index 8afbfae37..ad769eca9 100644 --- a/mayastor/tests/yaml_config.rs +++ b/mayastor/tests/yaml_config.rs @@ -209,7 +209,7 @@ fn 
yaml_pool_tests() { // delete the pool common::delete_file(&["/tmp/disk1.img".into()]); } - +#[ignore] #[test] // Try to see if we can start two mayastor instances where the nvmf and iSCSI // target is disabled for one of them. If we did not disable one of them, one From 78f1d3278509f7c320ff7d794181669fc8f73659 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 20 Nov 2020 16:05:51 +0100 Subject: [PATCH 69/92] test: remove error_store test + clippy --- csi/src/filesystem_vol.rs | 2 +- mayastor/src/bdev/nexus/nexus_bdev.rs | 18 ++-- mayastor/src/core/descriptor.rs | 4 +- mayastor/src/subsys/nvmf/subsystem.rs | 2 - mayastor/tests/bdev_test.rs | 1 - mayastor/tests/error_store.rs | 140 -------------------------- nvmeadm/src/nvmf_discovery.rs | 2 +- nvmeadm/src/nvmf_subsystem.rs | 2 +- 8 files changed, 11 insertions(+), 160 deletions(-) delete mode 100644 mayastor/tests/error_store.rs diff --git a/csi/src/filesystem_vol.rs b/csi/src/filesystem_vol.rs index 7e5a49db3..b1bc64e29 100644 --- a/csi/src/filesystem_vol.rs +++ b/csi/src/filesystem_vol.rs @@ -175,7 +175,7 @@ pub fn publish_fs_volume( // TODO: Should also check that the staged "device" // corresponds to the the volume uuid - if mnt.fs_type != "" && mnt.fs_type != staged.fstype { + if !mnt.fs_type.is_empty() && mnt.fs_type != staged.fstype { return Err(failure!( Code::InvalidArgument, "Failed to publish volume {}: filesystem type ({}) does not match staged volume ({})", diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 7ca2a7656..a9d874687 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -606,13 +606,10 @@ impl Nexus { /// resume IO to the bdev pub(crate) async fn resume(&self) -> Result<(), Error> { - match self.shared() { - Some(Protocol::Nvmf) => { - if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { - subsystem.resume().await.unwrap(); - } + if let Some(Protocol::Nvmf) = self.shared() { + if let 
Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + subsystem.resume().await.unwrap(); } - _ => {} } Ok(()) @@ -621,13 +618,10 @@ impl Nexus { /// suspend any incoming IO to the bdev pausing the controller allows us to /// handle internal events and which is a protocol feature. pub(crate) async fn pause(&self) -> Result<(), Error> { - match self.shared() { - Some(Protocol::Nvmf) => { - if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { - subsystem.pause().await.unwrap(); - } + if let Some(Protocol::Nvmf) = self.shared() { + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + subsystem.pause().await.unwrap(); } - _ => {} } Ok(()) diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs index 2c424d382..c927bbbc6 100644 --- a/mayastor/src/core/descriptor.rs +++ b/mayastor/src/core/descriptor.rs @@ -192,7 +192,7 @@ impl Drop for Descriptor { impl Debug for Descriptor { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - if self.0 != std::ptr::null_mut() { + if !self.0.is_null() { write!( f, "Descriptor {:p} for bdev: {}", @@ -200,7 +200,7 @@ impl Debug for Descriptor { self.get_bdev().name() ) } else { - write!(f, "not alloacted") + Ok(()) } } } diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index e6ea6883c..a2735d51e 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -372,7 +372,6 @@ impl NvmfSubsystem { /// we are not making use of pause and resume yet but this will be needed /// when we start to move things around - #[instrument(level = "debug", err)] pub async fn pause(&self) -> Result<(), Error> { extern "C" fn pause_cb( ss: *mut spdk_nvmf_subsystem, @@ -413,7 +412,6 @@ impl NvmfSubsystem { msg: "failed to pause the subsystem".to_string(), }) } - #[instrument(level = "debug", err)] pub async fn resume(&self) -> Result<(), Error> { extern "C" fn resume_cb( ss: *mut spdk_nvmf_subsystem, diff --git 
a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs index 39e8d2520..2dcc130d4 100644 --- a/mayastor/tests/bdev_test.rs +++ b/mayastor/tests/bdev_test.rs @@ -42,7 +42,6 @@ use std::{ use once_cell::sync::OnceCell; const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; -//const NEXUS_NAME: &str = "nexus-00000000-0000-0000-0000-000000000001"; use common::compose::{Builder, ComposeTest, MayastorTest}; use mayastor::{ diff --git a/mayastor/tests/error_store.rs b/mayastor/tests/error_store.rs deleted file mode 100644 index 179c6ed99..000000000 --- a/mayastor/tests/error_store.rs +++ /dev/null @@ -1,140 +0,0 @@ -// pub mod common; -// use mayastor::{ -// bdev::{NexusErrStore, QueryType}, -// }; -// use std::time::{Duration, Instant}; -// use crate::mayastor::bdev::nexus::nexus_io::{IoStatus, IoType}; -// use mayastor::bdev::nexus::nexus_io::{IoType, IoStatus}; -// -// const ALL_FLAGS: u32 = 0xffff_ffff; -// #[skip] -// #[test] -// fn nexus_child_error_store_test() { -// let mut es = NexusErrStore::new(15); -// let start_inst = Instant::now(); -// -// let mut errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 0); -// -// add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 5); -// add_records(&mut es, 1, NexusErrStore::IO_TYPE_READ, start_inst, 10); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 2); -// -// add_records(&mut es, 2, NexusErrStore::IO_TYPE_WRITE, start_inst, 11); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 4); -// -// add_records(&mut es, 3, NexusErrStore::IO_TYPE_UNMAP, start_inst, 12); -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 7); -// -// add_records(&mut es, 4, NexusErrStore::IO_TYPE_FLUSH, start_inst, 13); -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 11); -// -// add_records(&mut es, 5, 
NexusErrStore::IO_TYPE_RESET, start_inst, 14); -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// // last record over-writes the first, hence 15 not 16 -// assert_eq!(errors, 15); -// -// /////////////////// filter by time //////////////////////////// -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 0); -// assert_eq!(errors, 15); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 15); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 11); -// assert_eq!(errors, 14); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 12); -// assert_eq!(errors, 12); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 13); -// assert_eq!(errors, 9); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 14); -// assert_eq!(errors, 5); -// -// errors = do_query(&es, ALL_FLAGS, ALL_FLAGS, start_inst, 15); -// assert_eq!(errors, 0); -// -// errors = es.query(ALL_FLAGS, ALL_FLAGS, None, QueryType::Total); // no -// time specified assert_eq!(errors, 15); -// -// /////////////////////// filter by op //////////////////////// -// errors = do_query(&es, NexusErrStore::READ_FLAG, ALL_FLAGS, start_inst, -// 10); assert_eq!(errors, 1); -// -// errors = -// do_query(&es, NexusErrStore::WRITE_FLAG, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 2); -// -// errors = -// do_query(&es, NexusErrStore::UNMAP_FLAG, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 3); -// -// errors = -// do_query(&es, NexusErrStore::FLUSH_FLAG, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 4); -// -// errors = -// do_query(&es, NexusErrStore::RESET_FLAG, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 5); -// -// errors = do_query(&es, 0, ALL_FLAGS, start_inst, 10); -// assert_eq!(errors, 0); -// -// ////////////////////// filter by failure ////////////////////////// -// -// errors = do_query( -// &es, -// ALL_FLAGS, -// NexusErrStore::IO_FAILED_FLAG, -// start_inst, -// 10, -// 
); -// assert_eq!(errors, 15); -// -// errors = do_query(&es, ALL_FLAGS, 0, start_inst, 10); -// assert_eq!(errors, 0); -// } -// -// fn add_records( -// es: &mut NexusErrStore, -// how_many: usize, -// op: IoType, -// start_inst: Instant, -// when: u64, -// ) { -// let offset: u64 = 0; -// let num_of_blocks: u64 = 1; -// for _ in 0 .. how_many { -// es.add_record( -// op, -// IoStatus::Failed, -// offset, -// num_of_blocks, -// start_inst + Duration::from_nanos(when), -// ); -// } -// } -// -// fn do_query( -// es: &NexusErrStore, -// op_flags: u32, -// err_flags: u32, -// start_inst: Instant, -// when: u64, -// ) -> u32 { -// es.query( -// op_flags, -// err_flags, -// Some(start_inst + Duration::from_nanos(when)), -// QueryType::Total, -// ) -//} diff --git a/nvmeadm/src/nvmf_discovery.rs b/nvmeadm/src/nvmf_discovery.rs index e98429e59..a022576a7 100644 --- a/nvmeadm/src/nvmf_discovery.rs +++ b/nvmeadm/src/nvmf_discovery.rs @@ -243,7 +243,7 @@ impl Discovery { let numdl: u16 = (dword_count & 0xFFFF) as u16; let numdu: u16 = (dword_count >> 16) as u16; - cmd.cdw10 = 0x70 | u32::from(numdl) << 16 as u32; + cmd.cdw10 = 0x70 | u32::from(numdl) << 16_u32; cmd.cdw11 = u32::from(numdu); let _ret = unsafe { diff --git a/nvmeadm/src/nvmf_subsystem.rs b/nvmeadm/src/nvmf_subsystem.rs index 324646714..4345cc73e 100644 --- a/nvmeadm/src/nvmf_subsystem.rs +++ b/nvmeadm/src/nvmf_subsystem.rs @@ -45,7 +45,7 @@ impl Subsystem { let serial = parse_value::(&source, "serial")?; let model = parse_value::(&source, "model")?; - if serial == "" || model == "" { + if serial.is_empty() || model.is_empty() { return Err(NvmeError::CtlNotFound { text: "discovery controller".into(), }); From 6e59c9ea34c08fd8d3cae391441883f3890785f9 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 20 Nov 2020 16:37:54 +0100 Subject: [PATCH 70/92] test: adjust core mask CI does not have more then 4 cores --- mayastor/tests/bdev_test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs index 2dcc130d4..6b6e2837b 100644 --- a/mayastor/tests/bdev_test.rs +++ b/mayastor/tests/bdev_test.rs @@ -288,7 +288,7 @@ async fn nvmf_bdev_test() { DOCKER_COMPOSE.set(compose).unwrap(); // this is based on the number of containers above. - let mask = format!("{:#01x}", (1 << 4) | (1 << 5)); + let mask = format!("{:#01x}", (1 << 1) | (1 << 2)); let ms = MayastorTest::new(MayastorCliArgs { reactor_mask: mask, no_pci: true, From b96eb81e2ab84e8e93759910f70c5fe23e8247eb Mon Sep 17 00:00:00 2001 From: Antonin Kral Date: Fri, 20 Nov 2020 16:58:05 +0100 Subject: [PATCH 71/92] Replace dynamic label with static .name having test name as its value Label is named $label_prefix.name and set to value of $name. --- composer/src/lib.rs | 20 ++++++++++++++------ scripts/cargo-test.sh | 6 +++++- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/composer/src/lib.rs b/composer/src/lib.rs index 7c40ef45c..5baaf88f7 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -324,7 +324,7 @@ impl Builder { network_id: "".to_string(), containers: Default::default(), ipam, - label: format!("io.mayastor.test.{}", self.name), + label_prefix: "io.mayastor.test".to_string(), clean: self.clean, }; @@ -371,8 +371,9 @@ pub struct ComposeTest { containers: HashMap, /// the default network configuration we use for our test cases ipam: Ipam, - /// set on containers and networks - label: String, + /// prefix for labels set on containers and networks + /// $prefix.name = $name will be created automatically + label_prefix: String, /// automatically clean up the things we have created for this test clean: bool, } @@ -413,6 +414,7 @@ impl ComposeTest { return Ok(self.network_id.clone()); } + let name_label = format!("{}.name", self.label_prefix); let create_opts = CreateNetworkOptions { name: self.name.as_str(), check_duplicate: true, @@ -425,7 +427,9 @@ impl ComposeTest { options: vec![("com.docker.network.bridge.name", 
"mayabridge0")] .into_iter() .collect(), - labels: vec![(self.label.as_str(), "true")].into_iter().collect(), + labels: vec![(name_label.as_str(), self.name.as_str())] + .into_iter() + .collect(), }; self.docker.create_network(create_opts).await.map(|r| { @@ -468,7 +472,8 @@ impl ComposeTest { all: true, filters: vec![( "label", - vec![format!("{}=true", self.label).as_str()], + vec![format!("{}.name={}", self.label_prefix, self.name) + .as_str()], )] .into_iter() .collect(), @@ -577,6 +582,7 @@ impl ComposeTest { }) } + let name_label = format!("{}.name", self.label_prefix); let config = Config { cmd: Some(cmd.iter().map(|s| s.as_str()).collect()), env: Some(env.iter().map(|s| s.as_str()).collect()), @@ -597,7 +603,9 @@ impl ComposeTest { .collect(), ), labels: Some( - vec![(self.label.as_str(), "true")].into_iter().collect(), + vec![(name_label.as_str(), self.name.as_str())] + .into_iter() + .collect(), ), exposed_ports: Some(exposed_ports), ..Default::default() diff --git a/scripts/cargo-test.sh b/scripts/cargo-test.sh index 649d276d9..0c9b448a1 100755 --- a/scripts/cargo-test.sh +++ b/scripts/cargo-test.sh @@ -1,10 +1,14 @@ #!/usr/bin/env bash cleanup_handler() { - for c in $(docker ps -a --format '{{.ID}}') ; do + for c in $(docker ps -a --filter "label=io.mayastor.test.name" --format '{{.ID}}') ; do docker kill "$c" || true docker rm "$c" || true done + + for n in $(docker network ls --filter "label=io.mayastor.test.name" --format '{{.ID}}') ; do + docker network rm "$n" || true + done } trap cleanup_handler ERR INT QUIT TERM HUP From 11645c540012be82aa572d61a76f6350308f59ab Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 20 Nov 2020 21:17:24 +0100 Subject: [PATCH 72/92] ci: further reduce resources needed --- mayastor/tests/bdev_test.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs index 6b6e2837b..3300580f7 100644 --- a/mayastor/tests/bdev_test.rs +++ 
b/mayastor/tests/bdev_test.rs @@ -117,7 +117,7 @@ async fn create_nexus(container: &str, mut kiddos: Vec) -> String { let bdev = h .bdev .create(BdevUri { - uri: "malloc:///disk0?size_mb=100".into(), + uri: "malloc:///disk0?size_mb=64".into(), }) .await .unwrap(); @@ -127,7 +127,7 @@ async fn create_nexus(container: &str, mut kiddos: Vec) -> String { h.mayastor .create_nexus(CreateNexusRequest { uuid: NEXUS_UUID.to_string(), - size: 96 * 1024 * 1024, + size: 60 * 1024 * 1024, children: kiddos, }) .await @@ -150,8 +150,8 @@ async fn create_nexus(container: &str, mut kiddos: Vec) -> String { /// IO flows through mayastorTest to all 3 containers async fn create_topology(queue: Arc) { let r1 = create_target("ms1").await; - let r2 = create_target("ms2").await; - let endpoint = create_nexus("ms3", vec![r1, r2]).await; + // let r2 = create_target("ms2").await; + let endpoint = create_nexus("ms3", vec![r1]).await; // the nexus is running on ms3 we will use a 4th instance of mayastor to // create a nvmf bdev and push IO to it. @@ -172,7 +172,7 @@ async fn create_topology(queue: Arc) { let job = io_driver::Builder::new() .core(c) .bdev(bdev) - .qd(64) + .qd(8) .io_size(512) .build() .await; @@ -272,10 +272,10 @@ async fn nvmf_bdev_test() { "ms1", Binary::from_dbg("mayastor").with_args(vec!["-l", "1"]), ) - .add_container_bin( - "ms2", - Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), - ) + // .add_container_bin( + // "ms2", + // Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), + // ) .add_container_bin( "ms3", Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), From 861fefef9840dc2a116ef0e8453094d419778f17 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Fri, 20 Nov 2020 19:21:35 +0000 Subject: [PATCH 73/92] cargo tests: Add a Nexus multipath test New test with 2 identical Nexuses on 2 nodes with the same replica on one node as their child. 
This requires creating the nexus with a local replica on one node first, then sharing the replica over nvmf before creating the nexus on the other node with the remote replica as its child. Use the kernel initiator to connect to both nexuses and check that both are disconnected when disconnecting by NQN. On Jenkins, clean up all nvmf connections after cargo tests in case this test fails to clean up. While I'm here, remove the temporary workaround to clean up state spdk_iscsi_conns shm files as SPDK 20.10 no longer creates such files. --- Jenkinsfile | 6 +- mayastor/tests/nexus_multipath.rs | 173 ++++++++++++++++++++++++++++++ 2 files changed, 175 insertions(+), 4 deletions(-) create mode 100644 mayastor/tests/nexus_multipath.rs diff --git a/Jenkinsfile b/Jenkinsfile index 31e327a80..ba5acc1e3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -108,8 +108,8 @@ pipeline { } post { always { - // temporary workaround for leaked spdk_iscsi_conns files - sh 'sudo rm -f /dev/shm/*' + // in case of abnormal termination of any nvmf test + sh 'sudo nvme disconnect-all' } } } @@ -121,8 +121,6 @@ pipeline { post { always { junit '*-xunit-report.xml' - // temporary workaround for leaked spdk_iscsi_conns files - sh 'sudo rm -f /dev/shm/*' } } } diff --git a/mayastor/tests/nexus_multipath.rs b/mayastor/tests/nexus_multipath.rs new file mode 100644 index 000000000..adcb0f37d --- /dev/null +++ b/mayastor/tests/nexus_multipath.rs @@ -0,0 +1,173 @@ +//! Multipath NVMf tests +//! Create the same nexus on both nodes with a replica on 1 node their child. 
+use mayastor::{ + bdev::{nexus_create, nexus_lookup}, + core::MayastorCliArgs, +}; +use rpc::mayastor::{ + CreateNexusRequest, + CreatePoolRequest, + CreateReplicaRequest, + PublishNexusRequest, + ShareProtocolNexus, + ShareReplicaRequest, +}; +use std::process::Command; + +pub mod common; +use common::{compose::Builder, MayastorTest}; + +static POOL_NAME: &str = "tpool"; +static UUID: &str = "cdc2a7db-3ac3-403a-af80-7fadc1581c47"; +static HOSTNQN: &str = "nqn.2019-05.io.openebs"; + +#[tokio::test] +async fn nexus_multipath() { + // create a new composeTest + let test = Builder::new() + .name("nexus_shared_replica_test") + .network("10.1.0.0/16") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdls = test.grpc_handles().await.unwrap(); + + // create a pool on remote node + hdls[0] + .mayastor + .create_pool(CreatePoolRequest { + name: POOL_NAME.to_string(), + disks: vec!["malloc:///disk0?size_mb=64".into()], + }) + .await + .unwrap(); + + // create replica, not shared + hdls[0] + .mayastor + .create_replica(CreateReplicaRequest { + uuid: UUID.to_string(), + pool: POOL_NAME.to_string(), + size: 32 * 1024 * 1024, + thin: false, + share: 0, + }) + .await + .unwrap(); + + // create nexus on remote node with local replica as child + hdls[0] + .mayastor + .create_nexus(CreateNexusRequest { + uuid: UUID.to_string(), + size: 32 * 1024 * 1024, + children: [format!("loopback:///{}", UUID)].to_vec(), + }) + .await + .unwrap(); + + // share replica + hdls[0] + .mayastor + .share_replica(ShareReplicaRequest { + uuid: UUID.to_string(), + share: 1, + }) + .await + .unwrap(); + + let mayastor = MayastorTest::new(MayastorCliArgs::default()); + let ip0 = hdls[0].endpoint.ip(); + let nexus_name = format!("nexus-{}", UUID); + mayastor + .spawn(async move { + // create nexus on local node with remote replica as child + nexus_create( + &nexus_name, + 32 * 1024 * 1024, + Some(UUID), + &[format!("nvmf://{}:8420/{}:{}", ip0, HOSTNQN, UUID)], + ) + 
.await + .unwrap(); + // publish nexus on local node over nvmf + nexus_lookup(&nexus_name) + .unwrap() + .share(ShareProtocolNexus::NexusNvmf, None) + .await + .unwrap(); + }) + .await; + + // publish nexus on other node + hdls[0] + .mayastor + .publish_nexus(PublishNexusRequest { + uuid: UUID.to_string(), + key: "".to_string(), + share: ShareProtocolNexus::NexusNvmf as i32, + }) + .await + .unwrap(); + + let nqn = format!("{}:nexus-{}", HOSTNQN, UUID); + let status = Command::new("nvme") + .args(&["connect"]) + .args(&["-t", "tcp"]) + .args(&["-a", "127.0.0.1"]) + .args(&["-s", "8420"]) + .args(&["-n", &nqn]) + .status() + .unwrap(); + assert!( + status.success(), + "failed to connect to local nexus, {}", + status + ); + + // The first attempt often fails with "Duplicate cntlid x with y" error from + // kernel + for i in 0 .. 2 { + let status_c0 = Command::new("nvme") + .args(&["connect"]) + .args(&["-t", "tcp"]) + .args(&["-a", &ip0.to_string()]) + .args(&["-s", "8420"]) + .args(&["-n", &nqn]) + .status() + .unwrap(); + if i == 0 && status_c0.success() { + break; + } + assert!( + status_c0.success() || i != 1, + "failed to connect to remote nexus, {}", + status_c0 + ); + } + + // NQN: disconnected 2 controller(s) + let output_dis = Command::new("nvme") + .args(&["disconnect"]) + .args(&["-n", &nqn]) + .output() + .unwrap(); + assert!( + output_dis.status.success(), + "failed to disconnect from nexuses, {}", + output_dis.status + ); + let s = String::from_utf8(output_dis.stdout).unwrap(); + let v: Vec<&str> = s.split(' ').collect(); + tracing::info!("nvme disconnected: {:?}", v); + assert!(v.len() == 4); + assert!(v[1] == "disconnected"); + assert!( + v[0] == format!("NQN:{}", &nqn), + "mismatched NQN disconnected" + ); + assert!(v[2] == "2", "mismatched number of controllers disconnected"); +} From e86a7121c07f4c3c5cea89586e8e26a5c7fde221 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 20 Nov 2020 18:04:22 +0000 Subject: [PATCH 74/92] Fix rebuild bug and 
add new subcommand Check if there are any active tasks when completing the rebuild. Add check to fail rebuild if we failed to wait for a task. Add new stats subcommand to retrieve the rebuild stats. --- mayastor-test/test_rebuild.js | 15 ++++ mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs | 37 +++++++++- mayastor/src/bin/cli/rebuild_cli.rs | 70 ++++++++++++++++++- mayastor/src/grpc/mayastor_grpc.rs | 12 ++++ mayastor/src/rebuild/rebuild_api.rs | 6 +- mayastor/src/rebuild/rebuild_impl.rs | 18 ++++- rpc/proto/mayastor.proto | 16 +++++ 7 files changed, 166 insertions(+), 8 deletions(-) diff --git a/mayastor-test/test_rebuild.js b/mayastor-test/test_rebuild.js index 27bfb569d..ad77ba924 100644 --- a/mayastor-test/test_rebuild.js +++ b/mayastor-test/test_rebuild.js @@ -112,6 +112,17 @@ describe('rebuild tests', function () { assert.equal(res.state, expected); } + async function checkRebuildStats () { + const stats = await client.getRebuildStats().sendMessage(rebuildArgs); + assert.isTrue(stats.blocksTotal > 0); + assert.isTrue(stats.blocksRecovered > 0); + assert.isTrue(stats.progress > 0); + assert.isTrue(stats.segmentSizeBlks > 0); + assert.isTrue(stats.blockSize === 4096); + assert.isTrue(stats.tasksTotal > 0); + assert.isTrue(stats.tasksActive === 0); + } + function pingMayastor (done) { // use harmless method to test if the mayastor is up and running client @@ -311,6 +322,10 @@ describe('rebuild tests', function () { it('check number of rebuilds', async () => { await checkNumRebuilds('1'); }); + + it('check stats', async () => { + await checkRebuildStats(); + }); }); describe('resuming rebuild', function () { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index 9cb00e72b..19a296197 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -1,7 +1,11 @@ use futures::channel::oneshot::Receiver; use snafu::ResultExt; -use 
rpc::mayastor::{RebuildProgressReply, RebuildStateReply}; +use rpc::mayastor::{ + RebuildProgressReply, + RebuildStateReply, + RebuildStatsReply, +}; use crate::{ bdev::{ @@ -21,7 +25,13 @@ use crate::{ VerboseError, }, core::Reactors, - rebuild::{ClientOperations, RebuildError, RebuildJob, RebuildState}, + rebuild::{ + ClientOperations, + RebuildError, + RebuildJob, + RebuildState, + RebuildStats, + }, }; impl Nexus { @@ -157,6 +167,15 @@ impl Nexus { }) } + /// Return the stats of a rebuild job + pub async fn get_rebuild_stats( + &mut self, + name: &str, + ) -> Result { + let rj = self.get_rebuild_job(name)?; + Ok(rj.stats().into()) + } + /// Returns the rebuild progress of child target `name` pub fn get_rebuild_progress( &self, @@ -324,3 +343,17 @@ impl Nexus { } } } + +impl From for RebuildStatsReply { + fn from(stats: RebuildStats) -> Self { + RebuildStatsReply { + blocks_total: stats.blocks_total, + blocks_recovered: stats.blocks_recovered, + progress: stats.progress, + segment_size_blks: stats.segment_size_blks, + block_size: stats.block_size, + tasks_total: stats.tasks_total, + tasks_active: stats.tasks_active, + } + } +} diff --git a/mayastor/src/bin/cli/rebuild_cli.rs b/mayastor/src/bin/cli/rebuild_cli.rs index f8f6fde4d..9496fc061 100644 --- a/mayastor/src/bin/cli/rebuild_cli.rs +++ b/mayastor/src/bin/cli/rebuild_cli.rs @@ -16,6 +16,7 @@ pub async fn handler( ("pause", Some(args)) => pause(ctx, &args).await, ("resume", Some(args)) => resume(ctx, &args).await, ("state", Some(args)) => state(ctx, &args).await, + ("stats", Some(args)) => stats(ctx, &args).await, ("progress", Some(args)) => progress(ctx, &args).await, (cmd, _) => { Err(Status::not_found(format!("command {} does not exist", cmd))) @@ -99,6 +100,21 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .help("uri of child to get the rebuild state from"), ); + let stats = SubCommand::with_name("stats") + .about("gets the rebuild stats of the child") + .arg( + Arg::with_name("uuid") + .required(true) 
+ .index(1) + .help("uuid of the nexus"), + ) + .arg( + Arg::with_name("uri") + .required(true) + .index(2) + .help("uri of child to get the rebuild stats from"), + ); + let progress = SubCommand::with_name("progress") .about("shows the progress of a rebuild") .arg( @@ -126,6 +142,7 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> { .subcommand(pause) .subcommand(resume) .subcommand(state) + .subcommand(stats) .subcommand(progress) } @@ -228,7 +245,53 @@ async fn state( }) .await? .into_inner(); - println!("{}", response.state); + ctx.print_list(vec!["state"], vec![vec![response.state]]); + Ok(()) +} + +async fn stats( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> Result<(), Status> { + let uuid = matches.value_of("uuid").unwrap().to_string(); + let uri = matches.value_of("uri").unwrap().to_string(); + + ctx.v2(&format!( + "Getting the rebuild stats of child {} on nexus {}", + uri, uuid + )); + let response = ctx + .client + .get_rebuild_stats(rpc::RebuildStatsRequest { + uuid: uuid.clone(), + uri: uri.clone(), + }) + .await? + .into_inner(); + + ctx.print_list( + vec![ + "blocks_total", + "blocks_recovered", + "progress (%)", + "segment_size_blks", + "block_size", + "tasks_total", + "tasks_active", + ], + vec![vec![ + response.blocks_total, + response.blocks_recovered, + response.progress, + response.segment_size_blks, + response.block_size, + response.tasks_total, + response.tasks_active, + ] + .iter() + .map(|s| s.to_string()) + .collect()], + ); Ok(()) } @@ -251,6 +314,9 @@ async fn progress( }) .await? 
.into_inner(); - println!("{}% complete", response.progress); + ctx.print_list( + vec!["progress (%)"], + vec![vec![response.progress.to_string()]], + ); Ok(()) } diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index aa038ab86..b4a9d57f4 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -385,6 +385,18 @@ impl mayastor_server::Mayastor for MayastorSvc { }})) } + #[instrument(level = "debug", err)] + async fn get_rebuild_stats( + &self, + request: Request, + ) -> GrpcResult { + let args = request.into_inner(); + trace!("{:?}", args); + Ok(Response::new(locally! { async move { + nexus_lookup(&args.uuid)?.get_rebuild_stats(&args.uri).await + }})) + } + #[instrument(level = "debug", err)] async fn get_rebuild_progress( &self, diff --git a/mayastor/src/rebuild/rebuild_api.rs b/mayastor/src/rebuild/rebuild_api.rs index 89f157fb4..01e653d39 100644 --- a/mayastor/src/rebuild/rebuild_api.rs +++ b/mayastor/src/rebuild/rebuild_api.rs @@ -140,12 +140,16 @@ pub struct RebuildStats { pub blocks_total: u64, /// number of blocks recovered pub blocks_recovered: u64, - /// rebuild progress in % (0-100) + /// rebuild progress in % pub progress: u64, /// granularity of each recovery copy in blocks pub segment_size_blks: u64, /// size in bytes of each block pub block_size: u64, + /// total number of concurrent rebuild tasks + pub tasks_total: u64, + /// number of current active tasks + pub tasks_active: u64, } /// Public facing operations on a Rebuild Job diff --git a/mayastor/src/rebuild/rebuild_impl.rs b/mayastor/src/rebuild/rebuild_impl.rs index 99423f3ec..0af3c9209 100644 --- a/mayastor/src/rebuild/rebuild_impl.rs +++ b/mayastor/src/rebuild/rebuild_impl.rs @@ -500,6 +500,8 @@ impl ClientOperations for RebuildJob { progress, segment_size_blks: self.segment_size_blks, block_size: self.block_size, + tasks_total: self.task_pool.total as u64, + tasks_active: self.task_pool.active as u64, } } @@ -597,11 +599,21 
@@ impl RebuildJob { } async fn await_all_tasks(&mut self) { - while self.await_one_task().await.is_some() { - if self.task_pool.active == 0 { - break; + debug!( + "Awaiting all active tasks({}) for rebuild {}", + self.task_pool.active, self.destination + ); + while self.task_pool.active > 0 { + if self.await_one_task().await.is_none() { + error!("Failed to wait for {} rebuild tasks due mpsc channel failure.", self.task_pool.active); + self.fail(); + return; } } + debug!( + "Finished awaiting all tasks for rebuild {}", + self.destination + ); } /// Sends one segment worth of data in a reactor future and notifies the diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto index e3a3fdcbf..e887af13c 100644 --- a/rpc/proto/mayastor.proto +++ b/rpc/proto/mayastor.proto @@ -60,6 +60,7 @@ service Mayastor { rpc PauseRebuild (PauseRebuildRequest) returns (Null) {} rpc ResumeRebuild (ResumeRebuildRequest) returns (Null) {} rpc GetRebuildState (RebuildStateRequest) returns (RebuildStateReply) {} + rpc GetRebuildStats (RebuildStatsRequest) returns (RebuildStatsReply) {} rpc GetRebuildProgress (RebuildProgressRequest) returns (RebuildProgressReply) {} // Snapshot operations @@ -292,6 +293,21 @@ message RebuildStateReply { string state = 1; // current rebuild state (i.e. ready/running/completed etc.) 
} +message RebuildStatsRequest { + string uuid = 1; // uuid of the nexus + string uri = 2; // uri of the destination child +} + +message RebuildStatsReply { + uint64 blocks_total = 1; // total number of blocks to recover + uint64 blocks_recovered = 2; // number of blocks recovered + uint64 progress = 3; // rebuild progress % + uint64 segment_size_blks = 4; // granularity of each recovery copy in blocks + uint64 block_size = 5; // size in bytes of each block + uint64 tasks_total = 6; // total number of concurrent rebuild tasks + uint64 tasks_active = 7; // number of current active tasks +} + message StartRebuildRequest { string uuid = 1; // uuid of the nexus string uri = 2; // uri of the child to be rebuilt From ae37ba724e0be5896643c2e3df3b8d8dfed67b4f Mon Sep 17 00:00:00 2001 From: Tom Marsh Date: Mon, 28 Sep 2020 10:25:02 +0100 Subject: [PATCH 75/92] terraform: Require the libvirt provider We need this field so that we work with terraform v0.13 --- terraform/README.adoc | 1 + terraform/mod/libvirt/main.tf | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/terraform/README.adoc b/terraform/README.adoc index 37c330819..75318206d 100644 --- a/terraform/README.adoc +++ b/terraform/README.adoc @@ -84,6 +84,7 @@ command and copy paste the output to `~/.kube/config`: ansible -i ansible-hosts -a 'sudo cat /etc/kubernetes/admin.conf' master ---- +Note that for libvirt the configuration works with Terraform versions 0.13 upwards. 
=== Setting up libvirt on Nixos To use the libvirt provider, you must enable libvirtd diff --git a/terraform/mod/libvirt/main.tf b/terraform/mod/libvirt/main.tf index 9da5964e5..b2f54a9a4 100644 --- a/terraform/mod/libvirt/main.tf +++ b/terraform/mod/libvirt/main.tf @@ -147,5 +147,12 @@ output "node_list" { } terraform { - required_version = ">= 0.12" + required_version = ">= 0.13" + + required_providers { + libvirt = { + source = "dmacvicar/libvirt" + version = "0.6.2" + } + } } From 970ccb383482a25be725819d23450609ea3b722b Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 23 Nov 2020 19:53:26 +0000 Subject: [PATCH 76/92] Bump up Jenkins hugepages to 4096 x2MB --- doc/jenkins.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/jenkins.md b/doc/jenkins.md index a5bbb1f69..091f70624 100644 --- a/doc/jenkins.md +++ b/doc/jenkins.md @@ -145,7 +145,7 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). services.jenkinsSlave.enable = true; services.iscsid.enable = true; - boot.kernelParams = ["hugepages=2048" "hugepagesz=2MB"]; + boot.kernelParams = ["hugepages=4096" "hugepagesz=2MB"]; boot.initrd.kernelModules = ["xfs"]; boot.kernelModules = [ "nbd" "xfs" "nvme_tcp" "kvm_intel" ]; boot.extraModprobeConfig = "options kvm_intel nested=1"; From 887acc7a8f0e5675c1122ba45890b2e16e4ea4ff Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Mon, 23 Nov 2020 15:48:57 +0000 Subject: [PATCH 77/92] Make the install test more platform agnostic Enumerate and query the nodes in the test cluster to a) Determine what IP address to use for the test registry b) If there a nodes labelled for mayastor c) Configure pools on suitable labelled nodes --- mayastor-test/e2e/install/install_test.go | 109 ++++++++++++++++++---- 1 file changed, 89 insertions(+), 20 deletions(-) diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go index 925802267..af27511a2 100644 --- 
a/mayastor-test/e2e/install/install_test.go +++ b/mayastor-test/e2e/install/install_test.go @@ -2,8 +2,8 @@ package basic_test import ( "context" + "errors" "fmt" - corev1 "k8s.io/api/core/v1" "os/exec" "path" "runtime" @@ -13,7 +13,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" + appsV1 "k8s.io/api/apps/v1" + coreV1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/deprecated/scheme" "k8s.io/client-go/rest" @@ -29,6 +30,72 @@ var k8sClient client.Client var k8sManager ctrl.Manager var testEnv *envtest.Environment +/// Enumerate the nodes in the k8s cluster and return +/// 1. the IP address of the master node (if one exists), +/// 2. the number of nodes labelled openebs.io/engine=mayastor +/// 3. the names of nodes labelled openebs.io/engine=mayastor +/// The assumption is that the test-registry is accessible via the IP addr of the master, +/// or any node in the cluster if the master noe does not exist +/// TODO Refine how we workout the address of the test-registry +func getTestClusterDetails() (string, int, []string, error) { + var master = "" + var nme = 0 + nodeList := coreV1.NodeList{} + if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { + return master, nme, nil, errors.New("failed to list nodes") + } + nodeIPs := make([]string, len(nodeList.Items)) + for ix, k8node := range nodeList.Items { + for _, k8Addr := range k8node.Status.Addresses { + if k8Addr.Type == coreV1.NodeInternalIP { + nodeIPs[ix] = k8Addr.Address + for label, value := range k8node.Labels { + if label == "node-role.kubernetes.io/master" { + master = k8Addr.Address + } + if label == "openebs.io/engine" && value == "mayastor" { + nme++ + } + } + } + } + } + + // At least one node where mayastor can be deployed must exist + if nme == 0 { + return "", 0, nil, errors.New("no usable nodes found for the mayastor engine") + } + + mayastorNodes := make([]string, nme) + ix := 0 + for _, k8node := range 
nodeList.Items { + for _, k8Addr := range k8node.Status.Addresses { + if k8Addr.Type == coreV1.NodeHostName { + for label, value := range k8node.Labels { + if label == "openebs.io/engine" && value == "mayastor" { + mayastorNodes[ix] = k8Addr.Address + ix++ + } + } + } + } + } + + // Redundant check, but keep it anyway, we are writing a test after all. + // We should have found at least one node! + if len(nodeIPs) == 0 { + return "", 0, nil, errors.New("no usable nodes found") + } + + /// TODO Refine how we workout the address of the test-registry + /// If there is master node, use its IP address as the registry IP address + if len(master) != 0 { + return master, nme, mayastorNodes, nil + } + /// Otherwise choose the IP address of first node in the list as the registry IP address + return nodeIPs[0], nme, mayastorNodes, nil +} + // Encapsulate the logic to find where the deploy yamls are func getDeployYamlDir() string { _, filename, _, _ := runtime.Caller(0) @@ -49,12 +116,12 @@ func getTemplateYamlDir() string { return path.Clean(filename + "/../deploy") } -func makeImageName(registryaddress string, registryport string, imagename string, imageversion string) string { - return registryaddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion +func makeImageName(registryAddress string, registryport string, imagename string, imageversion string) string { + return registryAddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion } -func applyTemplatedYaml(filename string, imagename string) { - fullimagename := makeImageName("172.18.8.101", "30291", imagename, "ci") +func applyTemplatedYaml(filename string, imagename string, registryAddress string) { + fullimagename := makeImageName(registryAddress, "30291", imagename, "ci") bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -" cmd := exec.Command("bash", "-c", bashcmd) cmd.Dir = getTemplateYamlDir() @@ -65,7 +132,7 @@ func 
applyTemplatedYaml(filename string, imagename string) { // We expect this to fail a few times before it succeeds, // so no throwing errors from here. func mayastorReadyPodCount() int { - var mayastorDaemonSet appsv1.DaemonSet + var mayastorDaemonSet appsV1.DaemonSet if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: "mayastor"}, &mayastorDaemonSet) != nil { fmt.Println("Failed to get mayastor DaemonSet") return -1 @@ -74,7 +141,7 @@ func mayastorReadyPodCount() int { } func moacReadyPodCount() int { - var moacDeployment appsv1.Deployment + var moacDeployment appsV1.Deployment if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "moac", Namespace: "mayastor"}, &moacDeployment) != nil { fmt.Println("Failed to get MOAC deployment") return -1 @@ -86,21 +153,27 @@ func moacReadyPodCount() int { // We deliberately call out to kubectl, rather than constructing the client-go // objects, so that we can verfiy the local deploy yamls are correct. func installMayastor() { + registryAddress, numMayastorInstances, mayastorNodes, err := getTestClusterDetails() + Expect(err).ToNot(HaveOccurred()) + Expect(numMayastorInstances).ToNot(Equal(0)) + + fmt.Printf("registry address %v, number of mayastor instances=%v\n", registryAddress, numMayastorInstances) + applyDeployYaml("namespace.yaml") applyDeployYaml("storage-class.yaml") applyDeployYaml("moac-rbac.yaml") applyDeployYaml("mayastorpoolcrd.yaml") applyDeployYaml("nats-deployment.yaml") - applyTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi") - applyTemplatedYaml("moac-deployment.yaml.template", "moac") - applyTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor") + applyTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi", registryAddress) + applyTemplatedYaml("moac-deployment.yaml.template", "moac", registryAddress) + applyTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor", registryAddress) // Given the yamls and the environment described in the test 
readme, - // we expect mayastor to be running on exactly 3 nodes. + // we expect mayastor to be running on exactly 2 nodes. Eventually(mayastorReadyPodCount, "120s", // timeout "1s", // polling interval - ).Should(Equal(3)) + ).Should(Equal(numMayastorInstances)) Eventually(moacReadyPodCount(), "60s", // timeout @@ -109,13 +182,9 @@ func installMayastor() { // Now create pools on all nodes. // Note the disk for use on each node has been set in deploy/pool.yaml - nodeList := corev1.NodeList{} - if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { - fmt.Println("Failed to list Nodes, pools not created") - return - } - for _, k8node := range nodeList.Items { - bashcmd := "NODE_NAME=" + k8node.Name + " envsubst < " + "pool.yaml" + " | kubectl apply -f -" + // TODO make the pool disk configurable + for _, mayastorNode := range mayastorNodes { + bashcmd := "NODE_NAME=" + mayastorNode + " envsubst < " + "pool.yaml" + " | kubectl apply -f -" cmd := exec.Command("bash", "-c", bashcmd) cmd.Dir = getTemplateYamlDir() _, err := cmd.CombinedOutput() From 3668060498bbe97e11b72dd2367fee2c56e90e9c Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Mon, 23 Nov 2020 12:27:02 +0100 Subject: [PATCH 78/92] test: remove ms_exec and fix clippies This also adds a new test which is disabled still as its WIP but depends on this. 
--- composer/src/lib.rs | 8 +- .../src/bdev/nexus/nexus_bdev_children.rs | 17 +- mayastor/src/bdev/nexus/nexus_fn_table.rs | 16 +- mayastor/src/bdev/nexus/nexus_label.rs | 6 +- mayastor/src/bin/casperf.rs | 8 +- mayastor/src/bin/initiator.rs | 7 +- mayastor/src/core/io_driver.rs | 5 +- mayastor/src/core/mod.rs | 4 + mayastor/src/core/reactor.rs | 24 +- mayastor/src/grpc/mayastor_grpc.rs | 2 +- mayastor/src/host/blk_device.rs | 18 +- mayastor/src/subsys/config/mod.rs | 12 +- mayastor/src/subsys/nvmf/subsystem.rs | 6 +- mayastor/src/subsys/nvmf/transport.rs | 8 +- mayastor/tests/common/mod.rs | 1 - mayastor/tests/common/ms_exec.rs | 220 -------------- mayastor/tests/nvmet.rs | 36 +++ mayastor/tests/replica_snapshot.rs | 194 ------------- mayastor/tests/yaml_config.rs | 268 ------------------ nvmeadm/src/nvmf_discovery.rs | 65 +++-- rest/service/src/message_bus/v0.rs | 5 +- rest/tests/v0_test.rs | 6 +- services/node/src/server.rs | 5 +- shell.nix | 1 + 24 files changed, 176 insertions(+), 766 deletions(-) delete mode 100644 mayastor/tests/common/ms_exec.rs create mode 100644 mayastor/tests/nvmet.rs delete mode 100644 mayastor/tests/replica_snapshot.rs delete mode 100644 mayastor/tests/yaml_config.rs diff --git a/composer/src/lib.rs b/composer/src/lib.rs index 85735b024..b61db5dd4 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -39,7 +39,8 @@ use rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, mayastor_client::MayastorClient, }; - +pub const TEST_NET_NAME: &str = "mayastor-testing-network"; +pub const TEST_NET_NETWORK: &str = "10.1.0.0/16"; #[derive(Clone)] pub struct RpcHandle { pub name: String, @@ -424,8 +425,9 @@ impl ComposeTest { } let name_label = format!("{}.name", self.label_prefix); + // we use the same network everywhere let create_opts = CreateNetworkOptions { - name: self.name.as_str(), + name: TEST_NET_NAME, check_duplicate: true, driver: "bridge", internal: false, @@ -465,7 +467,7 @@ impl ComposeTest { pub async fn network_list(&self) -> 
Result, Error> { self.docker .list_networks(Some(ListNetworksOptions { - filters: vec![("name", vec![self.name.as_str()])] + filters: vec![("name", vec![TEST_NET_NAME])] .into_iter() .collect(), })) diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index 4bc74c49d..b0a61dba2 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -23,6 +23,8 @@ //! When reconfiguring the nexus, we traverse all our children, create new IO //! channels for all children that are in the open state. +use std::env; + use futures::future::join_all; use snafu::ResultExt; @@ -523,7 +525,20 @@ impl Nexus { let label = self.generate_label(); // ... and write it out to ALL children. - self.write_all_labels(&label).await?; + let label = match self.write_all_labels(&label).await { + Ok(_) => Ok(label), + Err(LabelError::ReReadError { + .. + }) => { + if env::var("NEXUS_LABEL_IGNORE_ERRORS").is_ok() { + warn!("ignoring label error on request"); + Ok(label) + } else { + Err(LabelError::ProbeError {}) + } + } + Err(e) => Err(e), + }?; info!("{}: new label: {}", self.name, label.primary.guid); trace!("{}: new label:\n{}", self.name, label); diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index 3935cf8ee..1d11b1a05 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -35,12 +35,16 @@ unsafe impl Send for NexusFnTable {} impl NexusFnTable { fn new() -> Self { - let mut f_tbl = spdk_bdev_fn_table::default(); - f_tbl.io_type_supported = Some(Self::io_supported); - f_tbl.submit_request = Some(Self::io_submit); - f_tbl.get_io_channel = Some(Self::io_channel); - f_tbl.destruct = Some(Self::destruct); - f_tbl.dump_info_json = Some(Self::dump_info_json); + let f_tbl = spdk_bdev_fn_table { + io_type_supported: Some(Self::io_supported), + submit_request: Some(Self::io_submit), + 
get_io_channel: Some(Self::io_channel), + destruct: Some(Self::destruct), + dump_info_json: Some(Self::dump_info_json), + write_config_json: None, + get_spin_time: None, + }; + NexusFnTable { f_tbl, } diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index 106eb450b..9dd02f3a8 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -132,6 +132,8 @@ pub enum LabelError { PartitionTableLocation {}, #[snafu(display("Could not get handle for child bdev {}", name,))] HandleCreate { name: String, source: ChildError }, + #[snafu(display("The written label could not be read from disk, likely the child {} is a null device", name))] + ReReadError { name: String }, } struct LabelData { @@ -396,7 +398,9 @@ impl Nexus { "{}: {}: Error validating newly written disk label: {}", child.parent, child.name, error ); - return Err(LabelError::ProbeError {}); + return Err(LabelError::ReReadError { + name: child.name.clone(), + }); } info!("{}: {}: Disk label written", child.parent, child.name); } diff --git a/mayastor/src/bin/casperf.rs b/mayastor/src/bin/casperf.rs index be4b2d7c4..be02631aa 100644 --- a/mayastor/src/bin/casperf.rs +++ b/mayastor/src/bin/casperf.rs @@ -363,10 +363,10 @@ fn main() { let io_size = value_t!(matches.value_of("io_size"), u64).unwrap_or(IO_SIZE); let qd = value_t!(matches.value_of("queue_depth"), u64).unwrap_or(QD); - let mut args = MayastorCliArgs::default(); - - args.reactor_mask = "0x2".to_string(); - //args.grpc_endpoint = Some("0.0.0.0".to_string()); + let args = MayastorCliArgs { + reactor_mask: "0x2".to_string(), + ..Default::default() + }; MayastorEnvironment::new(args).init(); sig_override(); diff --git a/mayastor/src/bin/initiator.rs b/mayastor/src/bin/initiator.rs index 5fec8c9f1..f83b3579c 100644 --- a/mayastor/src/bin/initiator.rs +++ b/mayastor/src/bin/initiator.rs @@ -19,6 +19,7 @@ use mayastor::{ Bdev, CoreError, DmaError, + MayastorCliArgs, 
MayastorEnvironment, Reactor, }, @@ -199,10 +200,6 @@ fn main() { None => 0, }; - let mut ms = MayastorEnvironment::default(); - - ms.name = "initiator".into(); - ms.rpc_addr = "/tmp/initiator.sock".into(); // This tool is just a client, so don't start iSCSI or NVMEoF services. Config::get_or_init(|| { let mut cfg = Config::default(); @@ -211,6 +208,8 @@ fn main() { cfg }); + let ms = MayastorEnvironment::new(MayastorCliArgs::default()); + ms.init(); let fut = async move { let res = if let Some(matches) = matches.subcommand_matches("read") { diff --git a/mayastor/src/core/io_driver.rs b/mayastor/src/core/io_driver.rs index bb8d7fb64..14ce02a4e 100644 --- a/mayastor/src/core/io_driver.rs +++ b/mayastor/src/core/io_driver.rs @@ -396,7 +396,10 @@ impl JobQueue { } } - /// stop all jobs + /// stop all jobs we allow holding the lock during await as its fine here + /// because we are shutting down and can only ever shut down if all jobs + /// stop + #[allow(clippy::await_holding_lock)] pub async fn stop_all(&self) { let mut inner = self.inner.lock().unwrap(); while let Some(mut job) = inner.pop() { diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs index 095a83f03..fecf51bbc 100644 --- a/mayastor/src/core/mod.rs +++ b/mayastor/src/core/mod.rs @@ -124,4 +124,8 @@ pub enum CoreError { NotSupported { source: Errno, }, + #[snafu(display("failed to configure reactor: {}", source))] + ReactorError { + source: Errno, + }, } diff --git a/mayastor/src/core/reactor.rs b/mayastor/src/core/reactor.rs index e47559bde..bc7aaccee 100644 --- a/mayastor/src/core/reactor.rs +++ b/mayastor/src/core/reactor.rs @@ -57,7 +57,8 @@ use spdk_sys::{ spdk_thread_lib_init_ext, }; -use crate::core::{Cores, Mthread}; +use crate::core::{CoreError, Cores, Mthread}; +use nix::errno::Errno; use std::cell::Cell; #[derive(Debug, Clone, Copy, PartialEq)] @@ -203,7 +204,8 @@ impl Reactors { /// start polling the reactors on the given core, when multiple cores are /// involved they must be 
running during init as they must process in coming /// messages that are send as part of the init process. - pub fn launch_remote(core: u32) -> Result<(), ()> { + #[allow(clippy::needless_return)] + pub fn launch_remote(core: u32) -> Result<(), CoreError> { // the master core -- who is the only core that can call this function // should not be launched this way. For that use ['launch_master`]. // Nothing prevents anyone from call this function twice now. @@ -219,13 +221,19 @@ impl Reactors { core as *const u32 as *mut c_void, ) }; - if rc == 0 { - return Ok(()); - } + return if rc == 0 { + Ok(()) + } else { + error!("failed to launch core {}", core); + Err(CoreError::ReactorError { + source: Errno::from_i32(rc), + }) + }; + } else { + Err(CoreError::ReactorError { + source: Errno::ENOSYS, + }) } - - error!("failed to launch core {}", core); - Err(()) } /// get a reference to a ['Reactor'] associated with the given core. diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index b4a9d57f4..525861117 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -238,7 +238,7 @@ impl mayastor_server::Mayastor for MayastorSvc { let uuid = args.uuid.clone(); debug!("Publishing nexus {} ...", uuid); - if args.key != "" && args.key.len() != 16 { + if !args.key.is_empty() && args.key.len() != 16 { return Err(nexus_bdev::Error::InvalidKey {}.into()); } diff --git a/mayastor/src/host/blk_device.rs b/mayastor/src/host/blk_device.rs index af22fcc09..fddbeb8c4 100644 --- a/mayastor/src/host/blk_device.rs +++ b/mayastor/src/host/blk_device.rs @@ -127,15 +127,13 @@ fn usable_partition(partition: &Option) -> bool { // At present this simply involves examining the value of // the udev "ID_MODEL" property. 
fn mayastor_device(device: &Device) -> bool { - match device - .property_value("ID_MODEL") - .map(|s| s.to_str()) - .flatten() - { - Some("Mayastor NVMe controller") => true, // NVMF - Some("Nexus_CAS_Driver") => true, // iSCSI - _ => false, - } + matches!( + device + .property_value("ID_MODEL") + .map(|s| s.to_str()) + .flatten(), + Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver") + ) } // Create a new Partition object from udev::Device properties @@ -234,7 +232,7 @@ fn new_device( .flatten() .unwrap_or("") .split(' ') - .filter(|&s| s != "") + .filter(|&s| !s.is_empty()) .map(String::from) .collect(), size, diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs index 47386d17d..bdee98f91 100644 --- a/mayastor/src/subsys/config/mod.rs +++ b/mayastor/src/subsys/config/mod.rs @@ -86,7 +86,7 @@ impl ConfigSubsystem { // if no config file is given, simply return Ok(). jsonrpc_register::<(), _, _, Error>("mayastor_config_export", |_| { let f = async move { - let cfg = Config::get().refresh().unwrap(); + let cfg = Config::get().refresh(); if let Some(target) = cfg.source.as_ref() { if let Err(e) = cfg.write(&target) { error!("error writing config file {} {}", target, e); @@ -208,7 +208,7 @@ impl Config { /// read the config file from disk. If the config file is empty, return the /// default config, but store the empty config file with in the struct to be /// used during saving to disk. - pub fn read

(file: P) -> Result + pub fn read

(file: P) -> Result where P: AsRef + Display + ToString, { @@ -222,7 +222,7 @@ impl Config { Ok(v) => config = v, Err(e) => { error!("{}", e); - return Err(()); + return Err(e); } }; } else { @@ -240,7 +240,7 @@ impl Config { /// collect current configuration snapshot into a new Config object that can /// be exported to a file (YAML or JSON) - pub fn refresh(&self) -> Result { + pub fn refresh(&self) -> Self { // the config is immutable, so we construct a new one which is mutable // such that we can scribble in the current bdevs. The config // gets loaded with the current settings, as we know that these @@ -310,7 +310,7 @@ impl Config { current.pools = Some(pools); - Ok(current) + current } /// write the current pool configuration to disk @@ -537,7 +537,7 @@ impl Config { /// exports the current configuration to the mayastor config file pub(crate) fn export_config() -> Result<(), std::io::Error> { - let cfg = Config::get().refresh().unwrap(); + let cfg = Config::get().refresh(); match cfg.source.as_ref() { Some(target) => cfg.write_pools(&target), // no config file to export to diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index a2735d51e..aaf33877b 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -189,8 +189,10 @@ impl NvmfSubsystem { /// add the given bdev to this namespace pub fn add_namespace(&self, bdev: &Bdev) -> Result<(), Error> { - let mut opts = spdk_nvmf_ns_opts::default(); - opts.nguid = bdev.uuid().as_bytes(); + let opts = spdk_nvmf_ns_opts { + nguid: bdev.uuid().as_bytes(), + ..Default::default() + }; let ns_id = unsafe { spdk_nvmf_subsystem_add_ns( self.0.as_ptr(), diff --git a/mayastor/src/subsys/nvmf/transport.rs b/mayastor/src/subsys/nvmf/transport.rs index c9615cd80..ffa80a464 100644 --- a/mayastor/src/subsys/nvmf/transport.rs +++ b/mayastor/src/subsys/nvmf/transport.rs @@ -88,9 +88,11 @@ impl TransportID { pub fn new(port: u16) -> Self { let address = 
get_ipv4_address().unwrap(); - let mut trid: spdk_nvme_transport_id = Default::default(); - trid.trtype = SPDK_NVME_TRANSPORT_TCP; - trid.adrfam = SPDK_NVMF_ADRFAM_IPV4; + let mut trid = spdk_nvme_transport_id { + trtype: SPDK_NVME_TRANSPORT_TCP, + adrfam: SPDK_NVMF_ADRFAM_IPV4, + ..Default::default() + }; let c_addr = address.into_cstring(); let port = format!("{}", port); diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index 5bbf29a5b..c38cb59b2 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -22,7 +22,6 @@ use mayastor::{ pub mod bdev_io; pub mod compose; pub mod error_bdev; -pub mod ms_exec; pub use compose::MayastorTest; diff --git a/mayastor/tests/common/ms_exec.rs b/mayastor/tests/common/ms_exec.rs deleted file mode 100644 index 465cb88cd..000000000 --- a/mayastor/tests/common/ms_exec.rs +++ /dev/null @@ -1,220 +0,0 @@ -use std::{ - fs, - io, - io::Write, - panic, - process::{Command, Stdio}, - time::Duration, -}; - -use nix::{ - sys::wait::{waitpid, WaitPidFlag}, - unistd::{gettid, Pid}, -}; - -use mayastor::core::Mthread; - -// there is a CARGO_EXEC_$BIN variable in recent Rust which does -// not seem to work yet with our compiler version -fn get_path(bin: &str) -> String { - if std::path::Path::new("./target/debug/bin").exists() { - format!("./target/debug/{}", bin) - } else { - format!("../target/debug/{}", bin) - } -} - -fn rpc_sock_path() -> String { - format!("/var/tmp/mayastor-test-{}", gettid()) -} - -fn hugetlbfs_path() -> String { - format!("/tmp/mayastor-test-{}", gettid()) -} - -/// start mayastor as a separate process and run the closure. By wrapping the -/// test closure, we can catch errors but still kill mayastor to avoid dangling -/// process. 
-pub fn run_test(args: Box<[String]>, test: T) -where - T: FnOnce(&MayastorProcess) + panic::UnwindSafe, -{ - let ms = MayastorProcess::new(args).unwrap(); - let ret = panic::catch_unwind(|| test(&ms)); - drop(ms); - assert!(ret.is_ok()); -} - -#[derive(Debug)] -/// this structure is used to fork mayastor(s) and to test them using -/// (g)rpc calls. -/// -/// Note that depending on the configuration that is passed, one or more -/// instances might fail to start as the instances might overlap ports. -pub struct MayastorProcess { - /// the PID we are tracked under - child: u32, - /// the json-rpc socket we listen on - pub rpc_path: String, - /// the hugepage directory we are using - pub hugetlbfs: String, -} - -impl MayastorProcess { - /// start mayastor and open the unix socket, if we are able to connect - /// we know we are up and running and ready for business. - pub fn new(args: Box<[String]>) -> Result { - let mayastor = get_path("mayastor"); - - let (tx, rx) = std::sync::mpsc::channel::(); - Mthread::spawn_unaffinitized(move || { - if let Err(e) = fs::create_dir(hugetlbfs_path()) { - panic!("failed to create hugetlbfs mount path {}", e); - } - - let output = Command::new("mount") - .args(&[ - "-t", - "hugetlbfs", - "nodev", - &hugetlbfs_path(), - "-o", - "pagesize=2048k", - ]) - .output() - .expect("could not exec mount"); - - if !output.status.success() { - io::stderr().write_all(&output.stderr).unwrap(); - panic!("failed to mount hugetlbfs"); - } - - let mut child = Command::new(mayastor) - .args(&["-r", &rpc_sock_path()]) - .args(&["--huge-dir", &hugetlbfs_path()]) - .args(args.into_vec()) - .stdout(Stdio::piped()) - .stderr(Stdio::inherit()) - .spawn() - .unwrap(); - - while !MayastorProcess::ping(&rpc_sock_path()) { - match child.try_wait() { - Ok(Some(_status)) => tx - .send(MayastorProcess { - child: child.id(), - rpc_path: rpc_sock_path(), - hugetlbfs: hugetlbfs_path(), - }) - .unwrap(), - Err(_e) => tx - .send(MayastorProcess { - child: 0, - rpc_path: 
rpc_sock_path(), - hugetlbfs: hugetlbfs_path(), - }) - .unwrap(), - _ => (), - }; - - std::thread::sleep(Duration::from_millis(200)); - } - - let m = MayastorProcess { - child: child.id(), - rpc_path: rpc_sock_path(), - hugetlbfs: hugetlbfs_path(), - }; - - let _ = tx.send(m); - }); - - let m = rx.recv().unwrap(); - if m.child == 0 { - panic!("Mayastor not started within deadline"); - } else { - Ok(m) - } - } - - /// check to see if rpc is up - pub fn ping(path: &str) -> bool { - use std::os::unix::net::UnixStream; - let _stream = match UnixStream::connect(path) { - Ok(stream) => stream, - Err(_) => return false, - }; - true - } - - /// call json-rpc method using the binary - pub fn rpc_call( - &self, - method: &str, - arg: serde_json::Value, - ) -> Result { - let jsonrpc = get_path("jsonrpc"); - - let output = Command::new(jsonrpc) - .args(&["-s", &self.rpc_path, "raw", method]) - .arg(serde_json::to_string(&arg).unwrap()) - .output() - .expect("could not exec jsonrpc"); - - if !output.status.success() { - panic!( - "RPC to socket {} with method {} failed arguments {:?}", - self.rpc_path, method, arg - ); - } - - let output_string = String::from_utf8_lossy(&output.stdout); - Ok(serde_json::from_str(&output_string).unwrap()) - } - - fn sig_x(&mut self, sig_str: &str, options: Option) { - if self.child == 0 { - return; - } - let child = self.child; - if sig_str == "TERM" { - self.child = 0; - } - Command::new("kill") - .args(&["-s", sig_str, &format!("{}", child)]) - .spawn() - .unwrap(); - - // blocks until child changes state, signals are racy by themselves - // however - waitpid(Pid::from_raw(child as i32), options).unwrap(); - } - - /// terminate the mayastor process and wait for it to die - pub fn sig_term(&mut self) { - self.sig_x("TERM", None); - } - - /// stop the mayastor process and wait for it to stop - pub fn sig_stop(&mut self) { - self.sig_x("STOP", Some(WaitPidFlag::WUNTRACED)); - } - - /// continue the mayastor process and wait for it to continue - 
pub fn sig_cont(&mut self) { - self.sig_x("CONT", Some(WaitPidFlag::WCONTINUED)); - } -} - -/// ensure we umount the huge pages during shutdown -impl Drop for MayastorProcess { - fn drop(&mut self) { - self.sig_term(); - let _ = Command::new("umount") - .args(&[&self.hugetlbfs]) - .output() - .unwrap(); - let _ = fs::remove_dir(&self.hugetlbfs); - let _ = Command::new("rm").args(&[&self.rpc_path]).output().unwrap(); - } -} diff --git a/mayastor/tests/nvmet.rs b/mayastor/tests/nvmet.rs new file mode 100644 index 000000000..cac78a03d --- /dev/null +++ b/mayastor/tests/nvmet.rs @@ -0,0 +1,36 @@ +use common::compose::MayastorTest; +use mayastor::core::{Bdev, MayastorCliArgs}; + +use mayastor::bdev::nexus_create; + +pub mod common; +async fn create_nexus() { + nexus_create( + "nexus0", + 250 * 1024 * 1024 * 1024, + None, + &["nvmf://127.0.0.1/replica1".to_string()], + ) + .await + .unwrap(); +} + +async fn bdev_info() { + let bdev = Bdev::bdev_first().unwrap(); + dbg!(bdev); +} + +#[ignore] +#[tokio::test] +async fn nvmet_nexus_test() { + std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); + let ms = MayastorTest::new(MayastorCliArgs { + reactor_mask: 0x3.to_string(), + no_pci: true, + grpc_endpoint: "0.0.0.0".to_string(), + ..Default::default() + }); + + ms.spawn(create_nexus()).await; + ms.spawn(bdev_info()).await; +} diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs deleted file mode 100644 index 120b52ae1..000000000 --- a/mayastor/tests/replica_snapshot.rs +++ /dev/null @@ -1,194 +0,0 @@ -use std::{io, io::Write, process::Command, thread, time}; - -use common::{bdev_io, ms_exec::MayastorProcess}; -use mayastor::{ - bdev::nexus_create, - core::{ - mayastor_env_stop, - BdevHandle, - CoreError, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - }, - lvs::{Lvol, Lvs}, - subsys, - subsys::Config, -}; - -pub mod common; - -static DISKNAME1: &str = "/tmp/disk1.img"; -static DISKNAME2: &str = "/tmp/disk2.img"; - -static DISKSIZE_KB: 
u64 = 128 * 1024; - -static CFGNAME1: &str = "/tmp/child1.yaml"; -static CFGNAME2: &str = "/tmp/child2.yaml"; -static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756"; - -static NXNAME: &str = "replica_snapshot_test"; -static NXNAME_SNAP: &str = "replica_snapshot_test-snap"; - -fn generate_config() { - let mut config = Config::default(); - - config.nexus_opts.iscsi_enable = false; - let pool1 = subsys::Pool { - name: "pool1".to_string(), - disks: vec!["aio://".to_string() + &DISKNAME1.to_string()], - replicas: Default::default(), - }; - config.pools = Some(vec![pool1]); - config.write(CFGNAME1).unwrap(); - config.nexus_opts.nvmf_replica_port = 8430; - config.nexus_opts.nvmf_nexus_port = 8440; - let pool2 = subsys::Pool { - name: "pool2".to_string(), - disks: vec!["aio://".to_string() + &DISKNAME2.to_string()], - replicas: Default::default(), - }; - config.pools = Some(vec![pool2]); - config.write(CFGNAME2).unwrap(); -} - -fn start_mayastor(cfg: &str) -> MayastorProcess { - let args = vec![ - "-s".to_string(), - "128".to_string(), - "-g".to_string(), - "127.0.0.1:10125".to_string(), - "-y".to_string(), - cfg.to_string(), - ]; - - MayastorProcess::new(Box::from(args)).unwrap() -} - -fn conf_mayastor(msc_args: &[&str]) { - let msc = "../target/debug/mayastor-client"; - let output = Command::new(msc) - .args(&*msc_args) - .output() - .expect("could not exec mayastor-client"); - if !output.status.success() { - io::stderr().write_all(&output.stderr).unwrap(); - panic!("failed to configure mayastor"); - } -} - -fn create_replica() { - // configuration yaml does not yet support creating replicas - conf_mayastor(&[ - "-p", - "10125", - "replica", - "create", - "--protocol", - "nvmf", - "pool2", - UUID1, - "--size", - "64M", - ]); -} - -fn share_snapshot(t: u64) { - conf_mayastor(&[ - "-p", - "10125", - "replica", - "share", - &Lvol::format_snapshot_name(UUID1, t), - "nvmf", - ]); -} -#[ignore] -#[test] -fn replica_snapshot() { - generate_config(); - - // Start with 
fresh pools - common::delete_file(&[DISKNAME1.to_string()]); - common::truncate_file(DISKNAME1, DISKSIZE_KB); - common::delete_file(&[DISKNAME2.to_string()]); - common::truncate_file(DISKNAME2, DISKSIZE_KB); - - let _ms2 = start_mayastor(CFGNAME2); - // Allow Mayastor process to start listening on NVMf port - thread::sleep(time::Duration::from_millis(250)); - - create_replica(); - - test_init!(CFGNAME1); - - Reactor::block_on(async { - let pool = Lvs::lookup("pool1").unwrap(); - pool.create_lvol(UUID1, 64 * 1024 * 1024, true) - .await - .unwrap(); - create_nexus(0).await; - bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); - // Issue an unimplemented vendor command - custom_nvme_admin(0xc1) - .await - .expect_err("unexpectedly succeeded invalid nvme admin command"); - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); - let t = create_snapshot().await.unwrap(); - // Check that IO to the replica still works after creating a snapshot - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::write_some(NXNAME, 1024, 0xaa).await.unwrap(); - bdev_io::read_some(NXNAME, 1024, 0xaa).await.unwrap(); - // Share the snapshot and create a new nexus - share_snapshot(t); - create_nexus(t).await; - bdev_io::write_some(NXNAME_SNAP, 0, 0xff) - .await - .expect_err("writing to snapshot should fail"); - // Verify that data read from snapshot remains unchanged - bdev_io::write_some(NXNAME, 0, 0x55).await.unwrap(); - bdev_io::read_some(NXNAME, 0, 0x55).await.unwrap(); - bdev_io::read_some(NXNAME_SNAP, 0, 0xff).await.unwrap(); - bdev_io::read_some(NXNAME_SNAP, 1024, 0).await.unwrap(); - }); - mayastor_env_stop(0); - - common::delete_file(&[DISKNAME1.to_string()]); - common::delete_file(&[DISKNAME2.to_string()]); -} - -async fn create_nexus(t: u64) { - let mut children = vec![ - "loopback:///".to_string() + &UUID1.to_string(), - 
"nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:".to_string() - + &UUID1.to_string(), - ]; - let mut nexus_name = NXNAME; - if t > 0 { - children - .iter_mut() - .for_each(|c| *c = Lvol::format_snapshot_name(&c, t)); - nexus_name = NXNAME_SNAP; - } - - nexus_create(&nexus_name, 64 * 1024 * 1024, None, &children) - .await - .unwrap(); -} - -async fn create_snapshot() -> Result { - let h = BdevHandle::open(NXNAME, true, false).unwrap(); - let t = h - .create_snapshot() - .await - .expect("failed to create snapshot"); - Ok(t) -} - -async fn custom_nvme_admin(opc: u8) -> Result<(), CoreError> { - let h = BdevHandle::open(NXNAME, true, false).unwrap(); - h.nvme_admin_custom(opc).await?; - Ok(()) -} diff --git a/mayastor/tests/yaml_config.rs b/mayastor/tests/yaml_config.rs deleted file mode 100644 index ad769eca9..000000000 --- a/mayastor/tests/yaml_config.rs +++ /dev/null @@ -1,268 +0,0 @@ -use std::{fs::metadata, sync::Mutex, time::Duration}; - -use common::ms_exec::run_test; -use mayastor::{subsys, subsys::Config}; - -pub mod common; - -#[test] -// Test we can start without any mayastor specific configuration. -fn yaml_default() { - let args = vec!["-s".into(), "128".into()]; - run_test(Box::from(args), |ms| { - // knock, anyone there? - let out = ms - .rpc_call("rpc_get_methods", serde_json::json!(null)) - .unwrap(); - assert_ne!(out.as_array().unwrap().len(), 0); - }); -} - -#[test] -// The YAML file we load does not exist. 
The test ensures we create a new one -// when we save it where the values are bootstrapped with our defaults defined -// in ['Config::opts'] -fn yaml_not_exist() { - let args = vec![ - "-s".to_string(), - "128".into(), - "-y".into(), - "/tmp/test.yaml".into(), - ]; - - // delete any existing file - common::delete_file(&["/tmp/test.yaml".into()]); - - run_test(Box::from(args), |ms| { - let out = ms - .rpc_call("mayastor_config_export", serde_json::json!(null)) - .unwrap(); - assert_eq!(out, serde_json::Value::Null); - assert_eq!(metadata("/tmp/test.yaml").unwrap().is_file(), true); - }); -} - -#[test] -// Create a new config file with some bdevs in it. Write out the config file and -// then start mayastor using it. This tests that serialisation is done properly -// and that we indeed can create bdevs defined with in the config -fn yaml_load_from_existing() { - let mut cfg = Config::default(); - common::truncate_file("/tmp/disk1.img", 1024 * 64); - - let bdev = subsys::BaseBdev { - uri: "aio:///tmp/disk1.img?blk_size=512&uuid=3dbbaeb0-ec02-4962-99c5-4e8f67c6b80c".to_string(), - }; - - cfg.source = Some("/tmp/loadme.yaml".into()); - cfg.base_bdevs = Some(vec![bdev]); - - cfg.write("/tmp/loadme.yaml").unwrap(); - - assert_eq!( - std::fs::metadata("/tmp/loadme.yaml").unwrap().is_file(), - true - ); - - let args = vec![ - "-s".to_string(), - "128".to_string(), - "-y".to_string(), - "/tmp/loadme.yaml".to_string(), - ]; - - run_test(Box::from(args), |ms| { - // get the config we just loaded and validate it's the bdev is in that - // config - let out = ms - .rpc_call( - "framework_get_config", - serde_json::json!({"name": "MayastorConfig"}), - ) - .unwrap(); - - let base_bdevs = out - .as_object() - .unwrap() - .get("base_bdevs") - .unwrap() - .as_array() - .unwrap(); - - assert_eq!( - base_bdevs[0]["uri"].as_str().unwrap(), - "aio:///tmp/disk1.img?blk_size=512&uuid=3dbbaeb0-ec02-4962-99c5-4e8f67c6b80c" - ); - - // out of scope for testing this but -- lets ensure the bdev 
is actually - // here - let bdev = ms.rpc_call( - "bdev_get_bdevs", - serde_json::json!({"name": "aio:///tmp/disk1.img?blk_size=512&uuid=3dbbaeb0-ec02-4962-99c5-4e8f67c6b80c"}), - ).unwrap(); - - assert_ne!(bdev.as_array().unwrap().len(), 0); - }); - - common::delete_file(&["/tmp/disk1.img".into()]); -} - -#[test] -// In this test we want to validate that we can create a pool using a config -// file. Moreover, we also want to validate that if we have a pool, we can -// export the pool topology and then restart ourselves with that pool defined in -// the config. Said differently, import and export pools. -fn yaml_pool_tests() { - let mut cfg = Config::default(); - common::delete_file(&["/tmp/disk1.img".into()]); - common::truncate_file("/tmp/disk1.img", 1024 * 64); - - // create a config where and define the pool we want to create. The pool - // does not exist, so we expect that it gets created -- and not imported. - let pool = subsys::Pool { - name: "tpool".to_string(), - disks: vec!["/tmp/disk1.img".into()], - replicas: Default::default(), - }; - - // we use this UUID to ensure that the created pool is indeed -- the pool - // we later import. We use a mutex to get unwind safety. 
- - let uuid: Mutex = Mutex::new("".into()); - - cfg.source = Some("/tmp/pool.yaml".into()); - cfg.pools = Some(vec![pool]); - cfg.nexus_opts.nvmf_enable = false; - - cfg.write("/tmp/pool.yaml").unwrap(); - - // setup the arguments we want to load mayastor with - let args = vec![ - "-s".to_string(), - "128".into(), - "-y".into(), - "/tmp/pool.yaml".to_string(), - ]; - - run_test(Box::from(args.clone()), |ms| { - let lvs_list = common::retry(10, Duration::from_millis(500), || { - let lvs_list = ms - .rpc_call("bdev_lvol_get_lvstores", serde_json::json!(null)) - .unwrap(); - if lvs_list.is_array() { - Ok(lvs_list) - } else { - Err(()) - } - }); - let lvs = lvs_list.as_array().unwrap()[0].as_object().unwrap(); - assert_eq!(lvs.get("name").unwrap(), "tpool"); - *uuid.lock().unwrap() = lvs.get("uuid").unwrap().to_string(); - - // delete our config file to validate that pool export logic works - // properly - common::delete_file(&["/tmp/pool.yaml".into()]); - - let out = ms - .rpc_call("mayastor_config_export", serde_json::json!(null)) - .unwrap(); - assert_eq!(out, serde_json::Value::Null); - assert_eq!(metadata("/tmp/pool.yaml").unwrap().is_file(), true); - }); - - // Part two, in a galaxy far far away... the string has been set by the - // first jedi maya instance. Now we need to determine if we can trust the - // import, or if a certain amount of fear is justified. - // - // Fear is the path to the dark side. Fear leads to anger. Anger leads to - // hate. Hate leads to suffering. - // - // In episode one we used (attack of the) arg.clone() -- so we can use the - // same arguments to start his next episode. We load the same config and - // expect the UUID to be the exact same. As we do not specify a UUID - // explicitly, matching UUIDs confirm that the pool has not been - // recreated. 
- run_test(Box::from(args), |ms| { - let lvols = common::retry(10, Duration::from_millis(500), || { - let vols = ms - .rpc_call("bdev_lvol_get_lvstores", serde_json::json!(null)) - .unwrap(); - if vols.as_array().unwrap().is_empty() { - Err(()) - } else { - Ok(vols) - } - }); - - // compare the UUID we stored from the first step, with the current - assert_eq!( - *uuid.lock().unwrap(), - lvols.as_array().unwrap()[0] - .as_object() - .unwrap() - .get("uuid") - .unwrap() - .to_string() - ); - }); - - // delete the pool - common::delete_file(&["/tmp/disk1.img".into()]); -} -#[ignore] -#[test] -// Try to see if we can start two mayastor instances where the nvmf and iSCSI -// target is disabled for one of them. If we did not disable one of them, one -// would fail to start. -fn yaml_multi_maya() { - common::delete_file(&[ - "/tmp/first.yaml".to_string(), - "/tmp/second.yaml".into(), - ]); - - let mut first = Config::default(); - let second = Config::default(); - - first.nexus_opts.iscsi_enable = false; - first.nexus_opts.nvmf_enable = false; - - first.write("/tmp/first.yaml").unwrap(); - second.write("/tmp/second.yaml").unwrap(); - - let first_args = vec![ - "-s".to_string(), - "128".into(), - "-y".into(), - "/tmp/first.yaml".into(), - "-g".to_string(), - "127.0.0.1:10124".to_string(), - ]; - - let second_args = vec![ - "-s".to_string(), - "128".into(), - "-y".into(), - "/tmp/second.yaml".into(), - "-g".to_string(), - "127.0.0.1:10125".to_string(), - ]; - - run_test(Box::from(first_args), |ms1| { - let out = ms1 - .rpc_call("rpc_get_methods", serde_json::json!(null)) - .unwrap(); - assert_ne!(out.as_array().unwrap().len(), 0); - - run_test(Box::from(second_args), |ms2| { - let out = ms2 - .rpc_call("rpc_get_methods", serde_json::json!(null)) - .unwrap(); - assert_ne!(out.as_array().unwrap().len(), 0); - }); - }); - - common::delete_file(&[ - "/tmp/first.yaml".to_string(), - "/tmp/second.yaml".into(), - ]) -} diff --git a/nvmeadm/src/nvmf_discovery.rs 
b/nvmeadm/src/nvmf_discovery.rs index a022576a7..891efdd6d 100644 --- a/nvmeadm/src/nvmf_discovery.rs +++ b/nvmeadm/src/nvmf_discovery.rs @@ -1,3 +1,18 @@ +use std::{ + fmt, + fs::OpenOptions, + io::{ErrorKind, Read, Write}, + net::IpAddr, + os::unix::io::AsRawFd, + path::Path, + str::FromStr, +}; + +use error::{ConnectError, DiscoveryError, FileIoError, NvmeError}; +use nix::libc::ioctl as nix_ioctl; +use num_traits::FromPrimitive; +use snafu::ResultExt; + use crate::{ error, nvme_page::{NvmeAdminCmd, NvmfDiscRspPageEntry, NvmfDiscRspPageHdr}, @@ -10,20 +25,6 @@ use crate::{ /// referred as. const MACHINE_UUID_PATH: &str = "/sys/class/dmi/id/product_uuid"; -use error::{ConnectError, DiscoveryError, FileIoError, NvmeError}; -use nix::libc::ioctl as nix_ioctl; -use num_traits::FromPrimitive; -use snafu::ResultExt; -use std::{ - fmt, - fs::OpenOptions, - io::{ErrorKind, Read, Write}, - net::IpAddr, - os::unix::io::AsRawFd, - path::Path, - str::FromStr, -}; - static HOST_ID: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { let mut host_id = uuid::Uuid::new_v4().to_string(); @@ -181,11 +182,13 @@ impl Discovery { // See NVM-Express1_3d 5.14 let hdr_len = std::mem::size_of::() as u32; let h = NvmfDiscRspPageHdr::default(); - let mut cmd = NvmeAdminCmd::default(); - cmd.opcode = 0x02; - cmd.nsid = 0; - cmd.dptr_len = hdr_len; - cmd.dptr = &h as *const _ as u64; + let mut cmd = NvmeAdminCmd { + opcode: 0x02, + nsid: 0, + dptr: &h as *const _ as u64, + dptr_len: hdr_len, + ..Default::default() + }; // bytes to dwords, divide by 4. 
Spec says 0's value @@ -230,14 +233,16 @@ impl Discovery { let hdr_len = std::mem::size_of::(); let response_len = std::mem::size_of::(); let total_length = hdr_len + (response_len * count as usize); + // TODO: fixme let buffer = unsafe { libc::calloc(1, total_length) }; - let mut cmd = NvmeAdminCmd::default(); - - cmd.opcode = 0x02; - cmd.nsid = 0; - cmd.dptr_len = total_length as u32; - cmd.dptr = buffer as *const _ as u64; + let mut cmd = NvmeAdminCmd { + opcode: 0x02, + nsid: 0, + dptr: buffer as _, + dptr_len: total_length as _, + ..Default::default() + }; let dword_count = ((total_length >> 2) - 1) as u32; let numdl: u16 = (dword_count & 0xFFFF) as u16; @@ -246,15 +251,21 @@ impl Discovery { cmd.cdw10 = 0x70 | u32::from(numdl) << 16_u32; cmd.cdw11 = u32::from(numdu); - let _ret = unsafe { + let ret = unsafe { convert_ioctl_res!(nix_ioctl( f.as_raw_fd(), u64::from(NVME_ADMIN_CMD_IOCTL), &cmd )) - .context(DiscoveryError)? }; + if let Err(e) = ret { + unsafe { libc::free(buffer) }; + return Err(NvmeError::DiscoveryError { + source: e, + }); + } + let hdr = unsafe { &mut *(buffer as *mut NvmfDiscRspPageHdr) }; let entries = unsafe { hdr.entries.as_slice(hdr.numrec as usize) }; diff --git a/rest/service/src/message_bus/v0.rs b/rest/service/src/message_bus/v0.rs index 517eba082..97e511fc3 100644 --- a/rest/service/src/message_bus/v0.rs +++ b/rest/service/src/message_bus/v0.rs @@ -68,11 +68,12 @@ mod tests { #[tokio::test] async fn bus() -> Result<(), Box> { init_tracing(); - let nats_arg = vec!["-n", "nats.rest_backend"]; + let natsep = format!("nats.{}", TEST_NET_NAME); + let nats_arg = vec!["-n", &natsep]; let mayastor = "node-test-name"; let test = Builder::new() .name("rest_backend") - .network("10.1.0.0/16") + .network(TEST_NET_NETWORK) .add_container_bin( "nats", Binary::from_nix("nats-server").with_arg("-DV"), diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs index 06f0d6bb0..d248dc1f5 100644 --- a/rest/tests/v0_test.rs +++ b/rest/tests/v0_test.rs 
@@ -1,4 +1,5 @@ mod test; +use composer::{TEST_NET_NAME, TEST_NET_NETWORK}; use mbus_api::{ v0::{GetNodes, NodeState}, Message, @@ -32,11 +33,12 @@ async fn orderly_start( async fn client() -> Result<(), Box> { test::init(); - let nats_arg = vec!["-n", "nats.rest"]; + let natsep = format!("nats.{}", TEST_NET_NAME); + let nats_arg = vec!["-n", &natsep]; let mayastor = "node-test-name"; let test = Builder::new() .name("rest") - .network("10.1.0.0/16") + .network(TEST_NET_NETWORK) .add_container_spec( ContainerSpec::new( "nats", diff --git a/services/node/src/server.rs b/services/node/src/server.rs index 1e131faee..318196110 100644 --- a/services/node/src/server.rs +++ b/services/node/src/server.rs @@ -251,11 +251,12 @@ mod tests { #[tokio::test] async fn node() -> Result<(), Box> { init_tracing(); - let nats_arg = vec!["-n", "nats.node"]; + let natsep = format!("nats.{}", TEST_NET_NAME); + let nats_arg = vec!["-n", &natsep]; let maya_name = "node-test-name"; let test = Builder::new() .name("node") - .network("10.1.0.0/16") + .network(TEST_NET_NETWORK) .add_container_bin( "nats", Binary::from_nix("nats-server").with_arg("-DV"), diff --git a/shell.nix b/shell.nix index c457eb206..1f1b0a36d 100644 --- a/shell.nix +++ b/shell.nix @@ -37,6 +37,7 @@ mkShell { nats-server nodejs-12_x numactl + nvmet-cli meson ninja nvme-cli From a02b89fd8b2f502342fa88eb6ec218ecdb23ee3a Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Tue, 24 Nov 2020 09:13:47 +0000 Subject: [PATCH 79/92] moac: Fix unreliable time interval tests for watcher By strange coincidence the timer triggers sooner than it should which according to nodejs doc should not happen. Extend time interval by one millisecond to avoid it. 
--- csi/moac/test/watcher_test.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/csi/moac/test/watcher_test.js b/csi/moac/test/watcher_test.js index 0cc90a5f1..121881ea3 100644 --- a/csi/moac/test/watcher_test.js +++ b/csi/moac/test/watcher_test.js @@ -219,7 +219,7 @@ module.exports = function () { timeout = setTimeout(() => watcher.emitKubeEvent('add', apple), EVENT_DELAY_MS); await watcher.create(apple); const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); sinon.assert.calledOnce(createStub); }); @@ -247,7 +247,7 @@ module.exports = function () { return createApple(orig.metadata.name, [], 'also valid'); }); const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); assertReplaceCalledWith(replaceStub, 'name1', apple, { spec: 'also valid' }); @@ -388,7 +388,7 @@ module.exports = function () { sinon.assert.calledOnce(deleteStub); sinon.assert.calledWith(deleteStub, 'openebs.io', 'v1alpha1', 'namespace', 'apples', 'name1'); - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); }); it('should timeout when "delete" event does not come after a delete', async () => { @@ -425,7 +425,7 @@ module.exports = function () { timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); await watcher.addFinalizer('name1', 'test.finalizer.com'); const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); assertReplaceCalledWith(replaceStub, 'name1', apple, { metadata: { finalizers: ['test.finalizer.com'] @@ -443,7 
+443,7 @@ module.exports = function () { timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); await watcher.addFinalizer('name1', 'new.finalizer.com'); const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); assertReplaceCalledWith(replaceStub, 'name1', apple, { metadata: { finalizers: ['new.finalizer.com', 'test.finalizer.com', 'test2.finalizer.com'] @@ -483,7 +483,7 @@ module.exports = function () { timeout = setTimeout(() => watcher.emitKubeEvent('update', apple), EVENT_DELAY_MS); await watcher.removeFinalizer('name1', 'test.finalizer.com'); const delta = new Date() - startTime; - expect(delta).to.be.within(EVENT_DELAY_MS, EVENT_DELAY_MS + EYE_BLINK_MS); + expect(delta).to.be.within(EVENT_DELAY_MS - 2, EVENT_DELAY_MS + EYE_BLINK_MS); sinon.assert.calledOnce(replaceStub); assertReplaceCalledWith(replaceStub, 'name1', apple, { metadata: { From 73502ee0e2731cad9bd546649352644ed84f02f5 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 20 Nov 2020 18:27:16 +0000 Subject: [PATCH 80/92] Try to fix CI nbd issues Run the set timeout in another thread while we poll as it seems that when the nbd device is open it triggers a partition scan. Set the NBD size to 0 before disconnecting as it was observed that when we disconnect sometimes the device gets rescanned and the kernel thinks it should partition scan again... setting the size to 0 before the disconnect seems to alleviate this. 
--- mayastor/src/bdev/nexus/nexus_nbd.rs | 57 ++++++++++++++++++++-------- nvmeadm/tests/discovery_test.rs | 3 ++ 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_nbd.rs b/mayastor/src/bdev/nexus/nexus_nbd.rs index 54e18d7cf..ebc924ef0 100644 --- a/mayastor/src/bdev/nexus/nexus_nbd.rs +++ b/mayastor/src/bdev/nexus/nexus_nbd.rs @@ -32,6 +32,8 @@ use crate::{ // include/uapi/linux/fs.h const IOCTL_BLKGETSIZE: u32 = ior!(0x12, 114, std::mem::size_of::()); const SET_TIMEOUT: u32 = io!(0xab, 9); +const SET_SIZE: u32 = io!(0xab, 2); + #[derive(Debug, Snafu)] pub enum NbdError { #[snafu(display("No free NBD devices available (is NBD kmod loaded?)"))] @@ -55,8 +57,23 @@ pub(crate) fn wait_until_ready(path: &str) -> Result<(), ()> { let tpath = String::from(path); let s = started.clone(); + debug!("Waiting for NBD device {} to become ready...", path); // start a thread that loops and tries to open us and asks for our size Mthread::spawn_unaffinitized(move || { + // this should not be needed but for some unknown reason, we end up with + // stale NBD devices. Setting this to non zero, prevents that from + // happening (although we dont actually timeout). + let timeout = 3; + let f = OpenOptions::new().read(true).open(Path::new(&tpath)); + unsafe { + convert_ioctl_res!(libc::ioctl( + f.unwrap().as_raw_fd(), + SET_TIMEOUT as u64, + timeout + )) + } + .unwrap(); + debug!("Timeout of NBD device {} was set to {}", tpath, timeout); let size: u64 = 0; let mut delay = 1; for _i in 0i32 .. 10 { @@ -181,6 +198,13 @@ pub async fn start( .context(StartNbd { dev: device_path.to_owned(), }) + .map(|ok| { + info!( + "Nbd device {} for parent {} started", + device_path, bdev_name + ); + ok + }) } /// NBD disk representation. @@ -196,20 +220,6 @@ impl NbdDisk { let device_path = find_unused()?; let nbd_ptr = start(bdev_name, &device_path).await?; - // this should not be needed but for some unknown reason, we end up with - // stale NBD devices. 
Setting this to non zero, prevents that from - // happening (although we dont actually timeout). - - let f = OpenOptions::new().read(true).open(Path::new(&device_path)); - unsafe { - convert_ioctl_res!(libc::ioctl( - f.unwrap().as_raw_fd(), - SET_TIMEOUT as u64, - 3, - )) - } - .unwrap(); - // we wait for the dev to come up online because // otherwise the mount done too early would fail. // If it times out, continue anyway and let the mount fail. @@ -233,8 +243,25 @@ impl NbdDisk { let ptr = self.nbd_ptr as usize; let name = self.get_path(); + let nbd_name = name.clone(); + debug!("Stopping NBD device {}...", name); Mthread::spawn_unaffinitized(move || { - unsafe { nbd_disconnect(ptr as *mut _) }; + unsafe { + // After disconnecting a disk changed event is triggered which + // causes a refresh of the size back to the + // original size and a partition scan. + // Set the size to 0 before disconnecting in hopes of stopping + // that. + let f = + OpenOptions::new().read(true).open(Path::new(&nbd_name)); + convert_ioctl_res!(libc::ioctl( + f.unwrap().as_raw_fd(), + SET_SIZE as u64, + 0 + )) + .unwrap(); + nbd_disconnect(ptr as *mut _); + }; debug!("NBD device disconnected successfully"); s.store(true, SeqCst); }); diff --git a/nvmeadm/tests/discovery_test.rs b/nvmeadm/tests/discovery_test.rs index e626b2252..9bf04afb1 100644 --- a/nvmeadm/tests/discovery_test.rs +++ b/nvmeadm/tests/discovery_test.rs @@ -184,6 +184,9 @@ fn test_against_real_target() { .connect(SERVED_DISK_NQN) .expect("Problem connecting to valid target"); + // allow the part scan to complete for most cases + std::thread::sleep(std::time::Duration::from_secs(1)); + // Check that we CAN disconnect from a served NQN let num_disconnects = disconnect(SERVED_DISK_NQN); assert_eq!(num_disconnects.unwrap(), 1); From f28fa5e6efe6d9640ab74b501049f9cedf464859 Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Tue, 24 Nov 2020 10:01:37 +0000 Subject: [PATCH 81/92] Clean up log/tracing event formatter --- 
mayastor/src/logger.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index 94eac6a36..13f8c02e1 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -1,4 +1,4 @@ -use std::{ffi::CStr, fmt::Write, os::raw::c_char, path::Path}; +use std::{ffi::CStr, os::raw::c_char, path::Path}; use ansi_term::{Colour, Style}; @@ -170,7 +170,7 @@ where .flat_map(|span| span.from_root().chain(std::iter::once(span))); for span in scope { - write!(f, "{}", bold.paint(span.metadata().name()))?; + write!(f, ":{}", bold.paint(span.metadata().name()))?; let extensions = span.extensions(); @@ -181,8 +181,6 @@ where if !fields.is_empty() { write!(f, "{}{}{}", bold.paint("{"), fields, bold.paint("}"))?; } - - f.write_char(' ')?; } Ok(()) @@ -211,7 +209,7 @@ impl std::fmt::Display for Location<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(file) = self.meta.file() { if let Some(line) = self.meta.line() { - write!(f, "{}:{}] ", basename(file), line)?; + write!(f, ":{}:{}", basename(file), line)?; } } Ok(()) @@ -240,20 +238,14 @@ where write!( writer, - "[{} {} {}:", + "[{} {} {}{}{}] ", chrono::Local::now().format("%FT%T%.9f%Z"), FormatLevel::new(meta.level(), self.ansi), - meta.target() - )?; - - write!( - writer, - "{}", - CustomContext::new(context, event.parent(), self.ansi) + meta.target(), + CustomContext::new(context, event.parent(), self.ansi), + Location::new(&meta) )?; - write!(writer, "{}", Location::new(&meta))?; - context.format_fields(writer, event)?; writeln!(writer) From 4c4d46f2064b6b79e1a11674501d638f32e12635 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Tue, 24 Nov 2020 17:50:56 +0000 Subject: [PATCH 82/92] Print env vars in each test stage of jenkins This helps when debugging problems. In particular it gives us a name of the node where the tests are running which is useful when trying to reproduce a failure. 
--- Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index ba5acc1e3..7163c1bdf 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -104,6 +104,7 @@ pipeline { stage('rust unit tests') { agent { label 'nixos-mayastor' } steps { + sh 'printenv' sh 'nix-shell --run "./scripts/cargo-test.sh"' } post { @@ -116,6 +117,7 @@ pipeline { stage('mocha api tests') { agent { label 'nixos-mayastor' } steps { + sh 'printenv' sh 'nix-shell --run "./scripts/node-test.sh"' } post { @@ -127,6 +129,7 @@ pipeline { stage('moac unit tests') { agent { label 'nixos-mayastor' } steps { + sh 'printenv' sh 'nix-shell --run "./scripts/moac-test.sh"' } post { From c3d052502e5527a27b5ce868ca4ca3cebf8d00a1 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Wed, 25 Nov 2020 10:57:34 +0000 Subject: [PATCH 83/92] Set a child to closed only when necessary When removing a child only set the state to closed if it is open or rebuilding. This ensures the child is taken out of the I/O path. In all other cases, do not modify the state. --- mayastor/src/bdev/nexus/nexus_child.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 49b77a2b3..542ad4571 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -350,8 +350,19 @@ impl NexusChild { // The bdev is being removed, so ensure we don't use it again. self.bdev = None; + match self.state.load() { + ChildState::Open | Faulted(Reason::OutOfSync) => { + // Change the state of the child to ensure it is taken out of + // the I/O path when the nexus is reconfigured. + self.set_state(ChildState::Closed) + } + ChildState::Init + | ChildState::ConfigInvalid + | ChildState::Closed + | Faulted(_) => {} + } + // Remove the child from the I/O path. 
- self.set_state(ChildState::Closed); let nexus_name = self.parent.clone(); Reactor::block_on(async move { match nexus_lookup(&nexus_name) { From 8c78a8ec8939ad530ebb5d3a3aba54c247fdd567 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Wed, 25 Nov 2020 14:34:48 +0000 Subject: [PATCH 84/92] Limit number of layers in docker images to avoid problems. --- nix/pkgs/images/default.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index a1eac0769..7f7abab4a 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -1,6 +1,10 @@ # It would be cool to produce OCI images instead of docker images to # avoid dependency on docker tool chain. Though the maturity of OCI # builder in nixpkgs is questionable which is why we postpone this step. +# +# We limit max number of image layers to 42 because there is a bug in +# containerd triggered when there are too many layers: +# https://github.com/containerd/containerd/issues/4684 { stdenv , busybox @@ -83,6 +87,7 @@ rec { mayastor-csi-image = dockerTools.buildLayeredImage (mayastorCsiImageProps // { name = "mayadata/mayastor-csi"; contents = [ busybox mayastor mayastorIscsiadm ]; + maxLayers = 42; }); mayastor-csi-dev-image = dockerTools.buildImage (mayastorCsiImageProps // { @@ -109,12 +114,14 @@ rec { ln -s ${moac.out}/bin/moac bin/moac chmod u-w bin ''; + maxLayers = 42; }; services-kiiss-image = dockerTools.buildLayeredImage (servicesImageProps // { name = "mayadata/services-kiiss"; contents = [ busybox mayastor ]; config = { Entrypoint = [ "/bin/kiiss" ]; }; + maxLayers = 42; }); services-kiiss-dev-image = dockerTools.buildImage (servicesImageProps // { From 9f868f0a8c2a6a05d90cfc2dc9938350fe0482c4 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Wed, 25 Nov 2020 19:03:57 +0000 Subject: [PATCH 85/92] snapshot: Reintroduce replica snapshot cargo test Refactor the test to use docker compose so that 
the remote mayastor instance runs in a separate container instead of juggling multiple mayastor instances, which became impossible when ms_exec was removed in 36680604. Have Bio::assess allow non-success replies with NVMe status code of InvalidOpcode to not fault the child as these are only sent by tests. While I'm here, move the handling of non-success replies to a separate non-inlined function for readability and to keep the hot path simple. --- mayastor/src/bdev/nexus/nexus_io.rs | 98 +++++++++------- mayastor/src/core/nvme.rs | 4 +- mayastor/tests/replica_snapshot.rs | 168 ++++++++++++++++++++++++++++ 3 files changed, 225 insertions(+), 45 deletions(-) create mode 100644 mayastor/tests/replica_snapshot.rs diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 6d4c916ea..324d863ea 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -26,7 +26,7 @@ use crate::{ NexusStatus, Reason, }, - core::{Bdev, Cores, Mthread, NvmeStatus, Reactors}, + core::{Bdev, Cores, GenericStatusCode, Mthread, NvmeStatus, Reactors}, nexus_uri::bdev_destroy, }; @@ -262,6 +262,59 @@ impl Bio { } } + /// assess non-success IO, out of the hot path + #[inline(never)] + fn assess_non_success(&mut self, child_io: &mut Bio) { + // note although this is not the hot path, with a sufficiently high + // queue depth it can turn whitehot rather quickly + let nvme_status = NvmeStatus::from(child_io.clone()); + // invalid NVMe opcodes are not sufficient to fault a child + // currently only tests send those + if nvme_status.status_code() == GenericStatusCode::InvalidOpcode { + return; + } + error!("{:#?}", nvme_status); + let child = child_io.bdev_as_ref(); + let n = self.nexus_as_ref(); + + if let Some(child) = n.child_lookup(&child.name()) { + let current_state = child.state.compare_and_swap( + ChildState::Open, + ChildState::Faulted(Reason::IoError), + ); + + if current_state == ChildState::Open { + warn!( + "core {} 
thread {:?}, faulting child {}", + Cores::current(), + Mthread::current(), + child + ); + + let name = n.name.clone(); + let uri = child.name.clone(); + + let fut = async move { + if let Some(nexus) = nexus_lookup(&name) { + nexus.pause().await.unwrap(); + nexus.reconfigure(DREvent::ChildFault).await; + bdev_destroy(&uri).await.unwrap(); + if nexus.status() != NexusStatus::Faulted { + nexus.resume().await.unwrap(); + } else { + error!(":{} has no children left... ", nexus); + } + } + }; + + Reactors::master().send_future(fut); + } + } else { + debug!("core {} thread {:?}, not faulting child {} as its already being removed", + Cores::current(), Mthread::current(), child); + } + } + /// assess the IO if we need to mark it failed or ok. #[inline] pub(crate) fn assess(&mut self, child_io: &mut Bio, success: bool) { @@ -273,48 +326,7 @@ impl Bio { } if !success { - // note although this is not the hot path, with a sufficiently high - // queue depth it can turn whitehot rather quickly - error!("{:#?}", NvmeStatus::from(child_io.clone())); - let child = child_io.bdev_as_ref(); - let n = self.nexus_as_ref(); - - if let Some(child) = n.child_lookup(&child.name()) { - let current_state = child.state.compare_and_swap( - ChildState::Open, - ChildState::Faulted(Reason::IoError), - ); - - if current_state == ChildState::Open { - warn!( - "core {} thread {:?}, faulting child {}", - Cores::current(), - Mthread::current(), - child - ); - - let name = n.name.clone(); - let uri = child.name.clone(); - - let fut = async move { - if let Some(nexus) = nexus_lookup(&name) { - nexus.pause().await.unwrap(); - nexus.reconfigure(DREvent::ChildFault).await; - bdev_destroy(&uri).await.unwrap(); - if nexus.status() != NexusStatus::Faulted { - nexus.resume().await.unwrap(); - } else { - error!(":{} has no children left... 
", nexus); - } - } - }; - - Reactors::master().send_future(fut); - } - } else { - debug!("core {} thread {:?}, not faulting child {} as its already being removed", - Cores::current(), Mthread::current(), child); - } + self.assess_non_success(child_io); } let pio_ctx = self.ctx_as_mut_ref(); diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs index 2b49916d6..beb16d839 100644 --- a/mayastor/src/core/nvme.rs +++ b/mayastor/src/core/nvme.rs @@ -4,7 +4,7 @@ use spdk_sys::spdk_bdev_io_get_nvme_status; #[derive(Debug, Copy, Clone, Eq, PartialOrd, PartialEq)] pub enum GenericStatusCode { Success, - InvalidOPCode, + InvalidOpcode, InternalDeviceError, AbortedRequested, Reserved, @@ -15,7 +15,7 @@ impl From for GenericStatusCode { fn from(i: i32) -> Self { match i { 0x00 => Self::Success, - 0x1 => Self::InvalidOPCode, + 0x01 => Self::InvalidOpcode, 0x06 => Self::InternalDeviceError, 0x07 => Self::AbortedRequested, 0x08 => Self::AbortedSubmissionQueueDeleted, diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs new file mode 100644 index 000000000..5779e9351 --- /dev/null +++ b/mayastor/tests/replica_snapshot.rs @@ -0,0 +1,168 @@ +use common::bdev_io; +use mayastor::{ + bdev::nexus_create, + core::{BdevHandle, CoreError, MayastorCliArgs}, + lvs::{Lvol, Lvs}, +}; +use rpc::mayastor::{ + CreatePoolRequest, + CreateReplicaRequest, + ShareProtocolReplica, + ShareReplicaRequest, +}; +use tracing::info; + +pub mod common; +use common::{compose::Builder, MayastorTest}; + +static DISKNAME1: &str = "/tmp/disk1.img"; +static POOL1_NAME: &str = "pool1"; +static POOL2_NAME: &str = "pool2"; + +static DISKSIZE_KB: u64 = 96 * 1024; + +static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756"; + +static NXNAME: &str = "replica_snapshot_test"; +static NXNAME_SNAP: &str = "replica_snapshot_test-snap"; + +#[tokio::test] +async fn replica_snapshot() { + // Start with fresh pools + common::delete_file(&[DISKNAME1.to_string()]); + 
common::truncate_file(DISKNAME1, DISKSIZE_KB); + + let test = Builder::new() + .name("replica_snapshot_test") + .network("10.1.0.0/16") + .add_container("ms1") + .with_clean(true) + .build() + .await + .unwrap(); + + let mut hdls = test.grpc_handles().await.unwrap(); + + // create a pool on remote node + hdls[0] + .mayastor + .create_pool(CreatePoolRequest { + name: POOL2_NAME.to_string(), + disks: vec!["malloc:///disk0?size_mb=96".into()], + }) + .await + .unwrap(); + + // create replica, shared over nvmf + hdls[0] + .mayastor + .create_replica(CreateReplicaRequest { + uuid: UUID1.to_string(), + pool: POOL2_NAME.to_string(), + size: 64 * 1024 * 1024, + thin: false, + share: ShareProtocolReplica::ReplicaNvmf as i32, + }) + .await + .unwrap(); + + let mayastor = MayastorTest::new(MayastorCliArgs::default()); + let ip0 = hdls[0].endpoint.ip(); + + let t = mayastor + .spawn(async move { + Lvs::create_or_import(CreatePoolRequest { + name: POOL1_NAME.to_string(), + disks: vec![format!("aio://{}", DISKNAME1)], + }) + .await + .unwrap(); + let pool = Lvs::lookup(POOL1_NAME).unwrap(); + pool.create_lvol(UUID1, 64 * 1024 * 1024, true) + .await + .unwrap(); + create_nexus(0, &ip0).await; + bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); + // Issue an unimplemented vendor command + // This checks that the target is correctly rejecting such commands + // In practice the nexus will not send such commands + custom_nvme_admin(0xc1).await.expect_err( + "unexpectedly succeeded invalid nvme admin command", + ); + bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); + let ts = create_snapshot().await.unwrap(); + // Check that IO to the replica still works after creating a + // snapshot + info!("testing IO to nexus"); + bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); + bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); + bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); + bdev_io::write_some(NXNAME, 1024, 0xaa).await.unwrap(); + bdev_io::read_some(NXNAME, 1024, 
0xaa).await.unwrap(); + ts + }) + .await; + + // Share the snapshot and create a new nexus + info!("sharing snapshot {}", t); + hdls[0] + .mayastor + .share_replica(ShareReplicaRequest { + uuid: format!("{}-snap-{}", UUID1, t), + share: ShareProtocolReplica::ReplicaNvmf as i32, + }) + .await + .unwrap(); + + mayastor + .spawn(async move { + info!("creating nexus for snapshot"); + create_nexus(t, &ip0).await; + // FIXME: Re-enable when addressing read-only aspect of snapshots + //bdev_io::write_some(NXNAME_SNAP, 0, 0xff) + // .await + // .expect_err("writing to snapshot should fail"); + // Verify that data read from snapshot remains unchanged + info!("testing IO to nexus for snapshot"); + bdev_io::write_some(NXNAME, 0, 0x55).await.unwrap(); + bdev_io::read_some(NXNAME, 0, 0x55).await.unwrap(); + bdev_io::read_some(NXNAME_SNAP, 0, 0xff).await.unwrap(); + bdev_io::read_some(NXNAME_SNAP, 1024, 0).await.unwrap(); + }) + .await; + + common::delete_file(&[DISKNAME1.to_string()]); +} + +async fn create_nexus(t: u64, ip: &std::net::IpAddr) { + let mut children = vec![ + "loopback:///".to_string() + &UUID1.to_string(), + format!("nvmf://{}:8420/nqn.2019-05.io.openebs:{}", &ip, UUID1), + ]; + let mut nexus_name = NXNAME; + if t > 0 { + children + .iter_mut() + .for_each(|c| *c = Lvol::format_snapshot_name(&c, t)); + nexus_name = NXNAME_SNAP; + } + + nexus_create(&nexus_name, 64 * 1024 * 1024, None, &children) + .await + .unwrap(); +} + +async fn create_snapshot() -> Result { + let h = BdevHandle::open(NXNAME, true, false).unwrap(); + let t = h + .create_snapshot() + .await + .expect("failed to create snapshot"); + Ok(t) +} + +async fn custom_nvme_admin(opc: u8) -> Result<(), CoreError> { + let h = BdevHandle::open(NXNAME, true, false).unwrap(); + h.nvme_admin_custom(opc).await?; + Ok(()) +} From 399a512235949d126c48c52782dd1a520ffcbf1e Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Mon, 23 Nov 2020 19:54:22 +0000 Subject: [PATCH 86/92] Make nexus children wait for bdev 
removal on close A channel is used to allow a nexus child to wait on the removal of the underlying bdev before completing the close call. The previous assumption had been that a call to destroy() would only return when the bdev had actually been removed. However, in some cases, such as for NVMf bdevs, this was not the case. When closing a local (loopback) child, the underlying bdev must not be destroyed. Doing so would result in the lvol and data being deleted. Instead, the child is only removed, but the underlying bdev remains untouched. --- mayastor/src/bdev/dev/loopback.rs | 5 ++- mayastor/src/bdev/mod.rs | 2 +- mayastor/src/bdev/nexus/nexus_bdev.rs | 4 +- mayastor/src/bdev/nexus/nexus_child.rs | 59 ++++++++++++++++++++++++-- mayastor/src/core/bdev.rs | 17 ++------ 5 files changed, 66 insertions(+), 21 deletions(-) diff --git a/mayastor/src/bdev/dev/loopback.rs b/mayastor/src/bdev/dev/loopback.rs index 67ce09f27..a8718ab4b 100644 --- a/mayastor/src/bdev/dev/loopback.rs +++ b/mayastor/src/bdev/dev/loopback.rs @@ -5,7 +5,7 @@ use snafu::ResultExt; use url::Url; use crate::{ - bdev::{util::uri, CreateDestroy, GetName}, + bdev::{lookup_child_from_bdev, util::uri, CreateDestroy, GetName}, core::Bdev, nexus_uri::{self, NexusBdevError}, }; @@ -78,6 +78,9 @@ impl CreateDestroy for Loopback { } async fn destroy(self: Box) -> Result<(), Self::Error> { + if let Some(child) = lookup_child_from_bdev(&self.name) { + child.remove(); + } Ok(()) } } diff --git a/mayastor/src/bdev/mod.rs b/mayastor/src/bdev/mod.rs index 442a90709..a8060bece 100644 --- a/mayastor/src/bdev/mod.rs +++ b/mayastor/src/bdev/mod.rs @@ -9,7 +9,7 @@ pub use nexus::{ NexusStatus, VerboseError, }, - nexus_child::{ChildState, Reason}, + nexus_child::{lookup_child_from_bdev, ChildState, Reason}, nexus_child_error_store::{ActionType, NexusErrStore, QueryType}, nexus_child_status_config, nexus_io::Bio, diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 
a9d874687..64dcea3ac 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -519,7 +519,7 @@ impl Nexus { let nexus_name = self.name.clone(); Reactor::block_on(async move { let nexus = nexus_lookup(&nexus_name).expect("Nexus not found"); - for child in &nexus.children { + for child in &mut nexus.children { if child.state() == ChildState::Open { if let Err(e) = child.close().await { error!( @@ -656,7 +656,7 @@ impl Nexus { unsafe { spdk_io_device_unregister(self.as_ptr(), None); } - for child in &self.children { + for child in &mut self.children { if let Err(e) = child.close().await { error!( "{}: child {} failed to close with error {}", diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 542ad4571..6df438c0e 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -7,6 +7,7 @@ use snafu::{ResultExt, Snafu}; use crate::{ bdev::{ nexus::{ + instances, nexus_channel::DREvent, nexus_child::ChildState::Faulted, nexus_child_status_config::ChildStatusConfig, @@ -15,12 +16,13 @@ use crate::{ NexusErrStore, VerboseError, }, - core::{Bdev, BdevHandle, CoreError, Descriptor, Reactor}, + core::{Bdev, BdevHandle, CoreError, Descriptor, Reactor, Reactors}, nexus_uri::{bdev_create, bdev_destroy, NexusBdevError}, rebuild::{ClientOperations, RebuildJob}, subsys::Config, }; use crossbeam::atomic::AtomicCell; +use futures::{channel::mpsc, SinkExt, StreamExt}; #[derive(Debug, Snafu)] pub enum ChildError { @@ -133,6 +135,8 @@ pub struct NexusChild { /// record of most-recent IO errors #[serde(skip_serializing)] pub(crate) err_store: Option, + #[serde(skip_serializing)] + remove_channel: (mpsc::Sender<()>, mpsc::Receiver<()>), } impl Display for NexusChild { @@ -330,13 +334,28 @@ impl NexusChild { } /// Close the nexus child. 
- pub(crate) async fn close(&self) -> Result<(), NexusBdevError> { + pub(crate) async fn close(&mut self) -> Result<(), NexusBdevError> { info!("Closing child {}", self.name); - if self.desc.is_some() && self.bdev.is_some() { + if self.bdev.is_none() { + info!("Child {} already closed", self.name); + return Ok(()); + } + + if self.desc.is_some() { self.desc.as_ref().unwrap().unclaim(); } + // Destruction raises an SPDK_BDEV_EVENT_REMOVE event. - self.destroy().await + let destroyed = self.destroy().await; + + // Only wait for bdev removal if the child has been initialised. + // An unintialised child won't have an underlying bdev. + if self.state.load() != ChildState::Init { + self.remove_channel.1.next().await; + } + + info!("Child {} closed", self.name); + destroyed } /// Called in response to a SPDK_BDEV_EVENT_REMOVE event. @@ -375,6 +394,23 @@ impl NexusChild { // This must be performed in this function. let desc = self.desc.take(); drop(desc); + + self.remove_complete(); + info!("Child {} removed", self.name); + } + + /// Signal that the child removal is complete. 
+ fn remove_complete(&self) { + let mut sender = self.remove_channel.0.clone(); + let name = self.name.clone(); + Reactors::current().send_future(async move { + if let Err(e) = sender.send(()).await { + error!( + "Failed to send remove complete for child {}, error {}", + name, e + ); + } + }); } /// create a new nexus child @@ -386,6 +422,7 @@ impl NexusChild { desc: None, state: AtomicCell::new(ChildState::Init), err_store: None, + remove_channel: mpsc::channel(0), } } @@ -455,3 +492,17 @@ impl NexusChild { } } } + +/// Looks up a child based on the underlying bdev name +pub fn lookup_child_from_bdev(bdev_name: &str) -> Option<&mut NexusChild> { + for nexus in instances() { + for child in &mut nexus.children { + if child.bdev.is_some() + && child.bdev.as_ref().unwrap().name() == bdev_name + { + return Some(child); + } + } + } + None +} diff --git a/mayastor/src/core/bdev.rs b/mayastor/src/core/bdev.rs index 930a1ec3a..23a7041a6 100644 --- a/mayastor/src/core/bdev.rs +++ b/mayastor/src/core/bdev.rs @@ -32,7 +32,7 @@ use spdk_sys::{ }; use crate::{ - bdev::nexus::{instances, nexus_io::IoType}, + bdev::{lookup_child_from_bdev, nexus::nexus_io::IoType}, core::{ share::{Protocol, Share}, uuid::Uuid, @@ -166,18 +166,9 @@ impl Bdev { match event { spdk_sys::SPDK_BDEV_EVENT_REMOVE => { info!("Received remove event for bdev {}", bdev.name()); - instances().iter_mut().for_each(|n| { - n.children - .iter_mut() - .filter(|c| { - c.bdev.is_some() - && c.bdev.as_ref().unwrap().name() - == bdev.name() - }) - .for_each(|c| { - c.remove(); - }); - }); + if let Some(child) = lookup_child_from_bdev(&bdev.name()) { + child.remove(); + } } spdk_sys::SPDK_BDEV_EVENT_RESIZE => { info!("Received resize event for bdev {}", bdev.name()) From b560dcb5d484ae7b7aa336d2edaf7a2dd5e2b259 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Tue, 24 Nov 2020 11:57:27 +0000 Subject: [PATCH 87/92] Add uninstall test For CI we want to use a single cluster to install and run builds for different commits. 
So the sequence would be 1. Push the images to the test registry 2. Run the install test 3. Run all other tests 4. Run the uninstall test 5. and maybe restart the test registry --- mayastor-test/e2e/uninstall/uninstall_test.go | 193 ++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 mayastor-test/e2e/uninstall/uninstall_test.go diff --git a/mayastor-test/e2e/uninstall/uninstall_test.go b/mayastor-test/e2e/uninstall/uninstall_test.go new file mode 100644 index 000000000..4b97f3cf3 --- /dev/null +++ b/mayastor-test/e2e/uninstall/uninstall_test.go @@ -0,0 +1,193 @@ +package basic_test + +import ( + "context" + "errors" + "fmt" + "os/exec" + "path" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + coreV1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/deprecated/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var testEnv *envtest.Environment + +/// Examine the nodes in the k8s cluster and return +/// the IP address of the master node (if one exists), +/// The assumption is that the test-registry is accessible via the IP addr of the master, +/// or any node in the cluster if the master noe does not exist +/// TODO Refine how we workout the address of the test-registry +func getRegistryAddress() (string, error) { + var master = "" + nodeList := coreV1.NodeList{} + if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { + return master, errors.New("failed to list nodes") + } + nodeIPs := make([]string, len(nodeList.Items)) + for ix, k8node := range nodeList.Items { + for _, k8Addr := range k8node.Status.Addresses { + if k8Addr.Type 
== coreV1.NodeInternalIP { + nodeIPs[ix] = k8Addr.Address + for label := range k8node.Labels { + if label == "node-role.kubernetes.io/master" { + master = k8Addr.Address + } + } + } + } + } + + /// TODO Refine how we workout the address of the test-registry + + /// If there is master node, use its IP address as the registry IP address + if len(master) != 0 { + return master, nil + } + + if len(nodeIPs) == 0 { + return "", errors.New("no usable nodes found") + } + + /// Choose the IP address of first node in the list as the registry IP address + return nodeIPs[0], nil +} + +// Encapsulate the logic to find where the deploy yamls are +func getDeployYamlDir() string { + _, filename, _, _ := runtime.Caller(0) + return path.Clean(filename + "/../../../../deploy") +} + +// Helper for passing yaml from the deploy directory to kubectl +func deleteDeployYaml(filename string) { + cmd := exec.Command("kubectl", "delete", "-f", filename) + cmd.Dir = getDeployYamlDir() + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// Encapsulate the logic to find where the templated yamls are +func getTemplateYamlDir() string { + _, filename, _, _ := runtime.Caller(0) + return path.Clean(filename + "/../../install/deploy") +} + +func makeImageName(registryAddress string, registryport string, imagename string, imageversion string) string { + return registryAddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion +} + +func deleteTemplatedYaml(filename string, imagename string, registryAddress string) { + fullimagename := makeImageName(registryAddress, "30291", imagename, "ci") + bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl delete -f -" + cmd := exec.Command("bash", "-c", bashcmd) + cmd.Dir = getTemplateYamlDir() + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// We expect this to fail a few times before it succeeds, +// so no throwing errors from here. 
+func mayastorReadyPodCount() int { + var mayastorDaemonSet appsv1.DaemonSet + if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: "mayastor"}, &mayastorDaemonSet) != nil { + return -1 + } + return int(mayastorDaemonSet.Status.CurrentNumberScheduled) +} + +// Teardown mayastor on the cluster under test. +// We deliberately call out to kubectl, rather than constructing the client-go +// objects, so that we can verfiy the local deploy yamls are correct. +func teardownMayastor() { + registryAddress, err := getRegistryAddress() + Expect(err).ToNot(HaveOccurred()) + deleteTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor", registryAddress) + deleteTemplatedYaml("moac-deployment.yaml.template", "moac", registryAddress) + deleteTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi", registryAddress) + deleteDeployYaml("nats-deployment.yaml") + deleteDeployYaml("mayastorpoolcrd.yaml") + deleteDeployYaml("moac-rbac.yaml") + deleteDeployYaml("storage-class.yaml") + deleteDeployYaml("namespace.yaml") + + // Given the yamls and the environment described in the test readme, + // we expect mayastor to be running on exactly 3 nodes. 
+ Eventually(mayastorReadyPodCount, + "120s", // timeout + "1s", // polling interval + ).Should(Equal(-1)) +} + +func TestTeardownSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Basic Teardown Suite") +} + +var _ = Describe("Mayastor setup", func() { + It("should teardown using yamls", func() { + teardownMayastor() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + useCluster := true + testEnv = &envtest.Environment{ + UseExistingCluster: &useCluster, + AttachControlPlaneOutput: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + go func() { + err = k8sManager.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred()) + }() + + mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mgrSyncCtxCancel() + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + fmt.Println("Failed to sync") + } + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
+ By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) From 7c8cc9338af273b33687fb92378d223ad26c8f55 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Wed, 25 Nov 2020 09:00:42 +0000 Subject: [PATCH 88/92] Fixes for review 1 --- mayastor-test/e2e/uninstall/uninstall_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mayastor-test/e2e/uninstall/uninstall_test.go b/mayastor-test/e2e/uninstall/uninstall_test.go index 4b97f3cf3..b8e00ac62 100644 --- a/mayastor-test/e2e/uninstall/uninstall_test.go +++ b/mayastor-test/e2e/uninstall/uninstall_test.go @@ -33,7 +33,7 @@ var testEnv *envtest.Environment /// Examine the nodes in the k8s cluster and return /// the IP address of the master node (if one exists), /// The assumption is that the test-registry is accessible via the IP addr of the master, -/// or any node in the cluster if the master noe does not exist +/// or any node in the cluster if the master node does not exist /// TODO Refine how we workout the address of the test-registry func getRegistryAddress() (string, error) { var master = "" @@ -128,8 +128,6 @@ func teardownMayastor() { deleteDeployYaml("storage-class.yaml") deleteDeployYaml("namespace.yaml") - // Given the yamls and the environment described in the test readme, - // we expect mayastor to be running on exactly 3 nodes. 
Eventually(mayastorReadyPodCount, "120s", // timeout "1s", // polling interval From 6bea972feef04a73abb36e1e9527e306ff5ee2ce Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Tue, 24 Nov 2020 21:36:46 +0100 Subject: [PATCH 89/92] nexus: do not hold lock in reconfigure --- mayastor/src/bdev/nexus/nexus_bdev.rs | 8 +- mayastor/src/bdev/nexus/nexus_channel.rs | 15 ++-- mayastor/src/bdev/nexus/nexus_child.rs | 1 + mayastor/src/bdev/nexus/nexus_io.rs | 5 +- mayastor/src/core/env.rs | 6 +- mayastor/src/core/nvme.rs | 97 ++++++++++++++++++++++-- mayastor/tests/common/compose.rs | 2 +- mayastor/tests/nvmet.rs | 83 +++++++++++++++++--- 8 files changed, 185 insertions(+), 32 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 64dcea3ac..4bddd268e 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -453,7 +453,6 @@ impl Nexus { /// reconfigure the child event handler pub(crate) async fn reconfigure(&mut self, event: DREvent) { - let _var = self.reconfigure_mutex.lock().await; let (s, r) = oneshot::channel::(); info!( @@ -568,7 +567,7 @@ impl Nexus { self.cancel_child_rebuild_jobs(&child.name).await; } - for child in self.children.iter_mut() { + for child in self.children.pop() { info!("Destroying child bdev {}", child.name); if let Err(e) = child.close().await { // TODO: should an error be returned here? @@ -771,6 +770,9 @@ impl Nexus { pub(crate) fn readv(&self, io: &Bio, channels: &mut NexusChannelInner) { // we use RR to read from the children. 
let child = channels.child_select(); + if child.is_none() { + return; + } // if there is no buffer space for us allocated within the request // allocate it now, taking care of proper alignment @@ -785,7 +787,7 @@ impl Nexus { return; } - let (desc, ch) = channels.readers[child].io_tuple(); + let (desc, ch) = channels.readers[child.unwrap()].io_tuple(); let ret = Self::readv_impl(io.as_ptr(), desc, ch); diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index ed6c6bcbe..ed7d231bc 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -75,14 +75,17 @@ pub enum DREvent { impl NexusChannelInner { /// very simplistic routine to rotate between children for read operations - pub(crate) fn child_select(&mut self) -> usize { - debug_assert!(!self.readers.is_empty()); - if self.previous < self.readers.len() - 1 { - self.previous += 1; + pub(crate) fn child_select(&mut self) -> Option { + if self.readers.is_empty() { + None } else { - self.previous = 0; + if self.previous < self.readers.len() - 1 { + self.previous += 1; + } else { + self.previous = 0; + } + Some(self.previous) } - self.previous } /// refreshing our channels simply means that we either have a child going diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 6df438c0e..60be84744 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -365,6 +365,7 @@ impl NexusChild { /// Note: The descriptor *must* be dropped for the remove to complete. pub(crate) fn remove(&mut self) { info!("Removing child {}", self.name); + dbg!(&self); // The bdev is being removed, so ensure we don't use it again. 
self.bdev = None; diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 324d863ea..14f7c674a 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -285,10 +285,11 @@ impl Bio { if current_state == ChildState::Open { warn!( - "core {} thread {:?}, faulting child {}", + "core {} thread {:?}, faulting child {} : {:#?}", Cores::current(), Mthread::current(), - child + child, + NvmeStatus::from(child_io.clone()) ); let name = n.name.clone(); diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 586b0ad27..69405a330 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -40,7 +40,7 @@ use spdk_sys::{ }; use crate::{ - bdev::nexus::nexus_child_status_config::ChildStatusConfig, + bdev::nexus::{instances, nexus_child_status_config::ChildStatusConfig}, core::{ reactor::{Reactor, ReactorState, Reactors}, Cores, @@ -279,7 +279,9 @@ async fn do_shutdown(arg: *mut c_void) { } iscsi::fini(); - + for n in instances() { + n.destroy().await; + } unsafe { spdk_rpc_finish(); spdk_subsystem_fini(Some(reactors_stop), arg); diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs index beb16d839..388c3da5e 100644 --- a/mayastor/src/core/nvme.rs +++ b/mayastor/src/core/nvme.rs @@ -1,14 +1,69 @@ -use crate::bdev::Bio; +use crate::{ + bdev::Bio, + core::nvme::StatusCodeType::{ + CommandSpecificStatus, + GenericCommandStatus, + MediaDataIntegrityErrors, + Reserved, + VendorSpecific, + }, +}; use spdk_sys::spdk_bdev_io_get_nvme_status; +#[derive(Debug, Copy, Clone, Eq, PartialOrd, PartialEq)] +pub enum StatusCodeType { + GenericCommandStatus, + CommandSpecificStatus, + MediaDataIntegrityErrors, + Reserved, + VendorSpecific, +} + +impl From for StatusCodeType { + fn from(i: i32) -> Self { + match i { + 0x00 => GenericCommandStatus, + 0x01 => CommandSpecificStatus, + 0x02 => MediaDataIntegrityErrors, + 0x07 => VendorSpecific, + _ => Reserved, + } + } +} + #[derive(Debug, 
Copy, Clone, Eq, PartialOrd, PartialEq)] pub enum GenericStatusCode { Success, InvalidOpcode, + InvalidOPCode, + InvalidFieldInCommand, + CommandIDConflict, + DataTransferError, + CommandsAbortedDueToPowerLoss, InternalDeviceError, AbortedRequested, - Reserved, AbortedSubmissionQueueDeleted, + AbortedSubmissionFailedFusedCommand, + AbortedSubmissionMissingFusedCommand, + InvalidNameSpaceOrFormat, + CommandSequenceError, + InvalidSGLDescriptor, + InvalidNumberOfSGLDescriptors, + DataSGLLengthInvalid, + MetaDataSGLLengthInvalid, + SGLTypeDescriptorInvalid, + InvalidUseOfControlMemoryBuffer, + PRPOffsetInvalid, + AtomicWriteUnitExceeded, + OperationDenied, + SGLOffsetInvalid, + HostIdentifierInvalidFormat, + KATOExpired, + KATOInvalid, + CommandAbortPreemt, + SanitizeFailed, + SanitizeInProgress, + Reserved, } impl From for GenericStatusCode { @@ -16,9 +71,35 @@ impl From for GenericStatusCode { match i { 0x00 => Self::Success, 0x01 => Self::InvalidOpcode, + 0x01 => Self::InvalidOPCode, + 0x02 => Self::InvalidFieldInCommand, + 0x03 => Self::CommandIDConflict, + 0x04 => Self::DataTransferError, + 0x05 => Self::CommandsAbortedDueToPowerLoss, 0x06 => Self::InternalDeviceError, 0x07 => Self::AbortedRequested, 0x08 => Self::AbortedSubmissionQueueDeleted, + 0x09 => Self::AbortedSubmissionFailedFusedCommand, + 0x0A => Self::AbortedSubmissionMissingFusedCommand, + 0x0B => Self::InvalidNameSpaceOrFormat, + 0x0C => Self::CommandSequenceError, + 0x0D => Self::InvalidSGLDescriptor, + 0x0E => Self::InvalidSGLDescriptor, + 0x0F => Self::DataSGLLengthInvalid, + 0x10 => Self::MetaDataSGLLengthInvalid, + 0x11 => Self::SGLTypeDescriptorInvalid, + 0x12 => Self::InvalidUseOfControlMemoryBuffer, + 0x13 => Self::PRPOffsetInvalid, + 0x14 => Self::AtomicWriteUnitExceeded, + 0x15 => Self::OperationDenied, + 0x16 => Self::SGLOffsetInvalid, + 0x17 => Self::Reserved, + 0x18 => Self::HostIdentifierInvalidFormat, + 0x19 => Self::KATOExpired, + 0x1A => Self::KATOInvalid, + 0x1B => 
Self::CommandAbortPreemt, + 0x1C => Self::SanitizeFailed, + 0x1D => Self::SanitizeInProgress, _ => { error!("unknown code {}", i); Self::Reserved @@ -32,7 +113,7 @@ pub struct NvmeStatus { /// NVMe completion queue entry cdw0: u32, /// NVMe status code type - sct: i32, + sct: StatusCodeType, /// NVMe status code sc: GenericStatusCode, } @@ -41,7 +122,9 @@ impl NvmeStatus { pub fn status_code(&self) -> GenericStatusCode { self.sc } - // todo make enums + pub fn status_type(&self) -> StatusCodeType { + self.sct + } } impl From for NvmeStatus { @@ -61,7 +144,7 @@ impl From for NvmeStatus { Self { cdw0, - sct, + sct: StatusCodeType::from(sct), sc: GenericStatusCode::from(sc), } } @@ -84,7 +167,7 @@ impl From<&mut Bio> for NvmeStatus { Self { cdw0, - sct, + sct: StatusCodeType::from(sct), sc: GenericStatusCode::from(sc), } } @@ -106,7 +189,7 @@ impl From<&Bio> for NvmeStatus { Self { cdw0, - sct, + sct: StatusCodeType::from(sct), sc: GenericStatusCode::from(sc), } } diff --git a/mayastor/tests/common/compose.rs b/mayastor/tests/common/compose.rs index 33514e6e7..9461872b2 100644 --- a/mayastor/tests/common/compose.rs +++ b/mayastor/tests/common/compose.rs @@ -75,7 +75,7 @@ impl<'a> MayastorTest<'a> { .spawn(move || { MayastorEnvironment::new(args).init(); tx.send(Reactors::master()).unwrap(); - Reactors::master().developer_delayed(); + Reactors::master().running(); Reactors::master().poll_reactor(); }) .unwrap(); diff --git a/mayastor/tests/nvmet.rs b/mayastor/tests/nvmet.rs index cac78a03d..df1f5784c 100644 --- a/mayastor/tests/nvmet.rs +++ b/mayastor/tests/nvmet.rs @@ -1,18 +1,40 @@ use common::compose::MayastorTest; -use mayastor::core::{Bdev, MayastorCliArgs}; +use mayastor::core::{ + io_driver, + mayastor_env_stop, + Bdev, + Cores, + MayastorCliArgs, + SIG_RECEIVED, +}; +static MAYASTOR: OnceCell = OnceCell::new(); -use mayastor::bdev::nexus_create; +use mayastor::{ + bdev::{nexus_create, nexus_lookup}, + core::io_driver::JobQueue, +}; +use 
once_cell::sync::OnceCell; +use std::{ + sync::{atomic::Ordering, Arc}, + time::Duration, +}; pub mod common; async fn create_nexus() { - nexus_create( - "nexus0", - 250 * 1024 * 1024 * 1024, - None, - &["nvmf://127.0.0.1/replica1".to_string()], - ) - .await - .unwrap(); + let children = (1 ..= 3) + .into_iter() + .map(|i| format!("nvmf://127.0.0.1/replica{}", i)) + .collect::>(); + + nexus_create("nexus0", 250 * 1024 * 1024 * 1024, None, &children) + .await + .unwrap(); +} + +async fn remove_child(index: usize) { + let to_remove = format!("127.0.0.1/replica{}", index); + let nexus = nexus_lookup("nexus0").unwrap(); + nexus.remove_child(&to_remove).await.unwrap() } async fn bdev_info() { @@ -20,7 +42,24 @@ async fn bdev_info() { dbg!(bdev); } -#[ignore] +async fn start_workload(queue: Arc) { + let ms = MAYASTOR.get().unwrap(); + ms.spawn(async move { + for c in Cores::count() { + let bdev = Bdev::lookup_by_name("nexus0").unwrap(); + let job = io_driver::Builder::new() + .core(c) + .bdev(bdev) + .qd(8) + .io_size(512) + .build() + .await; + queue.start(job); + } + }) + .await; +} + #[tokio::test] async fn nvmet_nexus_test() { std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); @@ -30,7 +69,29 @@ async fn nvmet_nexus_test() { grpc_endpoint: "0.0.0.0".to_string(), ..Default::default() }); + let ms = MAYASTOR.get_or_init(|| ms); ms.spawn(create_nexus()).await; ms.spawn(bdev_info()).await; + + let queue = Arc::new(JobQueue::new()); + start_workload(Arc::clone(&queue)).await; + + let mut ticker = tokio::time::interval(Duration::from_millis(1000)); + // ctrl was hit so exit the loop here + loop { + if SIG_RECEIVED.load(Ordering::Relaxed) { + break; + } + + ms.spawn(async { + let bdev = Bdev::bdev_first().unwrap(); + println!("{:?}", bdev.stats().await.unwrap()); + }) + .await; + ticker.tick().await; + } + + queue.stop_all().await; + ms.stop().await; } From 04efc8bd46932d1acd17a97df1b79b9521f95074 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Wed, 25 Nov 2020 
18:06:03 +0100 Subject: [PATCH 90/92] nexus: add retire function --- mayastor/src/bdev/nexus/nexus_bdev.rs | 2 +- mayastor/src/bin/nvmet.rs | 98 +++++++++++++++++++++++++++ mayastor/src/core/io_driver.rs | 4 +- mayastor/src/subsys/config/opts.rs | 3 +- mayastor/tests/nvmet.rs | 33 +++++---- 5 files changed, 124 insertions(+), 16 deletions(-) create mode 100644 mayastor/src/bin/nvmet.rs diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 4bddd268e..bce1d9b96 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -452,7 +452,7 @@ impl Nexus { } /// reconfigure the child event handler - pub(crate) async fn reconfigure(&mut self, event: DREvent) { + pub(crate) async fn reconfigure(&self, event: DREvent) { let (s, r) = oneshot::channel::(); info!( diff --git a/mayastor/src/bin/nvmet.rs b/mayastor/src/bin/nvmet.rs new file mode 100644 index 000000000..7678ca2f1 --- /dev/null +++ b/mayastor/src/bin/nvmet.rs @@ -0,0 +1,98 @@ +use clap::{App, AppSettings, Arg, ArgMatches}; +use futures::FutureExt; +use mayastor::{ + bdev::{nexus_create, nexus_lookup, util::uring}, + core::{MayastorCliArgs, MayastorEnvironment, Reactors, Share}, + grpc, + grpc::endpoint, + logger, + subsys, +}; +use std::path::Path; +mayastor::CPS_INIT!(); +#[macro_use] +extern crate tracing; + +const NEXUS: &str = "nexus-e1e27668-fbe1-4c8a-9108-513f6e44d342"; + +async fn create_nexus(args: &ArgMatches<'_>) { + let ep = args.values_of("endpoint").expect("invalid endpoints"); + + let children = ep + .into_iter() + .map(|v| { + dbg!(v); + format!("nvmf://{}/replica1", v) + }) + .collect::>(); + + nexus_create(NEXUS, 250 * 1024 * 1024 * 1024, Some(NEXUS), &children) + .await + .unwrap(); + + let nexus = nexus_lookup(NEXUS).unwrap(); + nexus.share_nvmf().await.unwrap(); +} + +fn main() -> Result<(), Box> { + std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); + std::env::set_var("MY_POD_IP", "192.168.1.4"); + + let matches = 
App::new("NVMeT CLI") + .version("0.1") + .settings(&[ + AppSettings::ColoredHelp, + AppSettings::ColorAlways]) + .about("NVMe test utility to quickly create a nexus over existing nvme targets") + .arg( + Arg::with_name("replica-index") + .short("n") + .long("replica-index") + .default_value("1") + .help("index of the NQN to connect to e.g 1 for replica1")) + .arg( + Arg::with_name("endpoint") + .short("e") + .min_values(1) + .long("endpoint") + .help("endpoints to connect to")) + .get_matches(); + + let mut margs = MayastorCliArgs::default(); + + margs.rpc_address = "0.0.0.0:10124".to_string(); + + logger::init("INFO"); + + let mut rt = tokio::runtime::Builder::new() + .basic_scheduler() + .enable_all() + .build() + .unwrap(); + + let grpc_endpoint = grpc::endpoint(margs.grpc_endpoint.clone()); + let rpc_address = margs.rpc_address.clone(); + + let ms = rt.enter(|| MayastorEnvironment::new(margs).init()); + let master = Reactors::master(); + + master.send_future(async { info!("NVMeT started {} ...", '\u{1F680}') }); + master.send_future(async move { + create_nexus(&matches).await; + }); + + let mut futures = Vec::new(); + + futures.push(master.boxed_local()); + futures.push(subsys::Registration::run().boxed_local()); + futures.push( + grpc::MayastorGrpcServer::run(grpc_endpoint, rpc_address).boxed_local(), + ); + + rt.block_on(futures::future::try_join_all(futures)) + .expect_err("reactor exit in abnormal state"); + + ms.fini(); + + Ok(()) +} diff --git a/mayastor/src/core/io_driver.rs b/mayastor/src/core/io_driver.rs index 14ce02a4e..6530a05b9 100644 --- a/mayastor/src/core/io_driver.rs +++ b/mayastor/src/core/io_driver.rs @@ -16,7 +16,7 @@ use crate::{ nexus_uri::bdev_create, }; -#[derive(Debug)] +#[derive(Debug, Copy, Clone)] pub enum IoType { /// perform random read operations READ, @@ -324,7 +324,7 @@ impl Builder { queue.push(Io { buf: DmaBuf::new(self.io_size as u64, bdev.alignment()) .unwrap(), - iot: IoType::READ, + iot: self.iot, offset, job: 
NonNull::dangling(), }); diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index 29b400283..c82d0f04e 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -17,6 +17,7 @@ use spdk_sys::{ spdk_nvmf_target_opts, spdk_nvmf_transport_opts, SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, + SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET, }; use crate::bdev::ActionType; @@ -255,7 +256,7 @@ impl Default for NvmeBdevOpts { low_priority_weight: 0, medium_priority_weight: 0, high_priority_weight: 0, - nvme_adminq_poll_period_us: 100, + nvme_adminq_poll_period_us: 0, nvme_ioq_poll_period_us: 0, io_queue_requests: 0, delay_cmd_submit: true, diff --git a/mayastor/tests/nvmet.rs b/mayastor/tests/nvmet.rs index df1f5784c..a13e962e9 100644 --- a/mayastor/tests/nvmet.rs +++ b/mayastor/tests/nvmet.rs @@ -5,13 +5,14 @@ use mayastor::core::{ Bdev, Cores, MayastorCliArgs, + Share, SIG_RECEIVED, }; static MAYASTOR: OnceCell = OnceCell::new(); use mayastor::{ bdev::{nexus_create, nexus_lookup}, - core::io_driver::JobQueue, + core::io_driver::{IoType, JobQueue}, }; use once_cell::sync::OnceCell; use std::{ @@ -26,14 +27,22 @@ async fn create_nexus() { .map(|i| format!("nvmf://127.0.0.1/replica{}", i)) .collect::>(); - nexus_create("nexus0", 250 * 1024 * 1024 * 1024, None, &children) - .await - .unwrap(); + nexus_create( + "e1e27668-fbe1-4c8a-9108-513f6e44d342", + 250 * 1024 * 1024 * 1024, + None, + &children, + ) + .await + .unwrap(); + + let nexus = nexus_lookup("e1e27668-fbe1-4c8a-9108-513f6e44d342").unwrap(); + nexus.share_nvmf().await; } async fn remove_child(index: usize) { let to_remove = format!("127.0.0.1/replica{}", index); - let nexus = nexus_lookup("nexus0").unwrap(); + let nexus = nexus_lookup("e1e27668-fbe1-4c8a-9108-513f6e44d342").unwrap(); nexus.remove_child(&to_remove).await.unwrap() } @@ -46,11 +55,14 @@ async fn start_workload(queue: Arc) { let ms = MAYASTOR.get().unwrap(); ms.spawn(async move { for c in Cores::count() { 
- let bdev = Bdev::lookup_by_name("nexus0").unwrap(); + let bdev = + Bdev::lookup_by_name("e1e27668-fbe1-4c8a-9108-513f6e44d342") + .unwrap(); let job = io_driver::Builder::new() .core(c) + .rw(IoType::WRITE) .bdev(bdev) - .qd(8) + .qd(64) .io_size(512) .build() .await; @@ -63,8 +75,9 @@ async fn start_workload(queue: Arc) { #[tokio::test] async fn nvmet_nexus_test() { std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); + std::env::set_var("MY_POD_IP", "192.168.1.4"); let ms = MayastorTest::new(MayastorCliArgs { - reactor_mask: 0x3.to_string(), + reactor_mask: 0xA.to_string(), no_pci: true, grpc_endpoint: "0.0.0.0".to_string(), ..Default::default() @@ -74,9 +87,6 @@ async fn nvmet_nexus_test() { ms.spawn(create_nexus()).await; ms.spawn(bdev_info()).await; - let queue = Arc::new(JobQueue::new()); - start_workload(Arc::clone(&queue)).await; - let mut ticker = tokio::time::interval(Duration::from_millis(1000)); // ctrl was hit so exit the loop here loop { @@ -92,6 +102,5 @@ async fn nvmet_nexus_test() { ticker.tick().await; } - queue.stop_all().await; ms.stop().await; } From d1ece72940e599cfad58392c37948155e5294577 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Fri, 27 Nov 2020 10:39:59 +0100 Subject: [PATCH 91/92] update dependencies and fix clippies --- Cargo.lock | 394 +++++++++++------- mayastor/src/bdev/nexus/nexus_bdev.rs | 15 +- mayastor/src/bdev/nexus/nexus_channel.rs | 10 +- mayastor/src/bdev/nexus/nexus_child.rs | 35 +- .../src/bdev/nexus/nexus_child_error_store.rs | 9 +- mayastor/src/bdev/nexus/nexus_io.rs | 144 +++---- mayastor/src/bdev/nexus/nexus_nbd.rs | 6 +- mayastor/src/bin/nvmet.rs | 75 ++-- mayastor/src/core/env.rs | 11 +- mayastor/src/core/nvme.rs | 1 - mayastor/src/subsys/config/opts.rs | 1 - mayastor/tests/bdev_test.rs | 3 +- mayastor/tests/common/mod.rs | 10 +- mayastor/tests/io_job.rs | 22 +- mayastor/tests/iscsi_tgt.rs | 7 +- mayastor/tests/lock_lba_range.rs | 2 +- mayastor/tests/lvs_pool.rs | 6 +- mayastor/tests/nexus_rebuild.rs | 27 +- 
mayastor/tests/nexus_share.rs | 6 +- mayastor/tests/nvmet.rs | 106 ----- mayastor/tests/nvmf.rs | 6 +- mayastor/tests/reactor.rs | 7 +- mayastor/tests/remove_child.rs | 2 - nix/lib/rust.nix | 2 +- nix/pkgs/mayastor/default.nix | 2 +- nix/sources.json | 18 +- 26 files changed, 442 insertions(+), 485 deletions(-) delete mode 100644 mayastor/tests/nvmet.rs diff --git a/Cargo.lock b/Cargo.lock index 2837d18c9..5457418af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "404df68c297f73b8d36c9c9056404913d25905a8f80127b0e5fe147c9c4b9f02" +checksum = "452299e87817ae5673910e53c243484ca38be3828db819b6011736fc6982e874" dependencies = [ "actix-codec", "actix-connect", @@ -85,15 +85,15 @@ dependencies = [ "log", "mime", "percent-encoding 2.1.0", - "pin-project 1.0.1", + "pin-project 1.0.2", "rand 0.7.3", "regex", "serde", "serde_json", - "serde_urlencoded", + "serde_urlencoded 0.7.0", "sha-1", "slab", - "time 0.2.22", + "time 0.2.23", ] [[package]] @@ -103,7 +103,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a60f9ba7c4e6df97f3aacb14bb5c0cd7d98a49dcbaed0d7f292912ad9a6a3ed2" dependencies = [ "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88344b7a5ef27e5e09e73565379f69273dd3e2d29e82afc381b84d170d0a5631" +checksum = "3a89a7b133e734f6d1e555502d450408ae04105826aef7e3605019747d3ac732" dependencies = [ "actix-codec", "actix-http", @@ -257,15 +257,15 @@ dependencies = [ "fxhash", "log", "mime", - "pin-project 1.0.1", + "pin-project 1.0.2", "regex", "rustls", "serde", "serde_json", - "serde_urlencoded", + "serde_urlencoded 0.7.0", "socket2", - "time 0.2.22", - "tinyvec 1.0.1", + "time 
0.2.23", + "tinyvec", "url", ] @@ -277,7 +277,7 @@ checksum = "ad26f77093333e0e7c6ffe54ebe3582d908a104e448723eec6d43d08b07143fb" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -382,14 +382,14 @@ dependencies = [ "flate2", "futures-core", "memchr", - "pin-project-lite", + "pin-project-lite 0.1.11", ] [[package]] name = "async-executor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" +checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" dependencies = [ "async-task", "concurrent-queue", @@ -412,9 +412,9 @@ dependencies = [ [[package]] name = "async-io" -version = "1.1.10" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54bc4c1c7292475efb2253227dbcfad8fe1ca4c02bc62c510cc2f3da5c4704e" +checksum = "40a0b2bb8ae20fede194e779150fe283f65a4a08461b496de546ec366b174ad9" dependencies = [ "concurrent-queue", "fastrand", @@ -478,9 +478,9 @@ dependencies = [ [[package]] name = "async-rustls" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c238bd34d425674d8003b8d674cc04baf74e1b71802f3c62451e3bf86f2858ef" +checksum = "7f38092e8f467f47aadaff680903c7cbfeee7926b058d7f40af2dd4c878fbdee" dependencies = [ "futures-lite", "rustls", @@ -505,7 +505,7 @@ checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -516,13 +516,13 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.41" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0" +checksum = 
"8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -556,9 +556,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "awc" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "425980a1e58e5030a3e4b065a3d577c8f0e16142ea9d81f30614eae810c98577" +checksum = "e9056f5e27b0d56bedd82f78eceaba0bcddcbbcbbefae3cd0a53994b28c96ff5" dependencies = [ "actix-codec", "actix-http", @@ -576,14 +576,14 @@ dependencies = [ "rustls", "serde", "serde_json", - "serde_urlencoded", + "serde_urlencoded 0.7.0", ] [[package]] name = "backtrace" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2baad346b2d4e94a24347adeee9c7a93f412ee94b9cc26e5b59dea23848e9f28" +checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -622,11 +622,11 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "base64-url" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9956620977549f836bcfa3ab63551fe9a021df799437a403629bd991aa8d92ff" +checksum = "c8a7558a139be0909d407d70873248681e70bac73595c3ded9dba98a625c8acb" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", ] [[package]] @@ -748,7 +748,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "serde_urlencoded", + "serde_urlencoded 0.6.1", "thiserror", "tokio", "tokio-util 0.3.1", @@ -851,9 +851,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.61" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" 
+checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15" [[package]] name = "cexpr" @@ -969,11 +969,21 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" +dependencies = [ + "cfg-if 0.1.10", + "wasm-bindgen", +] + [[package]] name = "const-random" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02" +checksum = "486d435a7351580347279f374cb8a3c16937485441db80181357b7c4d70f17ed" dependencies = [ "const-random-macro", "proc-macro-hack", @@ -981,12 +991,14 @@ dependencies = [ [[package]] name = "const-random-macro" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685" +checksum = "49a84d8ff70e3ec52311109b019c27672b4c1929e4cf7c18bcf0cd9fb5e230be" dependencies = [ "getrandom 0.2.0", + "lazy_static", "proc-macro-hack", + "tiny-keccak", ] [[package]] @@ -1008,7 +1020,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784ad0fbab4f3e9cef09f20e0aea6000ae08d2cb98ac4c0abc53df18803d702f" dependencies = [ "percent-encoding 2.1.0", - "time 0.2.22", + "time 0.2.23", "version_check", ] @@ -1085,7 +1097,7 @@ dependencies = [ "crossbeam-deque", "crossbeam-epoch", "crossbeam-queue", - "crossbeam-utils", + "crossbeam-utils 0.7.2", ] [[package]] @@ -1094,7 +1106,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" dependencies = [ - "crossbeam-utils", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -1105,7 +1117,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -1117,7 +1129,7 @@ checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg 1.0.1", "cfg-if 0.1.10", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", "memoffset", @@ -1131,7 +1143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if 0.1.10", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -1152,6 +1164,23 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crossbeam-utils" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +dependencies = [ + "autocfg 1.0.1", + "cfg-if 1.0.0", + "lazy_static", +] + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "csi" version = "0.2.0" @@ -1266,7 +1295,7 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "strsim 0.9.3", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1288,7 +1317,7 @@ checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1316,7 +1345,7 @@ checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1352,7 +1381,7 @@ checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2 1.0.24", 
"quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1442,7 +1471,7 @@ checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1498,7 +1527,7 @@ dependencies = [ "heck", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1535,7 +1564,7 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "rustversion", - "syn 1.0.48", + "syn 1.0.51", "synstructure", ] @@ -1584,7 +1613,7 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "synstructure", ] @@ -1737,7 +1766,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.1.11", "waker-fn", ] @@ -1750,7 +1779,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -1787,7 +1816,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.1", + "pin-project 1.0.2", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1879,7 +1908,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -2033,7 +2062,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.1", + "pin-project 1.0.2", "socket2", "tokio", "tower-service", @@ -2115,9 +2144,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ "cfg-if 1.0.0", ] @@ -2294,7 +2323,7 @@ dependencies = [ "serde_yaml", "static_assertions", "thiserror", - "time 0.2.22", + "time 0.2.23", "tokio", "url", ] @@ 
-2309,7 +2338,7 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "serde_json", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -2382,9 +2411,9 @@ checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" [[package]] name = "lock_api" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" dependencies = [ "scopeguard", ] @@ -2613,7 +2642,7 @@ checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", - "miow 0.3.5", + "miow 0.3.6", "winapi 0.3.9", ] @@ -2642,9 +2671,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", "winapi 0.3.9", @@ -2658,9 +2687,9 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "native-tls" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a1cda389c26d6b88f3d2dc38aa1b750fe87d298cc5d795ec9e975f402f00372" +checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" dependencies = [ "lazy_static", "libc", @@ -2676,9 +2705,9 @@ dependencies = [ [[package]] name = "nats" -version = "0.8.2" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8512d6f66c58eaa2e7785412b50bc1f6ecded27c01b175b4eb18a3f41beb2c47" +checksum = "14b716f15b711daea70d5da9195f5c10063d2a14d74b8dba256f8eb6d45d8b29" dependencies = [ "async-channel", "async-executor", @@ -2686,6 +2715,7 @@ dependencies = [ "async-lock", "async-net", 
"async-rustls", + "base64 0.13.0", "base64-url", "fastrand", "futures-lite", @@ -2696,7 +2726,7 @@ dependencies = [ "nuid", "once_cell", "regex", - "webpki-roots", + "rustls-native-certs", ] [[package]] @@ -2844,9 +2874,9 @@ checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" [[package]] name = "once_cell" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "opaque-debug" @@ -2929,9 +2959,9 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api", @@ -2969,11 +2999,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59698ea79df9bf77104aefd39cc3ec990cb9693fb59c3b0a70ddf2646fdffb4b" +checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "once_cell", "regex", ] @@ -3011,11 +3041,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" dependencies = [ - "pin-project-internal 1.0.1", + "pin-project-internal 1.0.2", ] [[package]] @@ -3026,18 +3056,18 @@ checksum = 
"65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -3046,6 +3076,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +[[package]] +name = "pin-project-lite" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" + [[package]] name = "pin-utils" version = "0.1.0" @@ -3086,7 +3122,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "version_check", ] @@ -3179,7 +3215,7 @@ dependencies = [ "itertools 0.8.2", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -3444,12 +3480,12 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e" +checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" dependencies = [ "async-compression", - "base64 0.12.3", + "base64 0.13.0", "bytes 0.5.6", "encoding_rs", "futures-core", @@ -3466,24 +3502,25 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding 2.1.0", - "pin-project-lite", + "pin-project-lite 0.2.0", "serde", "serde_json", - "serde_urlencoded", + "serde_urlencoded 0.7.0", "tokio", "tokio-tls", "url", 
"wasm-bindgen", "wasm-bindgen-futures", + "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] [[package]] name = "resolv-conf" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11834e137f3b14e309437a8276714eed3a80d1ef894869e510f2c0c0b98b9f4a" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", "quick-error", @@ -3515,9 +3552,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.15" +version = "0.16.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "70017ed5c555d79ee3538fc63ca09c70ad8f317dcadc1adc2c496b60c22bb24f" dependencies = [ "cc", "libc", @@ -3554,14 +3591,14 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.8.1", ] [[package]] @@ -3742,7 +3779,7 @@ checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -3769,11 +3806,23 @@ dependencies = [ "url", ] +[[package]] +name = "serde_urlencoded" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_with" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8bac272128fb3b1e98872dca27a05c18d8b78b9bd089d3edb7b5871501b50bce" +checksum = "15f6201e064705553ece353a736a64be975680bd244908cf63e8fa71e478a51a" dependencies = [ "serde", "serde_with_macros", @@ -3781,14 +3830,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c747a9ab2e833b807f74f6b6141530655010bfa9c9c06d5508bce75c8f8072f" +checksum = "1197ff7de45494f290c1e3e1a6f80e108974681984c87a3e480991ef3d0f1950" dependencies = [ "darling 0.10.2", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -3921,9 +3970,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" +checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" [[package]] name = "smol" @@ -3963,16 +4012,16 @@ checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] name = "socket2" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44" +checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "redox_syscall", "winapi 0.3.9", @@ -3994,9 +4043,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c" +checksum = 
"cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" dependencies = [ "version_check", ] @@ -4037,7 +4086,7 @@ dependencies = [ "quote 1.0.7", "serde", "serde_derive", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4053,7 +4102,7 @@ dependencies = [ "serde_derive", "serde_json", "sha1", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4101,7 +4150,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4119,7 +4168,7 @@ dependencies = [ "heck", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4161,9 +4210,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.48" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" +checksum = "3b4f34193997d92804d359ed09953e25d5138df6bcc055a71bf68ee89fdf9223" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", @@ -4187,7 +4236,7 @@ checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "unicode-xid 0.2.1", ] @@ -4222,9 +4271,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] @@ -4255,7 +4304,7 @@ checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4289,9 +4338,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" +checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" dependencies = [ "const_fn", "libc", @@ -4322,20 +4371,23 @@ dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", "standback", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] -name = "tinyvec" -version = "0.3.4" +name = "tiny-keccak" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] [[package]] name = "tinyvec" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b78a366903f506d2ad52ca8dc552102ffdd3e937ba8a227f024dc1d1eae28575" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" dependencies = [ "tinyvec_macros", ] @@ -4348,9 +4400,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" dependencies = [ "bytes 0.5.6", "fnv", @@ -4363,7 +4415,7 @@ dependencies = [ "mio-named-pipes", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.11", "signal-hook-registry", "slab", "tokio-macros", @@ -4372,13 +4424,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + 
"syn 1.0.51", ] [[package]] @@ -4413,7 +4465,7 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite", + "pin-project-lite 0.1.11", "tokio", ] @@ -4427,7 +4479,7 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite", + "pin-project-lite 0.1.11", "tokio", ] @@ -4470,7 +4522,7 @@ dependencies = [ "proc-macro2 1.0.24", "prost-build", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4653,13 +4705,13 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log", - "pin-project-lite", + "pin-project-lite 0.2.0", "tracing-attributes", "tracing-core", ] @@ -4672,7 +4724,7 @@ checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", ] [[package]] @@ -4739,9 +4791,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd7061ba6f4d4d9721afedffbfd403f20f39a4301fee1b70d6fcd09cca69f28" +checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" dependencies = [ "async-trait", "backtrace", @@ -4759,9 +4811,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f23cdfdc3d8300b3c50c9e84302d3bd6d860fb9529af84ace6cf9665f181b77" +checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" dependencies = [ "backtrace", "cfg-if 0.1.10", @@ -4819,18 +4871,18 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = 
"0.1.13" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" dependencies = [ - "tinyvec 0.3.4", + "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-width" @@ -4974,7 +5026,7 @@ dependencies = [ "log", "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "wasm-bindgen-shared", ] @@ -5008,7 +5060,7 @@ checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5019,6 +5071,30 @@ version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" +[[package]] +name = "wasm-bindgen-test" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34d1cdc8b98a557f24733d50a1199c4b0635e465eecba9c45b214544da197f64" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fb9c67be7439ee8ab1b7db502a49c05e51e2835b66796c705134d9b8e1a585" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", +] + [[package]] name = "web-sys" version = "0.3.45" @@ -5170,6 +5246,6 @@ checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" dependencies = 
[ "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.48", + "syn 1.0.51", "synstructure", ] diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index bce1d9b96..7bcfbf308 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -10,7 +10,6 @@ use std::{ os::raw::c_void, }; -use async_mutex::Mutex; use futures::channel::oneshot; use nix::errno::Errno; use serde::Serialize; @@ -324,8 +323,6 @@ pub struct Nexus { pub nexus_target: Option, /// the maximum number of times to attempt to send an IO pub(crate) max_io_attempts: i32, - /// mutex to serialise reconfigure - reconfigure_mutex: Mutex<()>, } unsafe impl core::marker::Sync for Nexus {} @@ -418,7 +415,6 @@ impl Nexus { size, nexus_target: None, max_io_attempts: cfg.err_store_opts.max_io_attempts, - reconfigure_mutex: Mutex::new(()), }); n.bdev.set_uuid(match uuid { @@ -542,6 +538,7 @@ impl Nexus { /// Destroy the nexus pub async fn destroy(&mut self) -> Result<(), Error> { + info!("Destroying nexus {}", self.name); // used to synchronize the destroy call extern "C" fn nexus_destroy_cb(arg: *mut c_void, rc: i32) { let s = unsafe { Box::from_raw(arg as *mut oneshot::Sender) }; @@ -567,7 +564,7 @@ impl Nexus { self.cancel_child_rebuild_jobs(&child.name).await; } - for child in self.children.pop() { + for child in self.children.iter_mut() { info!("Destroying child bdev {}", child.name); if let Err(e) = child.close().await { // TODO: should an error be returned here? 
@@ -579,8 +576,6 @@ impl Nexus { } } - info!("Destroying nexus {}", self.name); - let (s, r) = oneshot::channel::(); unsafe { @@ -711,7 +706,7 @@ impl Nexus { pio ); - pio.ctx_as_mut_ref().status = IoStatus::Failed.into(); + pio.ctx_as_mut_ref().status = IoStatus::Failed; } pio.assess(&mut chio, success); // always free the child IO @@ -724,7 +719,7 @@ impl Nexus { let pio_ctx = pio.ctx_as_mut_ref(); if !success { - pio_ctx.status = IoStatus::Failed.into(); + pio_ctx.status = IoStatus::Failed; } // As there is no child IO, perform the IO accounting that Bio::assess @@ -733,7 +728,7 @@ impl Nexus { debug_assert!(pio_ctx.in_flight >= 0); if pio_ctx.in_flight == 0 { - if IoStatus::from(pio_ctx.status) == IoStatus::Failed { + if pio_ctx.status == IoStatus::Failed { pio_ctx.io_attempts -= 1; if pio_ctx.io_attempts == 0 { pio.fail(); diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index ed7d231bc..a1b56e2ea 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -14,7 +14,7 @@ use spdk_sys::{ use crate::{ bdev::{nexus::nexus_child::ChildState, Nexus}, - core::BdevHandle, + core::{BdevHandle, Mthread}, }; use futures::channel::oneshot; use std::ptr::NonNull; @@ -75,6 +75,10 @@ pub enum DREvent { impl NexusChannelInner { /// very simplistic routine to rotate between children for read operations + /// note that the channels can be None during a reconfigure; this is usually + /// not the case but a side effect of using the async. As we poll + /// threads more often depending on what core we are on etc, we might be + /// "awaiting' while the thread is already trying to submit IO. 
pub(crate) fn child_select(&mut self) -> Option { if self.readers.is_empty() { None @@ -96,9 +100,9 @@ impl NexusChannelInner { pub(crate) fn refresh(&mut self) { let nexus = unsafe { Nexus::from_raw(self.device) }; info!( - "{}(tid:{:?}), refreshing IO channels", + "{}(thread:{:?}), refreshing IO channels", nexus.name, - std::thread::current().name().unwrap_or("none") + Mthread::current().unwrap().name(), ); trace!( diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 60be84744..d4389635a 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -365,31 +365,36 @@ impl NexusChild { /// Note: The descriptor *must* be dropped for the remove to complete. pub(crate) fn remove(&mut self) { info!("Removing child {}", self.name); - dbg!(&self); // The bdev is being removed, so ensure we don't use it again. self.bdev = None; - match self.state.load() { - ChildState::Open | Faulted(Reason::OutOfSync) => { + let state = self.state(); + + match state { + ChildState::Open + | Faulted(Reason::OutOfSync) + | Faulted(Reason::IoError) => { // Change the state of the child to ensure it is taken out of // the I/O path when the nexus is reconfigured. self.set_state(ChildState::Closed) } - ChildState::Init - | ChildState::ConfigInvalid - | ChildState::Closed - | Faulted(_) => {} + // leave the state into whatever we found it as + _ => {} } - // Remove the child from the I/O path. - let nexus_name = self.parent.clone(); - Reactor::block_on(async move { - match nexus_lookup(&nexus_name) { - Some(n) => n.reconfigure(DREvent::ChildRemove).await, - None => error!("Nexus {} not found", nexus_name), - } - }); + // Remove the child from the I/O path. If we had an IO error the bdev, + // the channels where already reconfigured so we dont have to do + // that twice. 
+ if state != ChildState::Faulted(Reason::IoError) { + let nexus_name = self.parent.clone(); + Reactor::block_on(async move { + match nexus_lookup(&nexus_name) { + Some(n) => n.reconfigure(DREvent::ChildRemove).await, + None => error!("Nexus {} not found", nexus_name), + } + }); + } // Dropping the last descriptor results in the bdev being removed. // This must be performed in this function. diff --git a/mayastor/src/bdev/nexus/nexus_child_error_store.rs b/mayastor/src/bdev/nexus/nexus_child_error_store.rs index a00ef43f5..539fdd6dc 100644 --- a/mayastor/src/bdev/nexus/nexus_child_error_store.rs +++ b/mayastor/src/bdev/nexus/nexus_child_error_store.rs @@ -193,7 +193,7 @@ impl NexusErrStore { } } - fn error_fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + fn error_fmt(&self, f: &mut Formatter<'_>) { let mut idx = self.next_record_index; write!(f, "\nErrors ({}):", self.no_of_records) .expect("invalid format"); @@ -215,19 +215,20 @@ impl NexusErrStore { ) .expect("invalid format"); } - Ok(()) } } impl Debug for NexusErrStore { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - self.error_fmt(f) + self.error_fmt(f); + Ok(()) } } impl Display for NexusErrStore { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - self.error_fmt(f) + self.error_fmt(f); + Ok(()) } } diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 14f7c674a..fdccbd172 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -36,11 +36,19 @@ pub struct NioCtx { /// read consistency pub(crate) in_flight: i8, /// status of the IO - pub(crate) status: i32, + pub(crate) status: IoStatus, /// attempts left pub(crate) io_attempts: i32, } +impl NioCtx { + #[inline] + pub fn dec(&mut self) { + self.in_flight -= 1; + debug_assert!(self.in_flight >= 0); + } +} + /// BIO is a wrapper to provides a "less unsafe" wrappers around raw /// pointers only proper scenario testing and QA cycles can determine if 
this /// code is good @@ -231,7 +239,7 @@ impl Bio { /// reset the ctx fields of an spdk_bdev_io to submit or resubmit an IO pub fn reset(&mut self, in_flight: usize) { self.ctx_as_mut_ref().in_flight = in_flight as i8; - self.ctx_as_mut_ref().status = IoStatus::Success.into(); + self.ctx_as_mut_ref().status = IoStatus::Success; } /// complete an IO for the nexus. In the IO completion routine in @@ -241,7 +249,7 @@ impl Bio { pub(crate) fn ok(&mut self) { if cfg!(debug_assertions) { // have a child IO that has failed - if self.ctx_as_mut_ref().status < 0 { + if self.ctx_as_mut_ref().status != IoStatus::Success { debug!("BIO for nexus {} failed", self.nexus_as_ref().name) } // we are marking the IO done but not all child IOs have returned, @@ -262,78 +270,11 @@ impl Bio { } } - /// assess non-success IO, out of the hot path - #[inline(never)] - fn assess_non_success(&mut self, child_io: &mut Bio) { - // note although this is not the hot path, with a sufficiently high - // queue depth it can turn whitehot rather quickly - let nvme_status = NvmeStatus::from(child_io.clone()); - // invalid NVMe opcodes are not sufficient to fault a child - // currently only tests send those - if nvme_status.status_code() == GenericStatusCode::InvalidOpcode { - return; - } - error!("{:#?}", nvme_status); - let child = child_io.bdev_as_ref(); - let n = self.nexus_as_ref(); - - if let Some(child) = n.child_lookup(&child.name()) { - let current_state = child.state.compare_and_swap( - ChildState::Open, - ChildState::Faulted(Reason::IoError), - ); - - if current_state == ChildState::Open { - warn!( - "core {} thread {:?}, faulting child {} : {:#?}", - Cores::current(), - Mthread::current(), - child, - NvmeStatus::from(child_io.clone()) - ); - - let name = n.name.clone(); - let uri = child.name.clone(); - - let fut = async move { - if let Some(nexus) = nexus_lookup(&name) { - nexus.pause().await.unwrap(); - nexus.reconfigure(DREvent::ChildFault).await; - bdev_destroy(&uri).await.unwrap(); - 
if nexus.status() != NexusStatus::Faulted { - nexus.resume().await.unwrap(); - } else { - error!(":{} has no children left... ", nexus); - } - } - }; - - Reactors::master().send_future(fut); - } - } else { - debug!("core {} thread {:?}, not faulting child {} as its already being removed", - Cores::current(), Mthread::current(), child); - } - } - - /// assess the IO if we need to mark it failed or ok. #[inline] - pub(crate) fn assess(&mut self, child_io: &mut Bio, success: bool) { - { - let pio_ctx = self.ctx_as_mut_ref(); - pio_ctx.in_flight -= 1; - - debug_assert!(pio_ctx.in_flight >= 0); - } - - if !success { - self.assess_non_success(child_io); - } - + pub(crate) fn complete(&mut self) { let pio_ctx = self.ctx_as_mut_ref(); - if pio_ctx.in_flight == 0 { - if IoStatus::from(pio_ctx.status) == IoStatus::Failed { + if pio_ctx.status == IoStatus::Failed { pio_ctx.io_attempts -= 1; if pio_ctx.io_attempts > 0 { NexusFnTable::io_submit_or_resubmit( @@ -349,6 +290,65 @@ impl Bio { } } + /// assess the IO if we need to mark it failed or ok. + #[inline] + pub(crate) fn assess(&mut self, child_io: &mut Bio, success: bool) { + self.ctx_as_mut_ref().dec(); + + if !success { + // currently, only tests send those but invalid op codes should not + // result into faulting a child device. 
+ if NvmeStatus::from(child_io.clone()).status_code() + == GenericStatusCode::InvalidOpcode + { + self.complete(); + return; + } + + // all other status codes indicate a fatal error + Reactors::master().send_future(Self::child_retire( + self.nexus_as_ref().name.clone(), + child_io.bdev_as_ref(), + )); + } + + self.complete(); + } + + async fn child_retire(nexus: String, child: Bdev) { + error!("{:#?}", child); + + if let Some(nexus) = nexus_lookup(&nexus) { + if let Some(child) = nexus.child_lookup(&child.name()) { + let current_state = child.state.compare_and_swap( + ChildState::Open, + ChildState::Faulted(Reason::IoError), + ); + + if current_state == ChildState::Open { + warn!( + "core {} thread {:?}, faulting child {}", + Cores::current(), + Mthread::current(), + child, + ); + + let uri = child.name.clone(); + nexus.pause().await.unwrap(); + nexus.reconfigure(DREvent::ChildFault).await; + //nexus.remove_child(&uri).await.unwrap(); + bdev_destroy(&uri).await.unwrap(); + if nexus.status() != NexusStatus::Faulted { + nexus.resume().await.unwrap(); + } else { + error!(":{} has no children left... ", nexus); + } + } + } + } else { + debug!("{} does not belong (anymore) to nexus {}", child, nexus); + } + } /// obtain the Nexus struct embedded within the bdev pub(crate) fn nexus_as_ref(&self) -> &Nexus { let b = self.bdev_as_ref(); diff --git a/mayastor/src/bdev/nexus/nexus_nbd.rs b/mayastor/src/bdev/nexus/nexus_nbd.rs index ebc924ef0..04a8b552e 100644 --- a/mayastor/src/bdev/nexus/nexus_nbd.rs +++ b/mayastor/src/bdev/nexus/nexus_nbd.rs @@ -51,7 +51,7 @@ extern "C" { /// perspective. This is somewhat annoying, but what makes matters worse is that /// if we are running the device creation path, on the same core that is /// handling the IO, we get into a state where we make no forward progress. 
-pub(crate) fn wait_until_ready(path: &str) -> Result<(), ()> { +pub(crate) fn wait_until_ready(path: &str) { let started = Arc::new(AtomicBool::new(false)); let tpath = String::from(path); @@ -112,8 +112,6 @@ pub(crate) fn wait_until_ready(path: &str) -> Result<(), ()> { while !started.load(SeqCst) { Reactors::current().poll_once(); } - - Ok(()) } /// Return first unused nbd device in /dev. @@ -223,7 +221,7 @@ impl NbdDisk { // we wait for the dev to come up online because // otherwise the mount done too early would fail. // If it times out, continue anyway and let the mount fail. - wait_until_ready(&device_path).unwrap(); + wait_until_ready(&device_path); info!("Started nbd disk {} for {}", device_path, bdev_name); Ok(Self { diff --git a/mayastor/src/bin/nvmet.rs b/mayastor/src/bin/nvmet.rs index 7678ca2f1..ccb78354e 100644 --- a/mayastor/src/bin/nvmet.rs +++ b/mayastor/src/bin/nvmet.rs @@ -1,14 +1,24 @@ +//! +//! This utility assists in testing nexus behaviour by simply allow one to +//! start it against well-known targets. Example usage is: +//! +//! ```bash +//! nvmet -u nvmf://10.1.0.101/replica1 \ +//! nvmf://10.1.0.102/replica1 \ +//! nvmf://10.1.0.103/replica1 +//! ``` +//! This will start a nexus which is shared over MY_POD_IP. Another env variable +//! is set to ignore labeling errors. This does not work for rebuild tests +//! however. 
use clap::{App, AppSettings, Arg, ArgMatches}; use futures::FutureExt; use mayastor::{ - bdev::{nexus_create, nexus_lookup, util::uring}, + bdev::{nexus_create, nexus_lookup}, core::{MayastorCliArgs, MayastorEnvironment, Reactors, Share}, grpc, - grpc::endpoint, logger, - subsys, }; -use std::path::Path; + mayastor::CPS_INIT!(); #[macro_use] extern crate tracing; @@ -16,17 +26,13 @@ extern crate tracing; const NEXUS: &str = "nexus-e1e27668-fbe1-4c8a-9108-513f6e44d342"; async fn create_nexus(args: &ArgMatches<'_>) { - let ep = args.values_of("endpoint").expect("invalid endpoints"); + let ep = args.values_of("uri").expect("invalid endpoints"); + + let size = args.value_of("size").unwrap().parse::().unwrap(); - let children = ep - .into_iter() - .map(|v| { - dbg!(v); - format!("nvmf://{}/replica1", v) - }) - .collect::>(); + let children = ep.into_iter().map(|v| v.into()).collect::>(); - nexus_create(NEXUS, 250 * 1024 * 1024 * 1024, Some(NEXUS), &children) + nexus_create(NEXUS, size * 1024 * 1024, Some(NEXUS), &children) .await .unwrap(); @@ -34,9 +40,8 @@ async fn create_nexus(args: &ArgMatches<'_>) { nexus.share_nvmf().await.unwrap(); } -fn main() -> Result<(), Box> { +fn main() { std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); - std::env::set_var("MY_POD_IP", "192.168.1.4"); let matches = App::new("NVMeT CLI") .version("0.1") @@ -44,23 +49,26 @@ fn main() -> Result<(), Box> { AppSettings::ColoredHelp, AppSettings::ColorAlways]) .about("NVMe test utility to quickly create a nexus over existing nvme targets") + .arg(Arg::with_name("size") + .required(true) + .default_value("64") + .short("s") + .long("size") + .help("Size of the nexus to create in MB") + ) .arg( - Arg::with_name("replica-index") - .short("n") - .long("replica-index") - .default_value("1") - .help("index of the NQN to connect to e.g 1 for replica1")) - .arg( - Arg::with_name("endpoint") - .short("e") + Arg::with_name("uri") + .short("u") .min_values(1) - .long("endpoint") - .help("endpoints to 
connect to")) + .long("uris") + .help("NVMe-OF TCP targets to connect to")) .get_matches(); - let mut margs = MayastorCliArgs::default(); - - margs.rpc_address = "0.0.0.0:10124".to_string(); + let margs = MayastorCliArgs { + rpc_address: "0.0.0.0:10124".to_string(), + reactor_mask: "0xF".to_string(), + ..Default::default() + }; logger::init("INFO"); @@ -81,18 +89,13 @@ fn main() -> Result<(), Box> { create_nexus(&matches).await; }); - let mut futures = Vec::new(); - - futures.push(master.boxed_local()); - futures.push(subsys::Registration::run().boxed_local()); - futures.push( + let futures = vec![ + master.boxed_local(), grpc::MayastorGrpcServer::run(grpc_endpoint, rpc_address).boxed_local(), - ); + ]; rt.block_on(futures::future::try_join_all(futures)) .expect_err("reactor exit in abnormal state"); ms.fini(); - - Ok(()) } diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 69405a330..0c7ebae66 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -40,7 +40,7 @@ use spdk_sys::{ }; use crate::{ - bdev::nexus::{instances, nexus_child_status_config::ChildStatusConfig}, + bdev::nexus::nexus_child_status_config::ChildStatusConfig, core::{ reactor::{Reactor, ReactorState, Reactors}, Cores, @@ -279,9 +279,6 @@ async fn do_shutdown(arg: *mut c_void) { } iscsi::fini(); - for n in instances() { - n.destroy().await; - } unsafe { spdk_rpc_finish(); spdk_subsystem_fini(Some(reactors_stop), arg); @@ -368,7 +365,7 @@ impl MayastorEnvironment { } /// configure signal handling - fn install_signal_handlers(&self) -> Result<()> { + fn install_signal_handlers(&self) { unsafe { signal_hook::register(signal_hook::SIGTERM, || { mayastor_signal_handler(1) @@ -382,8 +379,6 @@ impl MayastorEnvironment { }) } .unwrap(); - - Ok(()) } /// construct an array of options to be passed to EAL and start it @@ -658,7 +653,7 @@ impl MayastorEnvironment { ); // setup our signal handlers - self.install_signal_handlers().unwrap(); + self.install_signal_handlers(); // 
allocate a Reactor per core Reactors::init(); diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs index 388c3da5e..4715aac58 100644 --- a/mayastor/src/core/nvme.rs +++ b/mayastor/src/core/nvme.rs @@ -71,7 +71,6 @@ impl From for GenericStatusCode { match i { 0x00 => Self::Success, 0x01 => Self::InvalidOpcode, - 0x01 => Self::InvalidOPCode, 0x02 => Self::InvalidFieldInCommand, 0x03 => Self::CommandIDConflict, 0x04 => Self::DataTransferError, diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index c82d0f04e..54c3f353d 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -17,7 +17,6 @@ use spdk_sys::{ spdk_nvmf_target_opts, spdk_nvmf_transport_opts, SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, - SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET, }; use crate::bdev::ActionType; diff --git a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs index 3300580f7..240e8e474 100644 --- a/mayastor/tests/bdev_test.rs +++ b/mayastor/tests/bdev_test.rs @@ -85,7 +85,7 @@ async fn create_target(container: &str) -> String { .unwrap(); h.bdev .create(BdevUri { - uri: "malloc:///disk0?size_mb=100".into(), + uri: "malloc:///disk0?size_mb=64".into(), }) .await .unwrap(); @@ -99,6 +99,7 @@ async fn create_target(container: &str) -> String { .await .unwrap(); + DOCKER_COMPOSE.get().unwrap().logs_all().await.unwrap(); ep.into_inner().uri } diff --git a/mayastor/tests/common/mod.rs b/mayastor/tests/common/mod.rs index c38cb59b2..49d4f19d6 100644 --- a/mayastor/tests/common/mod.rs +++ b/mayastor/tests/common/mod.rs @@ -417,15 +417,11 @@ pub fn get_device_size(nexus_device: &str) -> u64 { } /// Waits for the rebuild to reach `state`, up to `timeout` -pub fn wait_for_rebuild( - name: String, - state: RebuildState, - timeout: Duration, -) -> Result<(), ()> { +pub fn wait_for_rebuild(name: String, state: RebuildState, timeout: Duration) { let (s, r) = unbounded::<()>(); let job = match RebuildJob::lookup(&name) { Ok(job) => 
job, - Err(_) => return Ok(()), + Err(_) => return, }; job.as_client().stats(); @@ -459,7 +455,7 @@ pub fn wait_for_rebuild( if let Ok(job) = RebuildJob::lookup(&name) { job.as_client().stats(); } - t.join().unwrap() + t.join().unwrap().unwrap(); } pub fn fio_verify_size(device: &str, size: u64) -> i32 { diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs index 6c0522435..94602921f 100644 --- a/mayastor/tests/io_job.rs +++ b/mayastor/tests/io_job.rs @@ -26,7 +26,7 @@ async fn create_work(queue: Arc) { // create the bdev h.bdev .create(BdevUri { - uri: "malloc:///disk0?size_mb=100".into(), + uri: "malloc:///disk0?size_mb=64".into(), }) .await .unwrap(); @@ -40,6 +40,8 @@ async fn create_work(queue: Arc) { .unwrap(); } + DOCKER_COMPOSE.get().unwrap().logs_all().await.unwrap(); + // get a reference to mayastor (used later) let ms = MAYASTOR.get().unwrap(); @@ -49,7 +51,7 @@ async fn create_work(queue: Arc) { ms.spawn(async move { nexus_create( "nexus0", - 1024 * 1024 * 50, + 1024 * 1024 * 60, None, &[ format!( @@ -72,20 +74,7 @@ async fn create_work(queue: Arc) { let job = io_driver::Builder::new() .core(1) .bdev(bdev) - .qd(64) - .io_size(512) - .build() - .await; - - // start the first job - queue.start(job); - - // create a new job and start it. 
Note that the malloc bdev is created - // implicitly with the uri() argument - let job = io_driver::Builder::new() - .core(0) - .uri("malloc:///disk0?size_mb=100") - .qd(64) + .qd(32) .io_size(512) .build() .await; @@ -150,6 +139,7 @@ async fn io_driver() { "nvmf-target2", Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), ) + .with_prune(true) .with_clean(true) .build() .await diff --git a/mayastor/tests/iscsi_tgt.rs b/mayastor/tests/iscsi_tgt.rs index 231ef2e6b..133989c49 100644 --- a/mayastor/tests/iscsi_tgt.rs +++ b/mayastor/tests/iscsi_tgt.rs @@ -9,8 +9,11 @@ static BDEV: &str = "malloc:///malloc0?size_mb=64"; #[tokio::test] async fn iscsi_target() { - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x3".into(); + let args = MayastorCliArgs { + reactor_mask: "0x3".into(), + ..Default::default() + }; + let ms = common::MayastorTest::new(args); ms.spawn(async { // test we can create a nvmf subsystem diff --git a/mayastor/tests/lock_lba_range.rs b/mayastor/tests/lock_lba_range.rs index e68096293..1cd16fae0 100644 --- a/mayastor/tests/lock_lba_range.rs +++ b/mayastor/tests/lock_lba_range.rs @@ -1,6 +1,6 @@ +#![allow(clippy::await_holding_refcell_ref)] #[macro_use] extern crate tracing; - use std::{ cell::{Ref, RefCell, RefMut}, ops::{Deref, DerefMut}, diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index e6b510d74..6ec72d3f7 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -15,8 +15,10 @@ static DISKNAME1: &str = "/tmp/disk1.img"; async fn lvs_pool_test() { common::delete_file(&[DISKNAME1.into()]); common::truncate_file(DISKNAME1, 64 * 1024); - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x3".into(); + let args = MayastorCliArgs { + reactor_mask: "0x3".into(), + ..Default::default() + }; let ms = MayastorTest::new(args); // should fail to import a pool that does not exist on disk diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index 
f2bb040bb..b507af714 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -142,8 +142,7 @@ fn rebuild_progress() { get_dev(1), RebuildState::Paused, std::time::Duration::from_millis(1000), - ) - .unwrap(); + ); let p = nexus.get_rebuild_progress(&get_dev(1)).unwrap(); assert!(p.progress >= progress); p.progress @@ -316,8 +315,7 @@ async fn nexus_add_child(new_child: u64, wait: bool) { get_dev(new_child), RebuildState::Completed, std::time::Duration::from_secs(10), - ) - .unwrap(); + ); nexus_test_child(new_child).await; } else { @@ -331,8 +329,7 @@ async fn nexus_test_child(child: u64) { get_dev(child), RebuildState::Completed, std::time::Duration::from_secs(10), - ) - .unwrap(); + ); let nexus = nexus_lookup(nexus_name()).unwrap(); @@ -403,8 +400,7 @@ fn rebuild_sizes() { get_dev(2), RebuildState::Completed, std::time::Duration::from_secs(20), - ) - .unwrap(); + ); nexus.destroy().await.unwrap(); }); @@ -548,8 +544,7 @@ fn rebuild_operations() { RebuildState::Stopped, // already stopping, should be enough std::time::Duration::from_millis(250), - ) - .unwrap(); + ); // already stopped nexus.stop_rebuild(&get_dev(1)).await.unwrap(); @@ -584,8 +579,7 @@ fn rebuild_multiple() { get_dev(child), RebuildState::Completed, std::time::Duration::from_secs(20), - ) - .unwrap(); + ); nexus.remove_child(&get_dev(child)).await.unwrap(); } @@ -601,8 +595,7 @@ fn rebuild_multiple() { get_dev(child), RebuildState::Running, std::time::Duration::from_millis(100), - ) - .unwrap(); + ); nexus.remove_child(&get_dev(child)).await.unwrap(); } @@ -634,8 +627,7 @@ fn rebuild_fault_src() { get_dev(1), RebuildState::Failed, std::time::Duration::from_secs(20), - ) - .unwrap(); + ); // allow the nexus futures to run reactor_poll!(10); assert_eq!( @@ -671,8 +663,7 @@ fn rebuild_fault_dst() { get_dev(1), RebuildState::Failed, std::time::Duration::from_secs(20), - ) - .unwrap(); + ); // allow the nexus futures to run reactor_poll!(10); assert_eq!( diff --git 
a/mayastor/tests/nexus_share.rs b/mayastor/tests/nexus_share.rs index 2a5cf07ed..199a48754 100644 --- a/mayastor/tests/nexus_share.rs +++ b/mayastor/tests/nexus_share.rs @@ -15,8 +15,10 @@ use common::MayastorTest; #[tokio::test] async fn nexus_test() { - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x2".into(); + let args = MayastorCliArgs { + reactor_mask: "0x3".into(), + ..Default::default() + }; MayastorTest::new(args) .spawn(async { diff --git a/mayastor/tests/nvmet.rs b/mayastor/tests/nvmet.rs deleted file mode 100644 index a13e962e9..000000000 --- a/mayastor/tests/nvmet.rs +++ /dev/null @@ -1,106 +0,0 @@ -use common::compose::MayastorTest; -use mayastor::core::{ - io_driver, - mayastor_env_stop, - Bdev, - Cores, - MayastorCliArgs, - Share, - SIG_RECEIVED, -}; -static MAYASTOR: OnceCell = OnceCell::new(); - -use mayastor::{ - bdev::{nexus_create, nexus_lookup}, - core::io_driver::{IoType, JobQueue}, -}; -use once_cell::sync::OnceCell; -use std::{ - sync::{atomic::Ordering, Arc}, - time::Duration, -}; - -pub mod common; -async fn create_nexus() { - let children = (1 ..= 3) - .into_iter() - .map(|i| format!("nvmf://127.0.0.1/replica{}", i)) - .collect::>(); - - nexus_create( - "e1e27668-fbe1-4c8a-9108-513f6e44d342", - 250 * 1024 * 1024 * 1024, - None, - &children, - ) - .await - .unwrap(); - - let nexus = nexus_lookup("e1e27668-fbe1-4c8a-9108-513f6e44d342").unwrap(); - nexus.share_nvmf().await; -} - -async fn remove_child(index: usize) { - let to_remove = format!("127.0.0.1/replica{}", index); - let nexus = nexus_lookup("e1e27668-fbe1-4c8a-9108-513f6e44d342").unwrap(); - nexus.remove_child(&to_remove).await.unwrap() -} - -async fn bdev_info() { - let bdev = Bdev::bdev_first().unwrap(); - dbg!(bdev); -} - -async fn start_workload(queue: Arc) { - let ms = MAYASTOR.get().unwrap(); - ms.spawn(async move { - for c in Cores::count() { - let bdev = - Bdev::lookup_by_name("e1e27668-fbe1-4c8a-9108-513f6e44d342") - .unwrap(); - let job = 
io_driver::Builder::new() - .core(c) - .rw(IoType::WRITE) - .bdev(bdev) - .qd(64) - .io_size(512) - .build() - .await; - queue.start(job); - } - }) - .await; -} - -#[tokio::test] -async fn nvmet_nexus_test() { - std::env::set_var("NEXUS_LABEL_IGNORE_ERRORS", "1"); - std::env::set_var("MY_POD_IP", "192.168.1.4"); - let ms = MayastorTest::new(MayastorCliArgs { - reactor_mask: 0xA.to_string(), - no_pci: true, - grpc_endpoint: "0.0.0.0".to_string(), - ..Default::default() - }); - let ms = MAYASTOR.get_or_init(|| ms); - - ms.spawn(create_nexus()).await; - ms.spawn(bdev_info()).await; - - let mut ticker = tokio::time::interval(Duration::from_millis(1000)); - // ctrl was hit so exit the loop here - loop { - if SIG_RECEIVED.load(Ordering::Relaxed) { - break; - } - - ms.spawn(async { - let bdev = Bdev::bdev_first().unwrap(); - println!("{:?}", bdev.stats().await.unwrap()); - }) - .await; - ticker.tick().await; - } - - ms.stop().await; -} diff --git a/mayastor/tests/nvmf.rs b/mayastor/tests/nvmf.rs index 30387a939..c1fed33c7 100644 --- a/mayastor/tests/nvmf.rs +++ b/mayastor/tests/nvmf.rs @@ -21,8 +21,10 @@ static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512"; fn nvmf_target() { common::mayastor_test_init(); common::truncate_file(DISKNAME1, 64 * 1024); - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x3".into(); + let args = MayastorCliArgs { + reactor_mask: "0x3".into(), + ..Default::default() + }; MayastorEnvironment::new(args) .start(|| { // test we can create a nvmf subsystem diff --git a/mayastor/tests/reactor.rs b/mayastor/tests/reactor.rs index cfcdec00e..1eae4ec99 100644 --- a/mayastor/tests/reactor.rs +++ b/mayastor/tests/reactor.rs @@ -17,8 +17,11 @@ pub mod common; // This test requires the system to have at least 2 cpus #[test] fn reactor_start_stop() { - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x1".to_string(); + let args = MayastorCliArgs { + reactor_mask: "0x3".into(), + ..Default::default() + }; + let ms = 
MayastorEnvironment::new(args); static mut WAIT_FOR: Lazy = diff --git a/mayastor/tests/remove_child.rs b/mayastor/tests/remove_child.rs index 65c05198d..8e66390ab 100644 --- a/mayastor/tests/remove_child.rs +++ b/mayastor/tests/remove_child.rs @@ -36,8 +36,6 @@ fn remove_child() { .expect("failed exec truncate"); assert_eq!(output.status.success(), true); - let mut args = MayastorCliArgs::default(); - args.log_components = vec!["all".into()]; let rc = MayastorEnvironment::new(MayastorCliArgs::default()) .start(|| Reactor::block_on(works()).unwrap()) .unwrap(); diff --git a/nix/lib/rust.nix b/nix/lib/rust.nix index 4bb1a5a26..870fd5bca 100644 --- a/nix/lib/rust.nix +++ b/nix/lib/rust.nix @@ -3,6 +3,6 @@ let pkgs = import sources.nixpkgs { overlays = [ (import sources.nixpkgs-mozilla) ]; }; in rec { - nightly = pkgs.rustChannelOf { channel = "nightly"; date = "2020-07-26"; }; + nightly = pkgs.rustChannelOf { channel = "nightly"; date = "2020-11-24"; }; stable = pkgs.rustChannelOf { channel = "stable"; }; } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 02c665108..b69c1988e 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "0dmg0y1wp3gkfiql80b8li20x6l407cih16i9sdbbly34bc84w09"; + cargoSha256 = "1jpp98vnshymzfm1rhm7hpkgkiah47k0xgpa8ywji1znsvp8wqsc"; inherit version; src = whitelistSource ../../../. 
[ "Cargo.lock" diff --git a/nix/sources.json b/nix/sources.json index e3fbb8e8c..5ea8be783 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -29,10 +29,10 @@ "homepage": "https://github.com/nmattia/niv", "owner": "nmattia", "repo": "niv", - "rev": "9d35b9e4837ab88517210b1701127612c260eccf", - "sha256": "0q50xhnm8g2yfyakrh0nly4swyygxpi0a8cb9gp65wcakcgvzvdh", + "rev": "ba57d5a29b4e0f2085917010380ef3ddc3cf380f", + "sha256": "1kpsvc53x821cmjg1khvp1nz7906gczq8mp83664cr15h94sh8i4", "type": "tarball", - "url": "https://github.com/nmattia/niv/archive/9d35b9e4837ab88517210b1701127612c260eccf.tar.gz", + "url": "https://github.com/nmattia/niv/archive/ba57d5a29b4e0f2085917010380ef3ddc3cf380f.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "nixpkgs": { @@ -41,10 +41,10 @@ "homepage": "https://github.com/NixOS/nixpkgs", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7ef527cff856ea7938dba20769a6d59ebc9575e6", - "sha256": "0zhvly0b99846x1y3jyva79amf0kyi9c6lwg8l3ghig669kxlwa7", + "rev": "19908dd99da03196ffbe9d30e7ee01c7e7cc614d", + "sha256": "0n7ql60xlgxcppnqbrbdyvg2w2kma053cg4gwsdikrhmm8jf64fy", "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/7ef527cff856ea7938dba20769a6d59ebc9575e6.tar.gz", + "url": "https://github.com/NixOS/nixpkgs/archive/19908dd99da03196ffbe9d30e7ee01c7e7cc614d.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "nixpkgs-mozilla": { @@ -53,10 +53,10 @@ "homepage": "https://github.com/mozilla/nixpkgs-mozilla", "owner": "mozilla", "repo": "nixpkgs-mozilla", - "rev": "57c8084c7ef41366993909c20491e359bbb90f54", - "sha256": "0lchhjys1jj8fdiisd2718sqd63ys7jrj6hq6iq9l1gxj3mz2w81", + "rev": "8c007b60731c07dd7a052cce508de3bb1ae849b4", + "sha256": "1zybp62zz0h077zm2zmqs2wcg3whg6jqaah9hcl1gv4x8af4zhs6", "type": "tarball", - "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/57c8084c7ef41366993909c20491e359bbb90f54.tar.gz", + "url": 
"https://github.com/mozilla/nixpkgs-mozilla/archive/8c007b60731c07dd7a052cce508de3bb1ae849b4.tar.gz", "url_template": "https://github.com///archive/.tar.gz" } } From 7c7f7b4f187ccddb4e049898c04798cdb3af5e9c Mon Sep 17 00:00:00 2001 From: GlennBullingham Date: Tue, 1 Dec 2020 13:27:59 +0000 Subject: [PATCH 92/92] Update image tags (:v0.6.0) in deployment yaml --- deploy/csi-daemonset.yaml | 2 +- deploy/mayastor-daemonset-config.yaml | 2 +- deploy/mayastor-daemonset.yaml | 2 +- deploy/moac-deployment.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml index a8c91b687..eb2d32dd4 100644 --- a/deploy/csi-daemonset.yaml +++ b/deploy/csi-daemonset.yaml @@ -28,7 +28,7 @@ spec: # the same. containers: - name: mayastor-csi - image: mayadata/mayastor-csi:latest + image: mayadata/mayastor-csi:v0.6.0 imagePullPolicy: Always # we need privileged because we mount filesystems and use mknod securityContext: diff --git a/deploy/mayastor-daemonset-config.yaml b/deploy/mayastor-daemonset-config.yaml index 578a685c4..191a891f4 100644 --- a/deploy/mayastor-daemonset-config.yaml +++ b/deploy/mayastor-daemonset-config.yaml @@ -35,7 +35,7 @@ spec: command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] containers: - name: mayastor - image: mayadata/mayastor:latest + image: mayadata/mayastor:v0.6.0 imagePullPolicy: Always env: - name: MY_NODE_NAME diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 57a3f413c..22539bd0a 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -31,7 +31,7 @@ spec: command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] containers: - name: mayastor - image: mayadata/mayastor:latest + image: mayadata/mayastor:v0.6.0 imagePullPolicy: Always env: - name: MY_NODE_NAME diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml 
index 90484d4a2..7b9a3b50a 100644 --- a/deploy/moac-deployment.yaml +++ b/deploy/moac-deployment.yaml @@ -44,7 +44,7 @@ spec: mountPath: /var/lib/csi/sockets/pluginproxy/ - name: moac - image: mayadata/moac:latest + image: mayadata/moac:v0.6.0 imagePullPolicy: Always args: - "--csi-address=$(CSI_ENDPOINT)"