From d7640de081092eb45a8a7c2534a19147f7959d18 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Thu, 17 Dec 2020 14:19:47 +0000 Subject: [PATCH 01/78] ci: support for running nightly e2e tests New variable in e2e-test.sh script is introduced that is expected to contain names of all e2e tests that should run nightly in the future. Resolves: CAS-575 --- Jenkinsfile | 10 +++++++++- scripts/e2e-test.sh | 32 +++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1230ca274..459fcd9b4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -54,6 +54,8 @@ if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && // Only schedule regular builds on develop branch, so we don't need to guard against it String cron_schedule = BRANCH_NAME == "develop" ? "0 2 * * *" : "" +// Some long e2e tests are not suitable to be run for each PR +boolean run_extended_e2e_tests = (env.BRANCH_NAME != 'staging' && env.BRANCH_NAME != 'trying') ? true : false pipeline { agent none @@ -206,7 +208,13 @@ pipeline { fingerprintArtifacts: true ) sh 'kubectl get nodes -o wide' - sh "nix-shell --run './scripts/e2e-test.sh --device /dev/sdb --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\"'" + script { + def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\"" + if (run_extended_e2e_tests) { + cmd = cmd + " --extended" + } + sh "nix-shell --run '${cmd}'" + } } post { failure { diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 52de8e059..5e60d94bc 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -9,12 +9,17 @@ REPORTSDIR=$(realpath "$SCRIPTDIR/..") # List and Sequence of tests. #tests="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall" # Restrictions: -# 1. resource_check MUST follow csi +# 1. resource_check MUST follow csi # resource_check is a follow up check for the 3rd party CSI test suite. # 2. replicas_pod_remove SHOULD be the last test before uninstall # this is a disruptive test. -tests="install basic_volume_io csi resource_check uninstall" +#TESTS="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall" +TESTS="install basic_volume_io csi resource_check uninstall" +EXTENDED_TESTS="" +# Global state variables +tests="" +run_extended_tests= device= registry= tag="ci" @@ -34,6 +39,7 @@ Options: --tests Lists of tests to run, delimited by spaces (default: "$tests") Note: the last 2 tests should be (if they are to be run) node_disconnect/replica_pod_remove uninstall + --extended Run long running tests also. --reportsdir Path to use for junit xml test reports (default: repo root) --logs Generate logs and cluster state dump at the end of successful test run, prior to uninstall. @@ -41,6 +47,7 @@ Options: --onfail On fail, stop immediately or continue default($on_fail) Behaviour for "continue" only differs if uninstall is in the list of tests (the default). --uninstall_cleanup On uninstall cleanup for reusable cluster. 
default($uninstall_cleanup) + Examples: $0 --device /dev/nvme0n1 --registry 127.0.0.1:5000 --tag a80ce0c EOF @@ -80,6 +87,9 @@ while [ "$#" -gt 0 ]; do shift logsdir="$1" ;; + -e|--extended) + run_extended_tests=1 + ;; --onfail) shift case $1 in @@ -121,17 +131,21 @@ if [ -z "$device" ]; then fi export e2e_pool_device=$device -if [ -z "$registry" ]; then - echo "Registry to pull the mayastor images from, must be specified" - help - exit 1 -fi -export e2e_docker_registry="$registry" - if [ -n "$tag" ]; then export e2e_image_tag="$tag" fi +if [ -n "$registry" ]; then + export e2e_docker_registry="$registry" +fi + +if [ -z "$tests" ]; then + tests="$TESTS" + if [ -n "$run_extended_tests" ]; then + tests="$tests $EXTENDED_TESTS" + fi +fi + export e2e_reports_dir="$REPORTSDIR" if [ ! -d "$e2e_reports_dir" ] ; then echo "Reports directory $e2e_reports_dir does not exist" From 70bf0920a3434a3797fa50cbe9ea94a208d77b9a Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 8 Feb 2021 11:47:08 +0000 Subject: [PATCH 02/78] test: send xray reports Send reports to Jira xray. Specify which test spec in the Jenkinsfile. Currently implemented to on-demand and nightly tests. Updated one readme. --- Jenkinsfile | 43 +++++++++++++++++++ .../basic_volume_io/basic_volume_io_test.go | 7 +-- test/e2e/common/reporter/junit.go | 19 ++++++++ test/e2e/csi/e2e_suite_test.go | 6 +-- test/e2e/install/install_test.go | 8 ++-- test/e2e/node_disconnect/README.md | 24 ++--------- .../replica_pod_remove_test.go | 9 ++-- .../e2e/pvc_stress_fio/pvc_stress_fio_test.go | 7 +-- test/e2e/rebuild/basic_rebuild_test.go | 8 +--- test/e2e/replica/replica_test.go | 9 +--- .../e2e/resource_check/resource_check_test.go | 9 +--- test/e2e/uninstall/uninstall_test.go | 13 +++--- 12 files changed, 90 insertions(+), 72 deletions(-) create mode 100644 test/e2e/common/reporter/junit.go diff --git a/Jenkinsfile b/Jenkinsfile index 459fcd9b4..dfccdbf6d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -10,6 +10,11 @@ def e2e_environment="hcloud-kubeadm" // Global variable to pass current k8s job between stages def k8s_job="" +xray_projectkey='MQ' +xray_on_demand_testplan='MQ-1' +xray_nightly_testplan='MQ-17' +xray_test_execution_type='10059' + // Searches previous builds to find first non aborted one def getLastNonAbortedBuild(build) { if (build == null) { @@ -44,6 +49,16 @@ def notifySlackUponStateChange(build) { } } +def getTestPlan() { + def causes = currentBuild.getBuildCauses() + for(cause in causes) { + if ("${cause}".contains("hudson.triggers.TimerTrigger\$TimerTriggerCause")) { + return xray_nightly_testplan + } + } + return xray_on_demand_testplan +} + // Will ABORT current job for cases when we don't want to build if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && BRANCH_NAME == "develop") { @@ -247,6 +262,34 @@ pipeline { ) } } + always { // always send the junit results back to Xray and Jenkins + junit 'e2e.*.xml' + script { + def xray_testplan = getTestPlan() + step([ + $class: 'XrayImportBuilder', + endpointName: '/junit/multipart', + importFilePath: 'e2e.*.xml', + importToSameExecution: 'true', + projectKey: "${xray_projectkey}", + testPlanKey: "${xray_testplan}", + serverInstance: "${env.JIRASERVERUUID}", + inputInfoSwitcher: 'fileContent', + importInfo: """{ + "fields": { + "summary": "Build ${env.BUILD_NUMBER}", + "project": { + "key": "${xray_projectkey}" + }, + "issuetype": { + "id": "${xray_test_execution_type}" + }, + "description": "Results for build ${env.BUILD_NUMBER} at ${env.BUILD_URL}" + } + }""" + 
]) + } + } } } stage('destroy e2e cluster') { diff --git a/test/e2e/basic_volume_io/basic_volume_io_test.go b/test/e2e/basic_volume_io/basic_volume_io_test.go index 43aab5f48..93874bc24 100644 --- a/test/e2e/basic_volume_io/basic_volume_io_test.go +++ b/test/e2e/basic_volume_io/basic_volume_io_test.go @@ -4,11 +4,10 @@ package basic_volume_io_test import ( "e2e-basic/common" - "os" + rep "e2e-basic/common/reporter" "testing" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -26,9 +25,7 @@ var volNames []volSc func TestBasicVolumeIO(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/basic-volume-io-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI", []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI", rep.GetReporters("basic-volume-io")) } func basicVolumeIOTest(scName string) { diff --git a/test/e2e/common/reporter/junit.go b/test/e2e/common/reporter/junit.go new file mode 100644 index 000000000..dd45e6a96 --- /dev/null +++ b/test/e2e/common/reporter/junit.go @@ -0,0 +1,19 @@ +package reporter + +import ( + "os" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" +) + +func GetReporters(name string) []Reporter { + reportDir := os.Getenv("e2e_reports_dir") + if reportDir == "" { + panic("reportDir not defined - define via e2e_reports_dir environment variable") + } + testGroupPrefix := "e2e." + xmlFileSpec := reportDir + "/" + testGroupPrefix + name + "-junit.xml" + junitReporter := reporters.NewJUnitReporter(xmlFileSpec) + return []Reporter{junitReporter} +} diff --git a/test/e2e/csi/e2e_suite_test.go b/test/e2e/csi/e2e_suite_test.go index 619e62fd4..22226716a 100644 --- a/test/e2e/csi/e2e_suite_test.go +++ b/test/e2e/csi/e2e_suite_test.go @@ -17,13 +17,13 @@ limitations under the License. package e2e import ( + rep "e2e-basic/common/reporter" "flag" "os" "path/filepath" "testing" "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/framework" @@ -100,7 +100,5 @@ func execTestCmd(cmds []testCmd) { func TestE2E(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/csi-junit.xml") - ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "CSI E2E Suite", []ginkgo.Reporter{junitReporter}) + ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "CSI E2E Suite", rep.GetReporters("csi")) } diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go index 7e5906318..7fd6cdb1f 100644 --- a/test/e2e/install/install_test.go +++ b/test/e2e/install/install_test.go @@ -13,9 +13,10 @@ import ( "time" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . 
"github.com/onsi/gomega" + rep "e2e-basic/common/reporter" + appsV1 "k8s.io/api/apps/v1" coreV1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -270,10 +271,7 @@ func installMayastor() { func TestInstallSuite(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/install-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Basic Install Suite", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Basic Install Suite", rep.GetReporters("install")) } var _ = Describe("Mayastor setup", func() { diff --git a/test/e2e/node_disconnect/README.md b/test/e2e/node_disconnect/README.md index 7a8738dfe..db4b772b2 100644 --- a/test/e2e/node_disconnect/README.md +++ b/test/e2e/node_disconnect/README.md @@ -1,25 +1,9 @@ -## Note -The tests in directories replica_disconnect and replica_reassign -are not currently deployable by the CI system -as those tests assume a vagrant installation. - ## Pre-requisites for replica_pod_remove -* A Kubernetes cluster with 3 nodes, with mayastor installed. - -## Pre-requisites for the other directories - -* A Kubernetes cluster with at least 3 nodes, with mayastor installed. -* The replica_reassign test requires at least 4 nodes. -* The cluster is deployed using vagrant and KUBESPRAY_REPO is correctly - defined in ``` ./lib/io_connect_node.sh ``` +* A Kubernetes cluster with at least 2 nodes, with mayastor installed. ## Overview -The tests verify the behaviour of the cluster under fault conditions -affecting the availability of resources in the cluster, for example -a missing mayastor pod or a disconnected node. +This directory is for tests verify the behaviour of the cluster under +fault conditions affecting the availability of resources in the +cluster, for example a missing mayastor pod or a disconnected node. -To run, cd to the test directory then: -```bash -go test -``` diff --git a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go index 2e3d92a44..0a14d3886 100644 --- a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go +++ b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go @@ -2,16 +2,16 @@ package replica_pod_remove_test import ( "e2e-basic/common" + rep "e2e-basic/common/reporter" + disconnect_lib "e2e-basic/node_disconnect/lib" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "os" "testing" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . 
"github.com/onsi/gomega" ) @@ -21,10 +21,7 @@ const gStorageClass = "mayastor-nvmf-pod-remove-test-sc" func TestMayastorPodLoss(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-pod-remove-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Replica pod removal tests", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Replica pod removal tests", rep.GetReporters("replica-pod-remove")) } var _ = Describe("Mayastor replica pod removal test", func() { diff --git a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go index 8870973c6..bd4015b0b 100644 --- a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go +++ b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go @@ -8,6 +8,7 @@ import ( "testing" Cmn "e2e-basic/common" + rep "e2e-basic/common/reporter" coreV1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -17,7 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" ) @@ -203,10 +203,7 @@ func stressTestPVC(iters int, runFio bool) { func TestPVCStress(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/pvc-stress-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "PVC Stress Test Suite", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "PVC Stress Test Suite", rep.GetReporters("pvc-stress")) } var _ = Describe("Mayastor PVC Stress test", func() { diff --git a/test/e2e/rebuild/basic_rebuild_test.go b/test/e2e/rebuild/basic_rebuild_test.go index cdc35efa1..94b432975 100644 --- a/test/e2e/rebuild/basic_rebuild_test.go +++ b/test/e2e/rebuild/basic_rebuild_test.go @@ -1,13 +1,12 @@ package basic_rebuild_test import ( - "os" "testing" "e2e-basic/common" + rep "e2e-basic/common/reporter" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -73,10 +72,7 @@ func basicRebuildTest() { func TestRebuild(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/rebuild-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Rebuild Test Suite", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Rebuild Test Suite", rep.GetReporters("rebuild")) } var _ = Describe("Mayastor rebuild test", func() { diff --git a/test/e2e/replica/replica_test.go b/test/e2e/replica/replica_test.go index b5af4703c..530213627 100644 --- a/test/e2e/replica/replica_test.go +++ b/test/e2e/replica/replica_test.go @@ -1,13 +1,12 @@ package replica_test import ( - "os" "testing" "e2e-basic/common" + rep "e2e-basic/common/reporter" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . 
"github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -63,11 +62,7 @@ func addUnpublishedReplicaTest() { func TestReplica(t *testing.T) { RegisterFailHandler(Fail) - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Replica Test Suite", - []Reporter{junitReporter}) - + RunSpecsWithDefaultAndCustomReporters(t, "Replica Test Suite", rep.GetReporters("replica")) } var _ = Describe("Mayastor replica tests", func() { diff --git a/test/e2e/resource_check/resource_check_test.go b/test/e2e/resource_check/resource_check_test.go index 728a402c3..add81dde9 100644 --- a/test/e2e/resource_check/resource_check_test.go +++ b/test/e2e/resource_check/resource_check_test.go @@ -2,11 +2,10 @@ package basic_test import ( "e2e-basic/common" - "os" + rep "e2e-basic/common/reporter" "testing" . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -44,11 +43,7 @@ func resourceCheck() { func TestResourceCheck(t *testing.T) { RegisterFailHandler(Fail) - - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/resource_check-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Resource Check Suite", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Resource Check Suite", rep.GetReporters("resource_check")) } var _ = Describe("Mayastor resource check", func() { diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go index 3d5aa389b..c27f403f6 100644 --- a/test/e2e/uninstall/uninstall_test.go +++ b/test/e2e/uninstall/uninstall_test.go @@ -2,9 +2,8 @@ package basic_test import ( "e2e-basic/common" - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/reporters" - . "github.com/onsi/gomega" + rep "e2e-basic/common/reporter" + "os" "os/exec" "path" @@ -12,6 +11,9 @@ import ( "testing" "time" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) @@ -165,10 +167,7 @@ func TestTeardownSuite(t *testing.T) { if os.Getenv("e2e_uninstall_cleanup") != "0" { cleanup = true } - reportDir := os.Getenv("e2e_reports_dir") - junitReporter := reporters.NewJUnitReporter(reportDir + "/uninstall-junit.xml") - RunSpecsWithDefaultAndCustomReporters(t, "Basic Teardown Suite", - []Reporter{junitReporter}) + RunSpecsWithDefaultAndCustomReporters(t, "Basic Teardown Suite", rep.GetReporters("uninstall")) } var _ = Describe("Mayastor setup", func() { From 74d02af7fb38f3861cca95374271b0268ff981d8 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 11 Feb 2021 17:15:12 +0000 Subject: [PATCH 03/78] refactor(json): tweak the jsongrpc methods Allows for a JSON Value to traverse the stack allowing swagger to display it as JSON and with colours. Also make sure the rest client returns the correct errors. 
--- .../agents/common/src/wrapper/v0/mod.rs | 2 + control-plane/agents/jsongrpc/src/service.rs | 7 +- control-plane/mbus-api/src/message_bus/v0.rs | 4 +- control-plane/mbus-api/src/v0.rs | 4 +- control-plane/rest/service/src/v0/jsongrpc.rs | 6 +- control-plane/rest/service/src/v0/mod.rs | 2 +- control-plane/rest/src/lib.rs | 69 +++++++++++++++++-- 7 files changed, 79 insertions(+), 15 deletions(-) diff --git a/control-plane/agents/common/src/wrapper/v0/mod.rs b/control-plane/agents/common/src/wrapper/v0/mod.rs index 9aa6a6cda..bf5bac2bf 100644 --- a/control-plane/agents/common/src/wrapper/v0/mod.rs +++ b/control-plane/agents/common/src/wrapper/v0/mod.rs @@ -97,6 +97,8 @@ pub enum SvcError { params: String, error: String, }, + #[snafu(display("Failed to deserialise JsonRpc response"))] + JsonRpcDeserialise { source: serde_json::Error }, } impl From for SvcError { diff --git a/control-plane/agents/jsongrpc/src/service.rs b/control-plane/agents/jsongrpc/src/service.rs index 7b9348c8d..1235058de 100644 --- a/control-plane/agents/jsongrpc/src/service.rs +++ b/control-plane/agents/jsongrpc/src/service.rs @@ -2,7 +2,7 @@ #![allow(clippy::unit_arg)] use ::rpc::mayastor::{JsonRpcReply, JsonRpcRequest}; -use common::wrapper::v0::{BusGetNode, SvcError}; +use common::wrapper::v0::{BusGetNode, JsonRpcDeserialise, SvcError}; use mbus_api::message_bus::v0::{MessageBus, *}; use rpc::mayastor::json_rpc_client::JsonRpcClient; use snafu::ResultExt; @@ -15,7 +15,7 @@ impl JsonGrpcSvc { /// Generic JSON gRPC call issued to Mayastor using the JsonRpcClient. pub(super) async fn json_grpc_call( request: &JsonGrpcRequest, - ) -> Result { + ) -> Result { let node = MessageBus::get_node(&request.node) .await @@ -39,6 +39,7 @@ impl JsonGrpcSvc { })? .into_inner(); - Ok(response.result) + Ok(serde_json::from_str(&response.result) + .context(JsonRpcDeserialise)?) } } diff --git a/control-plane/mbus-api/src/message_bus/v0.rs b/control-plane/mbus-api/src/message_bus/v0.rs index ec42f29ab..7a92a80fb 100644 --- a/control-plane/mbus-api/src/message_bus/v0.rs +++ b/control-plane/mbus-api/src/message_bus/v0.rs @@ -243,7 +243,9 @@ pub trait MessageBusTrait: Sized { /// Generic JSON gRPC call #[tracing::instrument(level = "debug", err)] - async fn json_grpc_call(request: JsonGrpcRequest) -> BusResult { + async fn json_grpc_call( + request: JsonGrpcRequest, + ) -> BusResult { Ok(request.request().await?) } } diff --git a/control-plane/mbus-api/src/v0.rs b/control-plane/mbus-api/src/v0.rs index 97084241a..5e754cece 100644 --- a/control-plane/mbus-api/src/v0.rs +++ b/control-plane/mbus-api/src/v0.rs @@ -3,6 +3,7 @@ use super::*; use paperclip::actix::Apiv2Schema; use percent_encoding::percent_decode_str; use serde::{Deserialize, Serialize}; +use serde_json::value::Value; use std::{cmp::Ordering, fmt::Debug}; use strum_macros::{EnumString, ToString}; @@ -974,4 +975,5 @@ pub struct JsonGrpcRequest { /// parameters to be passed to the above method pub params: JsonGrpcParams, } -bus_impl_message_all!(JsonGrpcRequest, JsonGrpc, String, JsonGrpc); + +bus_impl_message_all!(JsonGrpcRequest, JsonGrpc, Value, JsonGrpc); diff --git a/control-plane/rest/service/src/v0/jsongrpc.rs b/control-plane/rest/service/src/v0/jsongrpc.rs index 46ad574fc..9bde51a7c 100644 --- a/control-plane/rest/service/src/v0/jsongrpc.rs +++ b/control-plane/rest/service/src/v0/jsongrpc.rs @@ -2,7 +2,6 @@ //! These methods are typically used to control SPDK directly. 
use super::*; -use mbus_api::v0::JsonGrpcRequest; /// Configure the functions that this service supports. pub(crate) fn configure(cfg: &mut paperclip::actix::web::ServiceConfig) { @@ -22,8 +21,8 @@ pub(crate) fn configure(cfg: &mut paperclip::actix::web::ServiceConfig) { #[put("/v0", "/nodes/{node}/jsongrpc/{method}", tags(JsonGrpc))] async fn json_grpc_call( web::Path((node, method)): web::Path<(NodeId, JsonGrpcMethod)>, - body: web::Json, -) -> Result, RestError> { + body: web::Json, +) -> Result, RestError> { RestRespond::result( MessageBus::json_grpc_call(JsonGrpcRequest { node, @@ -32,4 +31,5 @@ async fn json_grpc_call( }) .await, ) + .map(|x| web::Json(JsonGeneric::from(x.into_inner()))) } diff --git a/control-plane/rest/service/src/v0/mod.rs b/control-plane/rest/service/src/v0/mod.rs index f5f36f265..71966ab4b 100644 --- a/control-plane/rest/service/src/v0/mod.rs +++ b/control-plane/rest/service/src/v0/mod.rs @@ -11,7 +11,7 @@ pub mod replicas; pub mod swagger_ui; pub mod volumes; -use rest_client::versions::v0::*; +use rest_client::{versions::v0::*, JsonGeneric}; use actix_service::ServiceFactory; use actix_web::{ diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs index ff2c9a07a..0153804e5 100644 --- a/control-plane/rest/src/lib.rs +++ b/control-plane/rest/src/lib.rs @@ -1,4 +1,5 @@ #![warn(missing_docs)] +#![allow(clippy::field_reassign_with_default)] //! Client library which exposes information from the different mayastor //! control plane services through REST //! Different versions are exposed through `versions` @@ -16,7 +17,8 @@ pub mod versions; use actix_web::{body::Body, client::Client}; use actix_web_opentelemetry::ClientExt; -use serde::Deserialize; +use paperclip::actix::Apiv2Schema; +use serde::{Deserialize, Serialize}; use std::{io::BufReader, string::ToString}; /// Actix Rest Client @@ -93,9 +95,15 @@ impl ActixRestClient { })?; let rest_body = rest_response.body().await?; - match serde_json::from_slice(&rest_body) { - Ok(result) => Ok(result), - Err(_) => Ok(vec![serde_json::from_slice::(&rest_body)?]), + if rest_response.status().is_success() { + match serde_json::from_slice(&rest_body) { + Ok(result) => Ok(result), + Err(_) => Ok(vec![serde_json::from_slice::(&rest_body)?]), + } + } else { + let error: serde_json::value::Value = + serde_json::from_slice(&rest_body)?; + Err(anyhow::anyhow!(error.to_string())) } } async fn put>( @@ -132,7 +140,13 @@ impl ActixRestClient { })?; let rest_body = rest_response.body().await?; - Ok(serde_json::from_slice::(&rest_body)?) + if rest_response.status().is_success() { + Ok(serde_json::from_slice::(&rest_body)?) + } else { + let error: serde_json::value::Value = + serde_json::from_slice(&rest_body)?; + Err(anyhow::anyhow!(error.to_string())) + } } async fn del(&self, urn: String) -> anyhow::Result where @@ -155,6 +169,49 @@ impl ActixRestClient { })?; let rest_body = rest_response.body().await?; - Ok(serde_json::from_slice::(&rest_body)?) + if rest_response.status().is_success() { + Ok(serde_json::from_slice::(&rest_body)?) 
+ } else { + let error: serde_json::value::Value = + serde_json::from_slice(&rest_body)?; + Err(anyhow::anyhow!(error.to_string())) + } + } +} + +/// Generic JSON value eg: { "size": 1024 } +#[derive(Debug, Clone, Apiv2Schema)] +pub struct JsonGeneric { + inner: serde_json::Value, +} +impl Serialize for JsonGeneric { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.inner.serialize(serializer) + } +} +impl<'de> Deserialize<'de> for JsonGeneric { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = serde_json::Value::deserialize(deserializer)?; + Ok(JsonGeneric::from(value)) + } +} +impl std::fmt::Display for JsonGeneric { + /// Get inner JSON value as a string + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.inner.to_string()) + } +} +impl JsonGeneric { + /// New JsonGeneric from a JSON value + pub fn from(value: serde_json::Value) -> Self { + Self { + inner: value, + } } } From 67814372171651a3939759fd32ae28808c6c2396 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 11 Feb 2021 18:11:42 +0000 Subject: [PATCH 04/78] refactor(deployer): split into library and binary Split the deployer into a library and a binary allowing for the library to be used by the coming rest test project. --- control-plane/deployer/Cargo.toml | 8 +- control-plane/deployer/bin/src/deployer.rs | 10 ++ control-plane/deployer/src/infra/mayastor.rs | 17 ++- control-plane/deployer/src/infra/mod.rs | 143 +++++++++++++----- control-plane/deployer/src/{bin.rs => lib.rs} | 110 +++++++++----- 5 files changed, 213 insertions(+), 75 deletions(-) create mode 100644 control-plane/deployer/bin/src/deployer.rs rename control-plane/deployer/src/{bin.rs => lib.rs} (73%) diff --git a/control-plane/deployer/Cargo.toml b/control-plane/deployer/Cargo.toml index 0d38119f1..92d91182c 100644 --- a/control-plane/deployer/Cargo.toml +++ b/control-plane/deployer/Cargo.toml @@ -8,7 +8,11 @@ edition = "2018" [[bin]] name = "deployer" -path = "src/bin.rs" +path = "bin/src/deployer.rs" + +[lib] +name = "deployer_lib" +path = "src/lib.rs" [dependencies] mbus_api = { path = "../mbus-api" } @@ -20,4 +24,4 @@ async-trait = "0.1.36" rpc = { path = "../../rpc" } strum = "0.19" strum_macros = "0.19" -paste = "1.0.4" \ No newline at end of file +paste = "1.0.4" diff --git a/control-plane/deployer/bin/src/deployer.rs b/control-plane/deployer/bin/src/deployer.rs new file mode 100644 index 000000000..052e83960 --- /dev/null +++ b/control-plane/deployer/bin/src/deployer.rs @@ -0,0 +1,10 @@ +use deployer_lib::{infra::Error, *}; +use structopt::StructOpt; + +#[tokio::main] +async fn main() -> Result<(), Error> { + let cli_args = CliArgs::from_args(); + println!("Using options: {:?}", &cli_args); + + cli_args.act().await +} diff --git a/control-plane/deployer/src/infra/mayastor.rs b/control-plane/deployer/src/infra/mayastor.rs index d156e4a22..07570c4ca 100644 --- a/control-plane/deployer/src/infra/mayastor.rs +++ b/control-plane/deployer/src/infra/mayastor.rs @@ -38,10 +38,25 @@ impl ComponentAction for Mayastor { } Ok(()) } + async fn wait_on( + &self, + options: &StartOptions, + cfg: &ComposeTest, + ) -> Result<(), Error> { + for i in 0 .. 
options.mayastors { + let mut hdl = + cfg.grpc_handle(&Self::name(i, options)).await.unwrap(); + hdl.mayastor + .list_nexus(rpc::mayastor::Null {}) + .await + .unwrap(); + } + Ok(()) + } } impl Mayastor { - fn name(i: u32, options: &StartOptions) -> String { + pub fn name(i: u32, options: &StartOptions) -> String { if options.mayastors == 1 { "mayastor".into() } else { diff --git a/control-plane/deployer/src/infra/mod.rs b/control-plane/deployer/src/infra/mod.rs index 341eb9ab3..ca5f34203 100644 --- a/control-plane/deployer/src/infra/mod.rs +++ b/control-plane/deployer/src/infra/mod.rs @@ -1,16 +1,9 @@ pub mod dns; mod empty; -pub mod jaeger; -pub mod mayastor; -pub mod nats; -pub mod rest; - -pub use ::nats::*; -pub use dns::*; -pub use empty::*; -pub use jaeger::*; -pub use mayastor::*; -pub use rest::*; +mod jaeger; +mod mayastor; +mod nats; +mod rest; use super::StartOptions; use async_trait::async_trait; @@ -20,27 +13,50 @@ use mbus_api::{ Message, }; use paste::paste; -use std::{cmp::Ordering, str::FromStr}; +use std::{cmp::Ordering, convert::TryFrom, str::FromStr}; use structopt::StructOpt; use strum::VariantNames; use strum_macros::{EnumVariantNames, ToString}; -pub(crate) type Error = Box; + +/// Error type used by the deployer +pub type Error = Box; #[macro_export] macro_rules! impl_ctrlp_agents { ($($name:ident,)+) => { + /// List of Control Plane Agents to deploy #[derive(Debug, Clone)] - pub(crate) struct ControlPlaneAgents(Vec); + pub struct ControlPlaneAgents(Vec); + + impl ControlPlaneAgents { + /// Get inner vector of ControlPlaneAgent's + pub fn into_inner(self) -> Vec { + self.0 + } + } + /// All the Control Plane Agents #[derive(Debug, Clone, StructOpt, ToString, EnumVariantNames)] #[structopt(about = "Control Plane Agents")] - pub(crate) enum ControlPlaneAgent { + pub enum ControlPlaneAgent { Empty(Empty), $( $name($name), )+ } + impl TryFrom> for ControlPlaneAgents { + type Error = String; + + fn try_from(src: Vec<&str>) -> Result { + let mut vec = vec![]; + for src in src { + vec.push(ControlPlaneAgent::from_str(src)?); + } + Ok(ControlPlaneAgents(vec)) + } + } + impl From<&ControlPlaneAgent> for Component { fn from(ctrlp_svc: &ControlPlaneAgent) -> Self { match ctrlp_svc { @@ -86,6 +102,9 @@ macro_rules! impl_ctrlp_agents { async fn start(&self, _options: &StartOptions, cfg: &ComposeTest) -> Result<(), Error> { let name = stringify!($name).to_ascii_lowercase(); cfg.start(&name).await?; + Ok(()) + } + async fn wait_on(&self, _options: &StartOptions, _cfg: &ComposeTest) -> Result<(), Error> { Liveness {}.request_on(ChannelVs::$name).await?; Ok(()) } @@ -99,12 +118,14 @@ macro_rules! impl_ctrlp_agents { #[macro_export] macro_rules! impl_ctrlp_operators { ($($name:ident,)+) => { + /// List of Control Plane Operators to deploy #[derive(Debug, Clone)] - pub(crate) struct ControlPlaneOperators(Vec); + pub struct ControlPlaneOperators(Vec); + /// All the Control Plane Operators #[derive(Debug, Clone, StructOpt, ToString, EnumVariantNames)] #[structopt(about = "Control Plane Operators")] - pub(crate) enum ControlPlaneOperator { + pub enum ControlPlaneOperator { Empty(Empty), $( $name(paste!{[<$name Op>]}), @@ -198,10 +219,7 @@ macro_rules! 
impl_ctrlp_operators { }; } -pub(crate) fn build_error( - name: &str, - status: Option, -) -> Result<(), Error> { +pub fn build_error(name: &str, status: Option) -> Result<(), Error> { let make_error = |extra: &str| { let error = format!("Failed to build {}: {}", name, extra); std::io::Error::new(std::io::ErrorKind::Other, error) @@ -216,19 +234,43 @@ pub(crate) fn build_error( } } +impl Components { + pub async fn start(&self, cfg: &ComposeTest) -> Result<(), Error> { + let mut last_done = None; + for component in &self.0 { + if let Some(last_done) = last_done { + if component.boot_order() == last_done { + continue; + } + } + let components = self + .0 + .iter() + .filter(|c| c.boot_order() == component.boot_order()); + for component in components { + component.start(&self.1, &cfg).await?; + } + last_done = Some(component.boot_order()); + } + Ok(()) + } +} + #[macro_export] macro_rules! impl_component { ($($name:ident,$order:literal,)+) => { + /// All the Control Plane Components #[derive(Debug, Clone, StructOpt, ToString, EnumVariantNames, Eq, PartialEq)] #[structopt(about = "Control Plane Components")] - pub(crate) enum Component { + pub enum Component { $( $name($name), )+ } + /// List of Control Plane Components to deploy #[derive(Debug, Clone)] - pub(crate) struct Components(Vec, StartOptions); + pub struct Components(Vec, StartOptions); impl BuilderConfigure for Components { fn configure(&self, cfg: Builder) -> Result { let mut cfg = cfg; @@ -240,13 +282,13 @@ macro_rules! impl_component { } impl Components { - pub(crate) fn push_generic_components(&mut self, name: &str, component: Component) { + pub fn push_generic_components(&mut self, name: &str, component: Component) { if !ControlPlaneAgent::VARIANTS.iter().any(|&s| s == name) && !ControlPlaneOperator::VARIANTS.iter().any(|&s| &format!("{}Op", s) == name) { self.0.push(component); } } - pub(crate) fn new(options: StartOptions) -> Components { + pub fn new(options: StartOptions) -> Components { let agents = options.agents.clone(); let operators = options.operators.clone().unwrap_or_default(); let mut components = agents @@ -263,18 +305,35 @@ macro_rules! impl_component { components.0.sort(); components } - pub(crate) async fn start(&self, cfg: &ComposeTest) -> Result<(), Error> { + pub async fn wait_on( + &self, + cfg: &ComposeTest, + timeout: std::time::Duration, + ) -> Result<(), Error> { + match tokio::time::timeout(timeout, self.wait_on_inner(cfg)).await { + Ok(result) => result, + Err(_) => { + let error = format!("Timed out of {:?} expired", timeout); + Err(std::io::Error::new(std::io::ErrorKind::TimedOut, error).into()) + } + } + } + async fn wait_on_inner(&self, cfg: &ComposeTest) -> Result<(), Error> { for component in &self.0 { - component.start(&self.1, cfg).await?; + component.wait_on(&self.1, cfg).await?; } Ok(()) } } + /// Trait to manage a component startup sequence #[async_trait] - pub(crate) trait ComponentAction { + pub trait ComponentAction { fn configure(&self, options: &StartOptions, cfg: Builder) -> Result; async fn start(&self, options: &StartOptions, cfg: &ComposeTest) -> Result<(), Error>; + async fn wait_on(&self, _options: &StartOptions, _cfg: &ComposeTest) -> Result<(), Error> { + Ok(()) + } } #[async_trait] @@ -289,6 +348,11 @@ macro_rules! 
impl_component { $(Self::$name(obj) => obj.start(options, cfg).await,)+ } } + async fn wait_on(&self, options: &StartOptions, cfg: &ComposeTest) -> Result<(), Error> { + match self { + $(Self::$name(obj) => obj.wait_on(options, cfg).await,)+ + } + } } $(impl From<$name> for Component { @@ -297,8 +361,9 @@ macro_rules! impl_component { } })+ + /// Control Plane Component $(#[derive(Default, Debug, Clone, StructOpt, Eq, PartialEq)] - pub(crate) struct $name {})+ + pub struct $name {})+ impl Component { fn boot_order(&self) -> u32 { @@ -328,16 +393,18 @@ macro_rules! impl_component { // from lower to high impl_component! { Empty, 0, - Dns, 0, - Jaeger, 0, + // Note: NATS needs to be the first to support usage of this library in cargo tests + // to make sure that the IP does not change between tests Nats, 0, - Rest, 1, - Mayastor, 1, - Node, 2, - Pool, 3, - Volume, 3, - JsonGrpc, 3, - NodeOp, 4, + Dns, 1, + Jaeger, 1, + Rest, 2, + Node, 3, + Pool, 4, + Volume, 4, + JsonGrpc, 4, + Mayastor, 5, + NodeOp, 6, } // Message Bus Control Plane Agents diff --git a/control-plane/deployer/src/bin.rs b/control-plane/deployer/src/lib.rs similarity index 73% rename from control-plane/deployer/src/bin.rs rename to control-plane/deployer/src/lib.rs index be8bc3107..ea9d17463 100644 --- a/control-plane/deployer/src/bin.rs +++ b/control-plane/deployer/src/lib.rs @@ -1,11 +1,13 @@ pub mod infra; -use composer::Builder; use infra::*; + +use composer::Builder; +use std::convert::TryInto; use structopt::StructOpt; #[derive(Debug, StructOpt)] -struct CliArgs { +pub struct CliArgs { #[structopt(subcommand)] action: Action, } @@ -22,31 +24,35 @@ const DEFAULT_CLUSTER_NAME: &str = "cluster"; #[derive(Debug, StructOpt)] #[structopt(about = "Stop and delete all components")] -pub(crate) struct StopOptions { +pub struct StopOptions { /// Name of the cluster #[structopt(short, long, default_value = DEFAULT_CLUSTER_NAME)] - cluster_name: String, + pub cluster_name: String, } #[derive(Debug, Default, StructOpt)] #[structopt(about = "List all running components")] -pub(crate) struct ListOptions { +pub struct ListOptions { /// Simple list without using the docker executable #[structopt(short, long)] - no_docker: bool, + pub no_docker: bool, /// Format the docker output #[structopt(short, long, conflicts_with = "no_docker")] - format: Option, + pub format: Option, /// Name of the cluster #[structopt(short, long, default_value = DEFAULT_CLUSTER_NAME)] - cluster_name: String, + pub cluster_name: String, } -#[derive(Debug, Clone, StructOpt)] +pub fn default_agents() -> &'static str { + "Node, Pool, Volume" +} + +#[derive(Debug, Default, Clone, StructOpt)] #[structopt(about = "Create and start all components")] -pub(crate) struct StartOptions { +pub struct StartOptions { /// Use the following Control Plane Agents /// Specify one agent at a time or as a list. 
/// ( "" for no agents ) @@ -54,57 +60,99 @@ pub(crate) struct StartOptions { #[structopt( short, long, - default_value = "Node, Pool, Volume", + default_value = default_agents(), value_delimiter = "," )] - agents: Vec, + pub agents: Vec, /// Use the following Control Plane Operators /// Specify one operator at a time or as a list #[structopt(short, long, value_delimiter = ",")] - operators: Option>, + pub operators: Option>, /// Kubernetes Config file if using operators /// [default: "~/.kube/config"] #[structopt(short, long)] - kube_config: Option, + pub kube_config: Option, /// Use a base image for the binary components (eg: alpine:latest) #[structopt(long)] - base_image: Option, + pub base_image: Option, /// Use a jaeger tracing service #[structopt(short, long)] - jaeger: bool, + pub jaeger: bool, /// Disable the REST Server #[structopt(long)] - no_rest: bool, + pub no_rest: bool, /// Use `N` mayastor instances #[structopt(short, long, default_value = "1")] - mayastors: u32, + pub mayastors: u32, /// Use this custom image for the jaeger tracing service #[structopt(long, requires = "jaeger")] - jaeger_image: Option, + pub jaeger_image: Option, /// Cargo Build each component before deploying #[structopt(short, long)] - build: bool, + pub build: bool, /// Use a dns resolver for the cluster: defreitas/dns-proxy-server /// Note this messes with your /etc/resolv.conf so use at your own risk #[structopt(short, long)] - dns: bool, + pub dns: bool, /// Show information from the cluster after creation #[structopt(short, long)] - show_info: bool, + pub show_info: bool, /// Name of the cluster - currently only one allowed at a time #[structopt(short, long, default_value = DEFAULT_CLUSTER_NAME)] - cluster_name: String, + pub cluster_name: String, +} + +impl StartOptions { + pub fn with_agents(mut self, agents: Vec<&str>) -> Self { + let agents: ControlPlaneAgents = agents.try_into().unwrap(); + self.agents = agents.into_inner(); + self + } + pub fn with_jaeger(mut self, jaeger: bool) -> Self { + self.jaeger = jaeger; + self + } + pub fn with_build(mut self, build: bool) -> Self { + self.build = build; + self + } + pub fn with_mayastors(mut self, mayastors: i32) -> Self { + self.mayastors = mayastors as u32; + self + } + pub fn with_show_info(mut self, show_info: bool) -> Self { + self.show_info = show_info; + self + } + pub fn with_cluster_name(mut self, cluster_name: &str) -> Self { + self.cluster_name = cluster_name.to_string(); + self + } + pub fn with_base_image( + mut self, + base_image: impl Into>, + ) -> Self { + self.base_image = base_image.into(); + self + } +} + +impl CliArgs { + /// Act upon the requested action + pub async fn act(&self) -> Result<(), Error> { + self.action.act().await + } } impl Action { @@ -130,6 +178,7 @@ impl StartOptions { .await?; components.start(&composer).await?; + if self.show_info { let lister = ListOptions { cluster_name: self.cluster_name.clone(), @@ -168,15 +217,16 @@ impl ListOptions { std::process::Command::new("docker").args(args).status()?; build_error("docker", status.code()) } - async fn list_simple(&self) -> Result<(), Error> { + /// Simple listing of all started components + pub async fn list_simple(&self) -> Result<(), Error> { let cfg = Builder::new() .name(&self.cluster_name) - .with_prune(false) + .with_reuse(true) .with_clean(false) .build() .await?; - for component in cfg.list_containers().await? { + for component in cfg.list_cluster_containers().await? 
{ let ip = match component.network_settings.clone() { None => None, Some(networks) => match networks.networks { @@ -214,11 +264,3 @@ fn option_str(input: Option) -> String { None => "?".into(), } } - -#[tokio::main] -async fn main() -> Result<(), Error> { - let cli_args = CliArgs::from_args(); - println!("Using options: {:?}", &cli_args); - - cli_args.action.act().await -} From ab0d429f9f54902fb2decdc8620c25352b24e810 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 11 Feb 2021 18:14:19 +0000 Subject: [PATCH 05/78] fix(mayastor): fallible replica share (CAS-714) Make replica share protocol conversion from gRPC fallible as it was causing mayastor to crash on the REST tests. --- mayastor/src/core/share.rs | 29 +++++++++++++++-------------- mayastor/src/grpc/pool_grpc.rs | 11 +++++++---- mayastor/src/lvs/error.rs | 2 ++ 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/mayastor/src/core/share.rs b/mayastor/src/core/share.rs index 3ed6e3fe6..722e56902 100644 --- a/mayastor/src/core/share.rs +++ b/mayastor/src/core/share.rs @@ -1,6 +1,7 @@ +use crate::lvs::Error; use async_trait::async_trait; use pin_utils::core_reexport::fmt::Formatter; -use std::fmt::Display; +use std::{convert::TryFrom, fmt::Display}; #[derive(Debug, PartialOrd, PartialEq)] /// Indicates what protocol the bdev is shared as @@ -13,19 +14,19 @@ pub enum Protocol { Iscsi, } -impl From for Protocol { - fn from(p: i32) -> Self { - match p { - 0 => Self::Off, - 1 => Self::Nvmf, - 2 => Self::Iscsi, - // we have to handle the whole range of u32 here - // We panic here because the gRPC interface should - // have never deserialized into something that is invalid. A - // different approach is would be to set this to - // something that is invalid but that would - // open the flood gates, to much and leak into the code base. - _ => panic!("invalid share value"), +impl TryFrom for Protocol { + type Error = Error; + + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(Self::Off), + 1 => Ok(Self::Nvmf), + 2 => Ok(Self::Iscsi), + // the gRPC code does not validate enum's so we have + // to do it here + _ => Err(Error::ReplicaShareProtocol { + value, + }), } } } diff --git a/mayastor/src/grpc/pool_grpc.rs b/mayastor/src/grpc/pool_grpc.rs index 064007e8e..2d9982712 100644 --- a/mayastor/src/grpc/pool_grpc.rs +++ b/mayastor/src/grpc/pool_grpc.rs @@ -135,7 +135,10 @@ pub async fn create_replica(args: CreateReplicaRequest) -> GrpcResult { return Ok(Response::new(Replica::from(lvol))); } - if !matches!(Protocol::from(args.share), Protocol::Off | Protocol::Nvmf) { + if !matches!( + Protocol::try_from(args.share)?, + Protocol::Off | Protocol::Nvmf + ) { return Err(Status::invalid_argument(format!( "invalid protocol {}", args.share @@ -145,7 +148,7 @@ pub async fn create_replica(args: CreateReplicaRequest) -> GrpcResult { rpc_call(async move { let p = Lvs::lookup(&args.pool).unwrap(); match p.create_lvol(&args.uuid, args.size, false).await { - Ok(lvol) if Protocol::from(args.share) == Protocol::Nvmf => { + Ok(lvol) if Protocol::try_from(args.share)? == Protocol::Nvmf => { match lvol.share_nvmf().await { Ok(s) => { debug!("created and shared {} as {}", lvol, s); @@ -216,12 +219,12 @@ pub async fn share_replica( let lvol = Lvol::try_from(b)?; // if we are already shared return OK - if lvol.shared() == Some(Protocol::from(args.share)) { + if lvol.shared() == Some(Protocol::try_from(args.share)?) 
{ return Ok(ShareReplicaReply { uri: lvol.share_uri().unwrap(), }); } - match Protocol::from(args.share) { + match Protocol::try_from(args.share)? { Protocol::Off => { lvol.unshare().await.map(|_| ShareReplicaReply { uri: format!("bdev:///{}", lvol.name()), diff --git a/mayastor/src/lvs/error.rs b/mayastor/src/lvs/error.rs index a77fd6740..0ef1ddc69 100644 --- a/mayastor/src/lvs/error.rs +++ b/mayastor/src/lvs/error.rs @@ -68,4 +68,6 @@ pub enum Error { SyncProperty { source: Errno, name: String }, #[snafu(display("invalid property value: {}", name))] Property { source: Errno, name: String }, + #[snafu(display("invalid replica share protocol value: {}", value))] + ReplicaShareProtocol { value: i32 }, } From 0e8332e28daca229676f7d24467761770ddccf97 Mon Sep 17 00:00:00 2001 From: Glenn Bullingham <35334134+GlennBullingham@users.noreply.github.com> Date: Sat, 13 Feb 2021 22:28:27 +0000 Subject: [PATCH 06/78] chore(release): revert k8s deployment config files (#713) Set mayastor image tags back to 'develop', after merging back from master branch following GitFlow release of v0.7.1 --- deploy/csi-daemonset.yaml | 2 +- deploy/mayastor-daemonset.yaml | 2 +- deploy/moac-deployment.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml index 6d340e705..43b8a4cc1 100644 --- a/deploy/csi-daemonset.yaml +++ b/deploy/csi-daemonset.yaml @@ -30,7 +30,7 @@ spec: # the same. containers: - name: mayastor-csi - image: mayadata/mayastor-csi:v0.7.1 + image: mayadata/mayastor-csi:develop imagePullPolicy: Always # we need privileged because we mount filesystems and use mknod securityContext: diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 0982779e9..bc2b85e41 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -33,7 +33,7 @@ spec: command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] containers: - name: mayastor - image: mayadata/mayastor:v0.7.1 + image: mayadata/mayastor:develop imagePullPolicy: Always env: - name: MY_NODE_NAME diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml index 3d09e47e3..61450ae78 100644 --- a/deploy/moac-deployment.yaml +++ b/deploy/moac-deployment.yaml @@ -45,7 +45,7 @@ spec: mountPath: /var/lib/csi/sockets/pluginproxy/ - name: moac - image: mayadata/moac:v0.7.1 + image: mayadata/moac:develop imagePullPolicy: Always args: - "--csi-address=$(CSI_ENDPOINT)" From cb095e791de5997b704fe5988a4b7ecadde3b102 Mon Sep 17 00:00:00 2001 From: shubham Date: Sun, 14 Feb 2021 15:34:44 +0530 Subject: [PATCH 07/78] fix: correct dead links in build docs Signed-off-by: shubham --- doc/build.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build.md b/doc/build.md index 80c4ee56e..31c53bbdf 100644 --- a/doc/build.md +++ b/doc/build.md @@ -133,7 +133,7 @@ There are a few ways to build Mayastor! If you're hacking on Mayastor, it's best > [Nix paper][nix-paper]. 
### Building non-portable Nix derivations -You can build release binaries of Mayastor with [`nix build`](nix-build): +You can build release binaries of Mayastor with [`nix build`][nix-build]: ```bash for PKG in moac mayastor; do @@ -173,7 +173,7 @@ done ### Building Docker images -Build the Docker images with [`nix build`](nix-build): +Build the Docker images with [`nix build`][nix-build]: ```bash for IMAGE in \ From 305292c278b22842c67bc76c14efe11deeab2d97 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Mon, 8 Feb 2021 11:18:02 +0000 Subject: [PATCH 08/78] ci: generate logs as jenkins artefacts CAS-692 --- Jenkinsfile | 3 ++- scripts/e2e-cluster-dump.sh | 4 ++++ scripts/e2e-test.sh | 34 ++++++++++++++++++++++++---------- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index dfccdbf6d..fa4a8633a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -224,7 +224,7 @@ pipeline { ) sh 'kubectl get nodes -o wide' script { - def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\"" + def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\" --logs --logsdir \"./logs/mayastor\" " if (run_extended_e2e_tests) { cmd = cmd + " --extended" } @@ -263,6 +263,7 @@ pipeline { } } always { // always send the junit results back to Xray and Jenkins + archiveArtifacts 'logs/**/*.*' junit 'e2e.*.xml' script { def xray_testplan = getTestPlan() diff --git a/scripts/e2e-cluster-dump.sh b/scripts/e2e-cluster-dump.sh index 912777cdf..ad0755111 100755 --- a/scripts/e2e-cluster-dump.sh +++ b/scripts/e2e-cluster-dump.sh @@ -194,7 +194,11 @@ function getLogs { then cluster-get >& "$dest/cluster.get.txt" cluster-describe >& "$dest/cluster.describe.txt" + echo "logfiles generated in $dest" + ls -l "$dest" + echo "" + else cluster-get cluster-describe diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 5e60d94bc..dd783f7d1 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -18,14 +18,16 @@ TESTS="install basic_volume_io csi resource_check uninstall" EXTENDED_TESTS="" # Global state variables -tests="" -run_extended_tests= +# test configuration state variables device= registry= tag="ci" -generate_logs=0 +# sript state variables +tests="" +run_extended_tests= on_fail="stop" uninstall_cleanup="n" +generate_logs=0 logsdir="" help() { @@ -86,6 +88,9 @@ while [ "$#" -gt 0 ]; do --logsdir) shift logsdir="$1" + if [[ "${logsdir:0:1}" == '.' ]]; then + logsdir="$PWD/$logsdir" + fi ;; -e|--extended) run_extended_tests=1 @@ -162,19 +167,22 @@ test_failed=0 # Run go test in directory specified as $1 (relative path) function runGoTest { - cd "$TESTDIR" + pushd "$TESTDIR" echo "Running go test in $PWD/\"$1\"" if [ -z "$1" ] || [ ! -d "$1" ]; then echo "Unable to locate test directory $PWD/\"$1\"" + popd return 1 fi cd "$1" if ! go test -v . 
-ginkgo.v -ginkgo.progress -timeout 0; then generate_logs=1 + popd return 1 fi + popd return 0 } @@ -189,8 +197,14 @@ echo " e2e_image_tag=$e2e_image_tag" echo " e2e_docker_registry=$e2e_docker_registry" echo " e2e_reports_dir=$e2e_reports_dir" echo " e2e_uninstall_cleanup=$e2e_uninstall_cleanup" - - +echo "" +echo "Script control settings:" +echo " run_extended_tests=$run_extended_tests" +echo " on_fail=$on_fail" +echo " uninstall_cleanup=$uninstall_cleanup" +echo " generate_logs=$generate_logs" +echo " logsdir=$logsdir" +echo "" echo "list of tests: $tests" for testname in $tests; do # defer uninstall till after other tests have been run. @@ -201,7 +215,7 @@ for testname in $tests; do break fi - if ! ("$SCRIPTDIR"/e2e_check_pod_restarts.sh) ; then + if ! ("$SCRIPTDIR/e2e_check_pod_restarts.sh") ; then echo "Test \"$testname\" Failed!! mayastor pods were restarted." test_failed=1 generate_logs=1 @@ -213,12 +227,12 @@ done if [ "$generate_logs" -ne 0 ]; then if [ -n "$logsdir" ]; then - if ! "$SCRIPTDIR"/e2e-cluster-dump.sh --destdir "$logsdir" ; then + if ! "$SCRIPTDIR/e2e-cluster-dump.sh" --destdir "$logsdir" ; then # ignore failures in the dump script : fi else - if ! "$SCRIPTDIR"/e2e-cluster-dump.sh ; then + if ! "$SCRIPTDIR/e2e-cluster-dump.sh" ; then # ignore failures in the dump script : fi @@ -236,7 +250,7 @@ if contains "$tests" "uninstall" ; then test_failed=1 # Dump to the screen only, we do NOT want to overwrite # logfiles that may have been generated. - if ! "$SCRIPTDIR"/e2e-cluster-dump.sh --clusteronly ; then + if ! "$SCRIPTDIR/e2e-cluster-dump.sh" --clusteronly ; then # ignore failures in the dump script : fi From 5d48d78664027efe493ebc39999649991eb2b319 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Mon, 15 Feb 2021 17:33:36 +0000 Subject: [PATCH 09/78] feat: build against SPDK 21.01 Sync with upstream changes to spdk_bdev_opts (buffer pool size), spdk_app_opts_init, and spdk_nvmf_subsystem_pause, which now has the ability to pause individual namespace so we pause namespace 1 which is all Mayastor uses. Includes a fix in openebs/spdk for upstream spdk/spdk@95d8e7a which delays bdev_register notification until after the examine completes to avoid a null pointer dereference if the bdev was destroyed too soon after being registered. Fixes the segfault in the core_4 cargo test, though only if it is run after core_2. Also includes a reversion of openebs/spdk@d2704a0 which causes the nbd device to sometimes hang on disconnection in the core_5 test. 
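
For reference, a minimal sketch of the upstream signature changes this
sync picks up (condensed from the diff below, not the verbatim code):

    // 21.01 opts initialisers take the size of the opts struct the
    // caller was compiled against, so SPDK can cope with opts structs
    // that grow between releases
    spdk_app_opts_init(&mut opts, std::mem::size_of::<spdk_app_opts>() as u64);

    // subsystem pause is now per-namespace; namespace 1 is the only
    // one Mayastor uses
    spdk_nvmf_subsystem_pause(self.0.as_ptr(), 1, Some(pause_cb), cb_arg(s));

spdk_bdev_get_opts and spdk_bdev_opts likewise gain an opts_size field,
along with the small/large buffer pool sizes.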
--- mayastor/src/bin/spdk.rs | 5 ++++- mayastor/src/subsys/config/opts.rs | 12 ++++++++++++ mayastor/src/subsys/nvmf/subsystem.rs | 1 + nix/pkgs/libspdk/default.nix | 6 +++--- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/mayastor/src/bin/spdk.rs b/mayastor/src/bin/spdk.rs index f7e8cf117..23f738986 100644 --- a/mayastor/src/bin/spdk.rs +++ b/mayastor/src/bin/spdk.rs @@ -39,7 +39,10 @@ fn main() -> Result<(), std::io::Error> { let mut opts: spdk_app_opts = Default::default(); unsafe { - spdk_app_opts_init(&mut opts as *mut spdk_app_opts); + spdk_app_opts_init( + &mut opts as *mut spdk_app_opts, + std::mem::size_of::() as u64, + ); if spdk_app_parse_args( (c_args.len() as c_int) - 1, diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index 05cc97b73..b14bd7476 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -332,6 +332,10 @@ pub struct BdevOpts { bdev_io_pool_size: u32, /// number of bdev IO structures cached per thread bdev_io_cache_size: u32, + /// small buffer pool size + small_buf_pool_size: u32, + /// large buffer pool size + large_buf_pool_size: u32, } impl GetOpts for BdevOpts { @@ -340,6 +344,7 @@ impl GetOpts for BdevOpts { unsafe { spdk_sys::spdk_bdev_get_opts( &opts as *const _ as *mut spdk_bdev_opts, + std::mem::size_of::() as u64, ) }; opts.into() @@ -359,6 +364,8 @@ impl Default for BdevOpts { Self { bdev_io_pool_size: try_from_env("BDEV_IO_POOL_SIZE", 65535), bdev_io_cache_size: try_from_env("BDEV_IO_CACHE_SIZE", 512), + small_buf_pool_size: try_from_env("BDEV_SMALL_BUF_POOL_SIZE", 8191), + large_buf_pool_size: try_from_env("BDEV_LARGE_BUF_POOL_SIZE", 1023), } } } @@ -368,6 +375,8 @@ impl From for BdevOpts { Self { bdev_io_pool_size: o.bdev_io_pool_size, bdev_io_cache_size: o.bdev_io_cache_size, + small_buf_pool_size: o.small_buf_pool_size, + large_buf_pool_size: o.large_buf_pool_size, } } } @@ -378,6 +387,9 @@ impl From<&BdevOpts> for spdk_bdev_opts { bdev_io_pool_size: o.bdev_io_pool_size, bdev_io_cache_size: o.bdev_io_cache_size, bdev_auto_examine: false, + opts_size: std::mem::size_of::() as u64, + small_buf_pool_size: o.small_buf_pool_size, + large_buf_pool_size: o.large_buf_pool_size, } } } diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 64ffed88e..3a21913b7 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -398,6 +398,7 @@ impl NvmfSubsystem { unsafe { spdk_nvmf_subsystem_pause( self.0.as_ptr(), + 1, Some(pause_cb), cb_arg(s), ) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index d7f4d18b4..0bc15b549 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -19,13 +19,13 @@ let # Derivation attributes for production version of libspdk drvAttrs = rec { - version = "21.01-pre"; + version = "21.01"; src = fetchFromGitHub { owner = "openebs"; repo = "spdk"; - rev = "285a96fb4bd5fb53876635ec86ebe55089b1ffde"; - sha256 = "0bn40y28iafma19q7fh15ga651d7bcpx85ih5lyi4azvb0l0zjqv"; + rev = "37164626e403cca75afac7e8a47cd53b730bc921"; + sha256 = "0gkdqqs990hgblz0rlkg8355klxnxi2cdvy5p6ws9nqz8cxwrg14"; #sha256 = stdenv.lib.fakeSha256; fetchSubmodules = true; }; From f1546a97ea4f3f9e31f29d750a0960cc84e77046 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 11 Feb 2021 18:20:04 +0000 Subject: [PATCH 10/78] feat(rest-test): new rest test project These are the initial bits of a new test project which leverages the new control plane to test 
mayastor. It has already revealed a few issues. --- Cargo.lock | 17 ++ Cargo.toml | 3 +- composer/src/lib.rs | 130 ++++++-- control-plane/deployer/bin/src/deployer.rs | 2 +- control-plane/deployer/src/infra/mod.rs | 40 ++- control-plane/deployer/src/lib.rs | 6 +- control-plane/mbus-api/Cargo.toml | 1 + control-plane/mbus-api/src/mbus_nats.rs | 11 +- control-plane/mbus-api/src/v0.rs | 17 +- control-plane/rest/src/lib.rs | 64 ++-- control-plane/tests/Cargo.toml | 22 ++ control-plane/tests/tests/common/mod.rs | 329 +++++++++++++++++++++ control-plane/tests/tests/nexus.rs | 25 ++ control-plane/tests/tests/pools.rs | 218 ++++++++++++++ control-plane/tests/tests/replicas.rs | 131 ++++++++ nix/pkgs/control-plane/cargo-project.nix | 2 +- nix/pkgs/mayastor/default.nix | 2 +- scripts/cargo-test.sh | 6 +- scripts/ctrlp-cargo-test.sh | 22 ++ 19 files changed, 982 insertions(+), 66 deletions(-) create mode 100644 control-plane/tests/Cargo.toml create mode 100644 control-plane/tests/tests/common/mod.rs create mode 100644 control-plane/tests/tests/nexus.rs create mode 100644 control-plane/tests/tests/pools.rs create mode 100644 control-plane/tests/tests/replicas.rs create mode 100755 scripts/ctrlp-cargo-test.sh diff --git a/Cargo.lock b/Cargo.lock index 3c9391f47..c2e2772d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1288,6 +1288,22 @@ dependencies = [ "sct", ] +[[package]] +name = "ctrlp-tests" +version = "0.1.0" +dependencies = [ + "actix-rt", + "actix-web-opentelemetry", + "anyhow", + "composer", + "deployer", + "opentelemetry", + "opentelemetry-jaeger", + "rest", + "tracing", + "tracing-opentelemetry", +] + [[package]] name = "curve25519-dalek" version = "3.0.0" @@ -2668,6 +2684,7 @@ dependencies = [ "tracing", "tracing-futures", "tracing-subscriber", + "uuid", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5a901b448..06bdc6388 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,5 +20,6 @@ members = [ "control-plane/rest", "control-plane/operators", "control-plane/macros", - "control-plane/deployer" + "control-plane/deployer", + "control-plane/tests" ] diff --git a/composer/src/lib.rs b/composer/src/lib.rs index 9f1e65e03..b25912103 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -113,11 +113,11 @@ impl Binary { let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); let srcdir = path.parent().unwrap().to_string_lossy(); - Self::new(format!("{}/target/debug/{}", srcdir, name), vec![]) + Self::new(&format!("{}/target/debug/{}", srcdir, name), vec![]) } /// Setup nix shell binary from path and arguments pub fn from_nix(name: &str) -> Self { - Self::new(Self::which(name).expect("binary should exist"), vec![]) + Self::new(name, vec![]) } /// Add single argument /// Only one argument can be passed per use. 
So instead of: @@ -171,11 +171,17 @@ impl Binary { } fn which(name: &str) -> std::io::Result<String> { let output = std::process::Command::new("which").arg(name).output()?; + if !output.status.success() { + return Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + name, + )); + } Ok(String::from_utf8_lossy(&output.stdout).trim().into()) } - fn new(path: String, args: Vec<String>) -> Self { + fn new(path: &str, args: Vec<String>) -> Self { Self { - path, + path: Self::which(path).expect("Binary path should exist!"), arguments: args, ..Default::default() } @@ -310,6 +316,10 @@ pub struct Builder { containers: Vec<ContainerSpec>, /// the network for the tests used network: String, + /// reuse existing containers + reuse: bool, + /// allow cleaning up on a panic (if clean is true) + allow_clean_on_panic: bool, /// delete the container and network when dropped clean: bool, /// destroy existing containers if any @@ -343,6 +353,8 @@ impl Builder { name: TEST_NET_NAME.to_string(), containers: Default::default(), network: "10.1.0.0/16".to_string(), + reuse: false, + allow_clean_on_panic: true, clean: true, prune: true, autorun: true, @@ -420,8 +432,7 @@ impl Builder { } /// add a generic container which runs a local binary - pub fn add_container_bin(self, name: &str, mut bin: Binary) -> Builder { - bin.setup_nats(&self.name); + pub fn add_container_bin(self, name: &str, bin: Binary) -> Builder { self.add_container_spec(ContainerSpec::from_binary(name, bin)) } @@ -430,12 +441,25 @@ impl Builder { self.add_container_spec(ContainerSpec::from_binary(name, image)) } + /// attempt to reuse and restart containers instead of starting new ones + pub fn with_reuse(mut self, reuse: bool) -> Builder { + self.reuse = reuse; + self.prune = !reuse; + self + } + /// clean on drop? pub fn with_clean(mut self, enable: bool) -> Builder { self.clean = enable; self } + /// allow clean on panic if clean is set + pub fn with_clean_on_panic(mut self, enable: bool) -> Builder { + self.allow_clean_on_panic = enable; + self + } + /// prune containers and networks on start pub fn with_prune(mut self, enable: bool) -> Builder { self.prune = enable; @@ -463,14 +487,16 @@ impl Builder { } /// setup tracing for the cargo test code with `filter` + /// ignore when called multiple times pub fn with_tracing(self, filter: &str) -> Self { - if let Ok(filter) = + let builder = if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { - tracing_subscriber::fmt().with_env_filter(filter).init(); + tracing_subscriber::fmt().with_env_filter(filter) } else { - tracing_subscriber::fmt().with_env_filter(filter).init(); - } + tracing_subscriber::fmt().with_env_filter(filter) + }; + builder.try_init().ok(); self } @@ -517,6 +543,8 @@ impl Builder { containers: Default::default(), ipam, label_prefix: "io.mayastor.test".to_string(), + reuse: self.reuse, + allow_clean_on_panic: self.allow_clean_on_panic, clean: self.clean, prune: self.prune, image: self.image, @@ -526,14 +554,37 @@ impl Builder { compose.network_id = compose.network_create().await.map_err(|e| e.to_string())?; - // containers are created where the IPs are ordinal - for (i, spec) in self.containers.iter().enumerate() { - compose - .create_container( - spec, - &net.nth((i + 2) as u32).unwrap().to_string(), - ) - .await?; + if self.reuse { + let containers = + compose.list_network_containers(&self.name).await?; + + for container in containers { + let networks = container + .network_settings + .unwrap_or_default() + .networks + .unwrap_or_default(); + if let Some(n) =
container.names.unwrap_or_default().first() { + if let Some(endpoint) = networks.get(&self.name) { + if let Some(ip) = endpoint.ip_address.clone() { + compose.containers.insert( + n[1 ..].into(), + (container.id.unwrap_or_default(), ip.parse()?), + ); + } + } + } + } + } else { + // containers are created where the IPs are ordinal + for (i, spec) in self.containers.iter().enumerate() { + compose + .create_container( + spec, + &net.nth((i + 2) as u32).unwrap().to_string(), + ) + .await?; + } } Ok(compose) @@ -569,6 +620,10 @@ pub struct ComposeTest { /// prefix for labels set on containers and networks /// $prefix.name = $name will be created automatically label_prefix: String, + /// reuse existing containers + reuse: bool, + /// allow cleaning up on a panic (if clean is set) + allow_clean_on_panic: bool, /// automatically clean up the things we have created for this test clean: bool, /// remove existing containers upon creation @@ -591,10 +646,10 @@ impl Drop for ComposeTest { }); } - if self.clean { + if self.clean && (!thread::panicking() || self.allow_clean_on_panic) { self.containers.keys().for_each(|c| { std::process::Command::new("docker") - .args(&["stop", c]) + .args(&["kill", c]) .output() .unwrap(); std::process::Command::new("docker") @@ -723,7 +778,7 @@ impl ComposeTest { } /// list containers - pub async fn list_containers( + pub async fn list_cluster_containers( &self, ) -> Result, Error> { self.docker @@ -974,9 +1029,11 @@ impl ComposeTest { ), }, )?; - self.docker - .start_container::<&str>(id.0.as_str(), None) - .await?; + if !self.reuse { + self.docker + .start_container::<&str>(id.0.as_str(), None) + .await?; + } Ok(()) } @@ -1010,11 +1067,16 @@ impl ComposeTest { /// restart the container pub async fn restart(&self, name: &str) -> Result<(), Error> { - let id = self.containers.get(name).unwrap(); + let (id, _) = self.containers.get(name).unwrap(); + self.restart_id(id.as_str()).await + } + + /// restart the container id + pub async fn restart_id(&self, id: &str) -> Result<(), Error> { if let Err(e) = self .docker .restart_container( - id.0.as_str(), + id, Some(RestartContainerOptions { t: 3, }), @@ -1115,6 +1177,22 @@ impl ComposeTest { result } + /// restart all the containers part of the network + /// returns the last error, if any or Ok + pub async fn restart_network_containers(&self) -> Result<(), Error> { + let mut result = Ok(()); + let containers = self.list_network_containers(&self.name).await?; + for container in containers { + if let Some(id) = container.id { + if let Err(e) = self.restart_id(&id).await { + println!("Failed to restart container id {:?}", id); + result = Err(e); + } + } + } + result + } + /// inspect the given container pub async fn inspect( &self, diff --git a/control-plane/deployer/bin/src/deployer.rs b/control-plane/deployer/bin/src/deployer.rs index 052e83960..90841692b 100644 --- a/control-plane/deployer/bin/src/deployer.rs +++ b/control-plane/deployer/bin/src/deployer.rs @@ -6,5 +6,5 @@ async fn main() -> Result<(), Error> { let cli_args = CliArgs::from_args(); println!("Using options: {:?}", &cli_args); - cli_args.act().await + cli_args.execute().await } diff --git a/control-plane/deployer/src/infra/mod.rs b/control-plane/deployer/src/infra/mod.rs index ca5f34203..1d7a3f85b 100644 --- a/control-plane/deployer/src/infra/mod.rs +++ b/control-plane/deployer/src/infra/mod.rs @@ -235,6 +235,20 @@ pub fn build_error(name: &str, status: Option) -> Result<(), Error> { } impl Components { + pub async fn start_wait( + &self, + cfg: &ComposeTest, + 
timeout: std::time::Duration, + ) -> Result<(), Error> { + match tokio::time::timeout(timeout, self.start_wait_inner(cfg)).await { + Ok(result) => result, + Err(_) => { + let error = format!("Time out of {:?} expired", timeout); + Err(std::io::Error::new(std::io::ErrorKind::TimedOut, error) + .into()) + } + } + } pub async fn start(&self, cfg: &ComposeTest) -> Result<(), Error> { let mut last_done = None; for component in &self.0 { @@ -254,6 +268,30 @@ impl Components { } Ok(()) } + async fn start_wait_inner(&self, cfg: &ComposeTest) -> Result<(), Error> { + let mut last_done = None; + for component in &self.0 { + if let Some(last_done) = last_done { + if component.boot_order() == last_done { + continue; + } + } + let components = self + .0 + .iter() + .filter(|c| c.boot_order() == component.boot_order()) + .collect::>(); + + for component in &components { + component.start(&self.1, &cfg).await?; + } + for component in &components { + component.wait_on(&self.1, &cfg).await?; + } + last_done = Some(component.boot_order()); + } + Ok(()) + } } #[macro_export] @@ -313,7 +351,7 @@ macro_rules! impl_component { match tokio::time::timeout(timeout, self.wait_on_inner(cfg)).await { Ok(result) => result, Err(_) => { - let error = format!("Timed out of {:?} expired", timeout); + let error = format!("Time out of {:?} expired", timeout); Err(std::io::Error::new(std::io::ErrorKind::TimedOut, error).into()) } } diff --git a/control-plane/deployer/src/lib.rs b/control-plane/deployer/src/lib.rs index ea9d17463..dd524f392 100644 --- a/control-plane/deployer/src/lib.rs +++ b/control-plane/deployer/src/lib.rs @@ -150,13 +150,13 @@ impl StartOptions { impl CliArgs { /// Act upon the requested action - pub async fn act(&self) -> Result<(), Error> { - self.action.act().await + pub async fn execute(&self) -> Result<(), Error> { + self.action.execute().await } } impl Action { - async fn act(&self) -> Result<(), Error> { + async fn execute(&self) -> Result<(), Error> { match self { Action::Start(options) => options.start(self).await, Action::Stop(options) => options.stop(self).await, diff --git a/control-plane/mbus-api/Cargo.toml b/control-plane/mbus-api/Cargo.toml index 47b551b32..027b730c2 100644 --- a/control-plane/mbus-api/Cargo.toml +++ b/control-plane/mbus-api/Cargo.toml @@ -23,6 +23,7 @@ tracing-futures = "0.2.4" tracing-subscriber = "0.2.0" paperclip = { version = "0.5.0", features = ["actix3"] } percent-encoding = "2.1.0" +uuid = { version = "0.7", features = ["v4"] } [dev-dependencies] composer = { path = "../../composer" } diff --git a/control-plane/mbus-api/src/mbus_nats.rs b/control-plane/mbus-api/src/mbus_nats.rs index 8c2d79668..340b0a105 100644 --- a/control-plane/mbus-api/src/mbus_nats.rs +++ b/control-plane/mbus-api/src/mbus_nats.rs @@ -31,15 +31,16 @@ pub async fn message_bus_init(server: String) { } /// Initialise the Nats Message Bus with Options +/// IGNORES all but the first initialisation of NATS_MSG_BUS pub async fn message_bus_init_options( server: String, timeouts: TimeoutOptions, ) { - let nc = NatsMessageBus::new(&server, BusOptions::new(), timeouts).await; - NATS_MSG_BUS - .set(nc) - .ok() - .expect("Expect to be initialised only once"); + if NATS_MSG_BUS.get().is_none() { + let nc = + NatsMessageBus::new(&server, BusOptions::new(), timeouts).await; + NATS_MSG_BUS.set(nc).ok(); + } } /// Get the static `NatsMessageBus` as a boxed `MessageBus` diff --git a/control-plane/mbus-api/src/v0.rs b/control-plane/mbus-api/src/v0.rs index 5e754cece..1d299be78 100644 --- 
a/control-plane/mbus-api/src/v0.rs +++ b/control-plane/mbus-api/src/v0.rs @@ -296,7 +296,7 @@ impl Default for Filter { macro_rules! bus_impl_string_id_inner { ($Name:ident, $Doc:literal) => { #[doc = $Doc] - #[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq, Hash, Apiv2Schema)] + #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash, Apiv2Schema)] pub struct $Name(String); impl std::fmt::Display for $Name { @@ -340,11 +340,21 @@ macro_rules! bus_impl_string_id_inner { macro_rules! bus_impl_string_id { ($Name:ident, $Doc:literal) => { bus_impl_string_id_inner!($Name, $Doc); + impl Default for $Name { + /// Generates new blank identifier + fn default() -> Self { + $Name(uuid::Uuid::default().to_string()) + } + } impl $Name { /// Build Self from a string trait id pub fn from<T: Into<String>>(id: T) -> Self { $Name(id.into()) } + /// Generates new random identifier + pub fn new() -> Self { + $Name(uuid::Uuid::new_v4().to_string()) + } } }; } @@ -352,6 +362,11 @@ macro_rules! bus_impl_string_id { macro_rules! bus_impl_string_id_percent_decoding { ($Name:ident, $Doc:literal) => { bus_impl_string_id_inner!($Name, $Doc); + impl Default for $Name { + fn default() -> Self { + $Name("".to_string()) + } + } impl $Name { /// Build Self from a string trait id pub fn from<T: Into<String>>(id: T) -> Self { diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs index 0153804e5..023420188 100644 --- a/control-plane/rest/src/lib.rs +++ b/control-plane/rest/src/lib.rs @@ -15,8 +15,13 @@ /// expose different versions of the client pub mod versions; -use actix_web::{body::Body, client::Client}; +use actix_web::{ + body::Body, + client::{Client, ClientResponse, PayloadError}, + web::Bytes, +}; use actix_web_opentelemetry::ClientExt; +use futures::Stream; use paperclip::actix::Apiv2Schema; use serde::{Deserialize, Serialize}; use std::{io::BufReader, string::ToString}; @@ -86,7 +91,7 @@ impl ActixRestClient { self.client.get(uri.clone()).send().await }; - let mut rest_response = result.map_err(|error| { + let rest_response = result.map_err(|error| { anyhow::anyhow!( "Failed to get uri '{}' from rest, err={:?}", uri, @@ -94,17 +99,7 @@ impl ActixRestClient { ) })?; - let rest_body = rest_response.body().await?; - if rest_response.status().is_success() { - match serde_json::from_slice(&rest_body) { - Ok(result) => Ok(result), - Err(_) => Ok(vec![serde_json::from_slice::<R>(&rest_body)?]), - } - } else { - let error: serde_json::value::Value = - serde_json::from_slice(&rest_body)?; - Err(anyhow::anyhow!(error.to_string())) - } + Self::rest_vec_result(rest_response).await } async fn put<R, B: Into<Body>>( &self, @@ -131,7 +126,7 @@ .await }; - let mut rest_response = result.map_err(|error| { + let rest_response = result.map_err(|error| { anyhow::anyhow!( "Failed to put uri '{}' from rest, err={:?}", uri, @@ -139,14 +134,7 @@ ) })?; - let rest_body = rest_response.body().await?; - if rest_response.status().is_success() { - Ok(serde_json::from_slice::<R>(&rest_body)?) - } else { - let error: serde_json::value::Value = - serde_json::from_slice(&rest_body)?; - Err(anyhow::anyhow!(error.to_string())) - } + Self::rest_result(rest_response).await } async fn del<R>(&self, urn: String) -> anyhow::Result<R> where @@ -160,7 +148,7 @@ impl ActixRestClient { self.client.delete(uri.clone()).send().await }; - let mut rest_response = result.map_err(|error| { + let rest_response = result.map_err(|error| { anyhow::anyhow!( "Failed to delete uri '{}' from rest, err={:?}", uri, @@ -168,6 +156,36 @@ impl ActixRestClient { ) })?; + Self::rest_result(rest_response).await + } + + async fn rest_vec_result<S, R>( + mut rest_response: ClientResponse<S>, + ) -> anyhow::Result<Vec<R>> + where + S: Stream<Item = Result<Bytes, PayloadError>> + Unpin, + for<'de> R: Deserialize<'de>, + { + let rest_body = rest_response.body().await?; + if rest_response.status().is_success() { + match serde_json::from_slice(&rest_body) { + Ok(result) => Ok(result), + Err(_) => Ok(vec![serde_json::from_slice::<R>(&rest_body)?]), + } + } else { + let error: serde_json::value::Value = + serde_json::from_slice(&rest_body)?; + Err(anyhow::anyhow!(error.to_string())) + } + } + + async fn rest_result<S, R>( + mut rest_response: ClientResponse<S>, + ) -> anyhow::Result<R> + where + S: Stream<Item = Result<Bytes, PayloadError>> + Unpin, + for<'de> R: Deserialize<'de>, + { let rest_body = rest_response.body().await?; if rest_response.status().is_success() { Ok(serde_json::from_slice::<R>(&rest_body)?) diff --git a/control-plane/tests/Cargo.toml b/control-plane/tests/Cargo.toml new file mode 100644 index 000000000..c26125240 --- /dev/null +++ b/control-plane/tests/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "ctrlp-tests" +version = "0.1.0" +authors = ["Tiago Castro "] +edition = "2018" +description = "Control Plane 'Compose' Tests" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +[dev-dependencies] +composer = { path = "../../composer" } +deployer = { path = "../deployer" } +rest = { path = "../rest" } +actix-rt = "1.1.1" +opentelemetry-jaeger = { version = "0.10", features = ["tokio"] } +tracing-opentelemetry = "0.10.0" +tracing = "0.1" +opentelemetry = "0.11.2" +actix-web-opentelemetry = "0.9.0" +anyhow = "1.0.32" \ No newline at end of file diff --git a/control-plane/tests/tests/common/mod.rs b/control-plane/tests/tests/common/mod.rs new file mode 100644 index 000000000..f8ecc399f --- /dev/null +++ b/control-plane/tests/tests/common/mod.rs @@ -0,0 +1,329 @@ +use composer::*; +use deployer_lib::{ + infra::{Components, Error, Mayastor}, + *, +}; +use opentelemetry::{ + global, + sdk::{propagation::TraceContextPropagator, trace::Tracer}, +}; + +use opentelemetry_jaeger::Uninstall; +pub use rest_client::{ + versions::v0::{self, RestClient}, + ActixRestClient, +}; + +#[actix_rt::test] +#[ignore] +async fn smoke_test() { + // make sure the cluster can bootstrap properly + let _cluster = ClusterBuilder::builder() + .build() + .await + .expect("Should bootstrap the cluster!"); +} + +/// Default options to create a cluster +pub fn default_options() -> StartOptions { + StartOptions::default() + .with_agents(default_agents().split(',').collect()) + .with_jaeger(true) + .with_mayastors(1) + .with_show_info(true) + .with_cluster_name("rest_cluster") +} + +/// Cluster with the composer, the rest client and the jaeger pipeline +#[allow(unused)] +pub struct Cluster { + composer: ComposeTest, + rest_client: ActixRestClient, + jaeger: (Tracer, Uninstall), + builder: ClusterBuilder, +} + +impl Cluster { + /// node id for `index` + pub fn node(&self, index: u32)
-> v0::NodeId { + Mayastor::name(index, &self.builder.opts).into() + } + + /// pool id for `pool` index on `node` index + pub fn pool(&self, node: u32, pool: u32) -> v0::PoolId { + format!("{}-pool-{}", self.node(node), pool + 1).into() + } + + /// replica id with index for `pool` index and `replica` index + pub fn replica(pool: u32, replica: u32) -> v0::ReplicaId { + let mut uuid = v0::ReplicaId::default().to_string(); + let _ = uuid.drain(27 .. uuid.len()); + format!("{}{:01x}{:08x}", uuid, pool as u8, replica).into() + } + + /// rest client v0 + pub fn rest_v0(&self) -> impl RestClient { + self.rest_client.v0() + } + + /// New cluster + async fn new( + trace_rest: bool, + components: Components, + composer: ComposeTest, + jaeger: (Tracer, Uninstall), + ) -> Result<Cluster, Error> { + let rest_client = + ActixRestClient::new("https://localhost:8080", trace_rest).unwrap(); + + components + .start_wait(&composer, std::time::Duration::from_secs(10)) + .await?; + + let cluster = Cluster { + composer, + rest_client, + jaeger, + builder: ClusterBuilder::builder(), + }; + + Ok(cluster) + } +} + +fn option_str<T: ToString>(input: Option<T>) -> String { + match input { + Some(input) => input.to_string(), + None => "?".into(), + } +} + +/// Run future and compare result with what's expected +/// Expected result should be in the form Result<TestValue, TestValue> +/// where TestValue is a useful value which will be added to the returned error +/// string Eg, testing the replica share protocol: +/// test_result(Ok(Nvmf), async move { ... }) +/// test_result(Err(NBD), async move { ... }) +pub async fn test_result<F, R, E, O>( + expected: &Result<O, E>, + future: F, +) -> Result<(), anyhow::Error> +where + F: std::future::Future<Output = Result<R, anyhow::Error>>, + R: std::fmt::Display, + E: std::fmt::Debug, + O: std::fmt::Debug, +{ + match future.await { + Ok(_) if expected.is_ok() => Ok(()), + Err(_) if expected.is_err() => Ok(()), + Err(error) => Err(anyhow::anyhow!( + "Expected '{:#?}' but failed with '{}'!", + expected, + error + )), + Ok(_) => { + Err(anyhow::anyhow!("Expected '{:#?}' but succeeded!", expected)) + } + } +} + +#[macro_export] +macro_rules!
result_either { + ($test:expr) => { + match $test { + Ok(v) => v, + Err(v) => v, + } + }; +} + +/// Builder for the Cluster +pub struct ClusterBuilder { + opts: StartOptions, + pools: u32, + replicas: (u32, u64, v0::Protocol), + trace: bool, +} + +impl ClusterBuilder { + /// Cluster Builder with default options + pub fn builder() -> Self { + ClusterBuilder { + opts: default_options(), + pools: 0, + replicas: (0, 0, v0::Protocol::Off), + trace: true, + } + } + /// Update the start options + pub fn with_options(mut self, set: F) -> Self + where + F: Fn(StartOptions) -> StartOptions, + { + self.opts = set(self.opts); + self + } + /// Enable/Disable jaeger tracing + pub fn with_tracing(mut self, enabled: bool) -> Self { + self.trace = enabled; + self + } + /// Add `count` malloc pools (100MiB size) to each node + pub fn with_pools(mut self, count: u32) -> Self { + self.pools = count; + self + } + /// Add `count` replicas to each node per pool + pub fn with_replicas( + mut self, + count: u32, + size: u64, + share: v0::Protocol, + ) -> Self { + self.replicas = (count, size, share); + self + } + /// Build into the resulting Cluster using a composer closure, eg: + /// .compose_build(|c| c.with_logs(false)) + pub async fn compose_build(self, set: F) -> Result + where + F: Fn(Builder) -> Builder, + { + let (components, composer) = self.build_prepare()?; + let composer = set(composer); + let mut cluster = self.new_cluster(components, composer).await?; + cluster.builder = self; + Ok(cluster) + } + /// Build into the resulting Cluster + pub async fn build(self) -> Result { + let (components, composer) = self.build_prepare()?; + let mut cluster = self.new_cluster(components, composer).await?; + cluster.builder = self; + Ok(cluster) + } + fn build_prepare(&self) -> Result<(Components, Builder), Error> { + let components = Components::new(self.opts.clone()); + let composer = Builder::new() + .name(&self.opts.cluster_name) + .configure(components.clone())? + .with_base_image(self.opts.base_image.clone()) + .autorun(false) + .with_default_tracing() + .with_clean(true) + // test script will clean up containers if ran on CI/CD + .with_clean_on_panic(false) + .with_logs(true); + Ok((components, composer)) + } + async fn new_cluster( + &self, + components: Components, + compose_builder: Builder, + ) -> Result { + global::set_text_map_propagator(TraceContextPropagator::new()); + let jaeger = opentelemetry_jaeger::new_pipeline() + .with_service_name("tests-client") + .install() + .unwrap(); + + let composer = compose_builder.build().await?; + let cluster = + Cluster::new(self.trace, components, composer, jaeger).await?; + + if self.opts.show_info { + for container in cluster.composer.list_cluster_containers().await? { + let networks = + container.network_settings.unwrap().networks.unwrap(); + let ip = networks + .get(&self.opts.cluster_name) + .unwrap() + .ip_address + .clone(); + tracing::debug!( + "{:?} [{}] {}", + container.names.clone().unwrap_or_default(), + ip.clone().unwrap_or_default(), + option_str(container.command.clone()) + ); + } + } + for pool in &self.pools() { + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: pool.node.clone().into(), + id: pool.id(), + disks: vec![pool.disk()], + }) + .await + .unwrap(); + for replica in &pool.replicas { + cluster + .rest_v0() + .create_replica(replica.clone()) + .await + .unwrap(); + } + } + + Ok(cluster) + } + fn pools(&self) -> Vec { + let mut pools = vec![]; + for pool_index in 0 .. self.pools { + for node in 0 .. 
self.opts.mayastors { + let mut pool = Pool { + node: Mayastor::name(node, &self.opts), + kind: PoolKind::Malloc, + size_mb: 100, + index: (pool_index + 1) as u32, + replicas: vec![], + }; + for replica_index in 0 .. self.replicas.0 { + pool.replicas.push(v0::CreateReplica { + node: pool.node.clone().into(), + uuid: Cluster::replica(pool_index, replica_index), + pool: pool.id(), + size: self.replicas.1, + thin: false, + share: self.replicas.2.clone(), + }); + } + pools.push(pool); + } + } + pools + } +} + +#[allow(dead_code)] +enum PoolKind { + Malloc, + Aio, + Uring, + Nvmf, +} + +struct Pool { + node: String, + kind: PoolKind, + size_mb: u32, + index: u32, + replicas: Vec, +} + +impl Pool { + fn id(&self) -> v0::PoolId { + format!("{}-pool-{}", self.node, self.index).into() + } + fn disk(&self) -> String { + match self.kind { + PoolKind::Malloc => { + format!("malloc:///disk{}?size_mb={}", self.index, self.size_mb) + } + _ => panic!("kind not supported!"), + } + } +} diff --git a/control-plane/tests/tests/nexus.rs b/control-plane/tests/tests/nexus.rs new file mode 100644 index 000000000..d1497c57a --- /dev/null +++ b/control-plane/tests/tests/nexus.rs @@ -0,0 +1,25 @@ +#![feature(allow_fail)] + +pub mod common; +use common::*; + +#[actix_rt::test] +async fn create_nexus() { + let cluster = ClusterBuilder::builder() + .with_pools(1) + .with_replicas(2, 5 * 1024 * 1024, v0::Protocol::Off) + .build() + .await + .unwrap(); + + cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size: 10 * 1024 * 1024, + children: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); +} diff --git a/control-plane/tests/tests/pools.rs b/control-plane/tests/tests/pools.rs new file mode 100644 index 000000000..2268f9ac9 --- /dev/null +++ b/control-plane/tests/tests/pools.rs @@ -0,0 +1,218 @@ +#![feature(allow_fail)] + +pub mod common; +use common::*; + +#[actix_rt::test] +async fn create_pool_malloc() { + let cluster = ClusterBuilder::builder().build().await.unwrap(); + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); +} + +#[actix_rt::test] +async fn create_pool_with_missing_disk() { + let cluster = ClusterBuilder::builder().build().await.unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["/dev/c/3po".into()], + }) + .await + .expect_err("Device should not exist"); +} + +#[actix_rt::test] +async fn create_pool_with_existing_disk() { + let cluster = ClusterBuilder::builder().build().await.unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop-new".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .expect_err("Disk should be used by another pool"); + + cluster + .rest_v0() + .destroy_pool(v0::DestroyPool { + node: "mayastor".into(), + id: "pooloop".into(), + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop-new".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .expect("Should now be able to create the new pool"); +} + +#[actix_rt::test] +async fn 
create_pool_idempotent() { + let cluster = ClusterBuilder::builder().build().await.unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); +} + +/// FIXME: CAS-710 +#[actix_rt::test] +#[allow_fail] +async fn create_pool_idempotent_same_disk_different_query() { + let cluster = ClusterBuilder::builder() + // don't log whilst we have the allow_fail + .compose_build(|c| c.with_logs(false)) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=100&blk_size=512".into()], + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor".into(), + id: "pooloop".into(), + disks: vec!["malloc:///disk?size_mb=200&blk_size=4096".into()], + }) + .await + .expect_err("Different query not allowed!"); +} + +#[actix_rt::test] +async fn create_pool_idempotent_different_nvmf_host() { + let cluster = ClusterBuilder::builder() + .with_options(|opts| opts.with_mayastors(3)) + .build() + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor-1".into(), + id: "pooloop-1".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); + + let replica1 = cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: "mayastor-1".into(), + uuid: "0aa4a830-a971-4e96-a97c-15c39dd8f162".into(), + pool: "pooloop-1".into(), + size: 10 * 1024 * 1024, + thin: true, + share: v0::Protocol::Nvmf, + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor-2".into(), + id: "pooloop-2".into(), + disks: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); + + let replica2 = cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: "mayastor-2".into(), + uuid: "0aa4a830-a971-4e96-a97c-15c39dd8f162".into(), + pool: "pooloop-2".into(), + size: 10 * 1024 * 1024, + thin: true, + share: v0::Protocol::Nvmf, + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor-3".into(), + id: "pooloop".into(), + disks: vec![replica1.uri.clone()], + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor-3".into(), + id: "pooloop".into(), + disks: vec![replica1.uri], + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_pool(v0::CreatePool { + node: "mayastor-3".into(), + id: "pooloop".into(), + disks: vec![replica2.uri], + }) + .await + .expect_err("Different host!"); +} diff --git a/control-plane/tests/tests/replicas.rs b/control-plane/tests/tests/replicas.rs new file mode 100644 index 000000000..ebe121116 --- /dev/null +++ b/control-plane/tests/tests/replicas.rs @@ -0,0 +1,131 @@ +#![feature(allow_fail)] + +pub mod common; +use common::*; + +// FIXME: CAS-721 +#[actix_rt::test] +#[allow_fail] +async fn create_replica() { + let cluster = ClusterBuilder::builder() + .with_pools(1) + // don't log whilst we have the allow_fail + .compose_build(|c| c.with_logs(false)) + .await + .unwrap(); + + let replica = v0::CreateReplica { + node: cluster.node(0), + uuid: Default::default(), + pool: cluster.pool(0, 0), + size: 5 * 1024 * 1024, + thin: true, + share: 
v0::Protocol::Off, + }; + let created_replica = cluster + .rest_v0() + .create_replica(replica.clone()) + .await + .unwrap(); + assert_eq!(created_replica.node, replica.node); + assert_eq!(created_replica.uuid, replica.uuid); + assert_eq!(created_replica.pool, replica.pool); + + // todo: why is this not the same? + // assert_eq!(created_replica.size, replica.size); + // fixme: replicas are always created without thin provisioning + assert_eq!(created_replica.thin, replica.thin); + assert_eq!(created_replica.share, replica.share); +} + +#[actix_rt::test] +async fn create_replica_protocols() { + let cluster = ClusterBuilder::builder() + .with_pools(1) + .build() + .await + .unwrap(); + + let protocols = vec![ + Err(v0::Protocol::Nbd), + Err(v0::Protocol::Iscsi), + Ok(v0::Protocol::Nvmf), + Ok(v0::Protocol::Off), + ]; + + for test in protocols { + let protocol = result_either!(&test); + test_result( + &test, + cluster.rest_v0().create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: v0::ReplicaId::new(), + pool: cluster.pool(0, 0), + size: 5 * 1024 * 1024, + thin: true, + share: protocol.clone(), + }), + ) + .await + .unwrap(); + } +} + +// FIXME: CAS-731 +#[actix_rt::test] +#[allow_fail] +async fn create_replica_idempotent_different_sizes() { + let cluster = ClusterBuilder::builder() + .with_pools(1) + // don't log whilst we have the allow_fail + .compose_build(|c| c.with_logs(false)) + .await + .unwrap(); + + let uuid = v0::ReplicaId::new(); + let size = 5 * 1024 * 1024; + let replica = cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: uuid.clone(), + pool: cluster.pool(0, 0), + size, + thin: false, + share: v0::Protocol::Off, + }) + .await + .unwrap(); + assert_eq!(&replica.uuid, &uuid); + + cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: uuid.clone(), + pool: cluster.pool(0, 0), + size, + thin: replica.thin, + share: v0::Protocol::Off, + }) + .await + .unwrap(); + + let sizes = vec![Ok(size), Err(size / 2), Err(size * 2)]; + for test in sizes { + let size = result_either!(test); + test_result( + &test, + cluster.rest_v0().create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: v0::ReplicaId::new(), + pool: cluster.pool(0, 0), + size, + thin: replica.thin, + share: v0::Protocol::Off, + }), + ) + .await + .unwrap(); + } +} diff --git a/nix/pkgs/control-plane/cargo-project.nix b/nix/pkgs/control-plane/cargo-project.nix index aa700c632..2cef59bc7 100644 --- a/nix/pkgs/control-plane/cargo-project.nix +++ b/nix/pkgs/control-plane/cargo-project.nix @@ -29,7 +29,7 @@ let buildProps = rec { name = "control-plane"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "02qf9pnja4cn31qnzawbrqhny88ja19sqm68zy12ly4vmg6dd3lf"; + cargoSha256 = "1iqmrl8qm8nw1hg219kdyxd1zk9c58p1avymjis3snxnlagafx37"; inherit version; src = whitelistSource ../../../. (pkgs.callPackage ../mayastor { }).src_list; cargoBuildFlags = [ "-p mbus_api" "-p agents" "-p rest" "-p operators" ]; diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 94b78b47d..c490e4703 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -56,7 +56,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1c5zwaivwsx7gznjvsd0gfhbvjji5q1qbjacdm6vfapqv9i79yfn"; + cargoSha256 = "1ynd6fmdr89f0g9vqsbz2rfl6ld23qv92lqcma5m4xcyhblbv5g0"; inherit version; src = whitelistSource ../../../. 
src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/scripts/cargo-test.sh b/scripts/cargo-test.sh index 242149a95..02dba8ed8 100755 --- a/scripts/cargo-test.sh +++ b/scripts/cargo-test.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +SCRIPTDIR=$(dirname "$0") + cleanup_handler() { for c in $(docker ps -a --filter "label=io.mayastor.test.name" --format '{{.ID}}') ; do docker kill "$c" || true @@ -19,7 +21,5 @@ export PATH=$PATH:${HOME}/.cargo/bin # test dependencies cargo build --bins ( cd mayastor && cargo test -- --test-threads=1 ) -for test in composer agents rest; do - cargo test -p ${test} -- --test-threads=1 -done ( cd nvmeadm && cargo test ) +"$SCRIPTDIR/ctrlp-cargo-test.sh" diff --git a/scripts/ctrlp-cargo-test.sh b/scripts/ctrlp-cargo-test.sh new file mode 100755 index 000000000..680532303 --- /dev/null +++ b/scripts/ctrlp-cargo-test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +cleanup_handler() { + for c in $(docker ps -a --filter "label=io.mayastor.test.name" --format '{{.ID}}') ; do + docker kill "$c" || true + docker rm "$c" || true + done + + for n in $(docker network ls --filter "label=io.mayastor.test.name" --format '{{.ID}}') ; do + docker network rm "$n" || true + done +} + +trap cleanup_handler ERR INT QUIT TERM HUP + +set -euxo pipefail +export PATH=$PATH:${HOME}/.cargo/bin +# test dependencies +cargo build --bins +for test in composer agents rest ctrlp-tests; do + cargo test -p ${test} -- --test-threads=1 +done From 91a75efc42d244f40ebb20d6b317e97b84484b6b Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 15 Feb 2021 11:16:22 +0000 Subject: [PATCH 11/78] chore(rest): add openapi spec to the tree Add pre-commit hook to make sure we update the spec as we change the rest code. Add another step to the CI linter to check that the spec is up to date. 
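A sketch of the workflow this enables, assuming only the script path and the --changes flag added by this patch:

    # regenerate the spec unconditionally and stage the result
    ./scripts/openapi-check.sh
    git add control-plane/rest/openapi-specs/v0_api_spec.json

    # regenerate only when staged changes touch control-plane/rest,
    # which is how the pre-commit hook invokes the script
    ./scripts/openapi-check.sh --changes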
(Note the linter here isn't quite right for this) --- .pre-commit-config.yaml | 7 +++ Jenkinsfile | 1 + .../rest/openapi-specs/v0_api_spec.json | 1 + control-plane/rest/service/src/main.rs | 43 +++++++++++++------ control-plane/rest/service/src/v0/mod.rs | 14 ++++++ scripts/openapi-check.sh | 22 ++++++++++ 6 files changed, 76 insertions(+), 12 deletions(-) create mode 100644 control-plane/rest/openapi-specs/v0_api_spec.json create mode 100755 scripts/openapi-check.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a03312fa0..48f050306 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,4 +37,11 @@ repos: entry: bash -c "npm install @commitlint/config-conventional @commitlint/cli; cat $1 | npx commitlint" args: [$1] stages: [commit-msg] + - id: openapi-check + name: OpenApi Generator + description: Ensures OpenApi spec is up to date + entry: ./scripts/openapi-check.sh + args: ["--changes"] + pass_filenames: false + language: system diff --git a/Jenkinsfile b/Jenkinsfile index fa4a8633a..0fe189cc0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -110,6 +110,7 @@ pipeline { sh 'nix-shell --run "cargo fmt --all -- --check"' sh 'nix-shell --run "cargo clippy --all-targets -- -D warnings"' sh 'nix-shell --run "./scripts/js-check.sh"' + sh 'nix-shell --run "./scripts/openapi-check.sh"' } } stage('test') { diff --git a/control-plane/rest/openapi-specs/v0_api_spec.json b/control-plane/rest/openapi-specs/v0_api_spec.json new file mode 100644 index 000000000..042c1927d --- /dev/null +++ b/control-plane/rest/openapi-specs/v0_api_spec.json @@ -0,0 +1 @@ +{"swagger":"2.0","definitions":{"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{repl
ica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor RESTful API"}} \ No newline at end of file diff --git a/control-plane/rest/service/src/main.rs b/control-plane/rest/service/src/main.rs index 10995a947..6dc18be44 100644 --- a/control-plane/rest/service/src/main.rs +++ b/control-plane/rest/service/src/main.rs @@ -12,7 +12,7 @@ use rustls::{ NoClientAuth, ServerConfig, }; -use std::{fs::File, io::BufReader}; +use std::{fs::File, io::BufReader, str::FromStr}; use structopt::StructOpt; #[derive(Debug, StructOpt)] @@ -40,17 +40,29 @@ pub(crate) struct CliArgs { #[structopt(long, short, required_unless = "cert-file")] dummy_certificates: bool, + /// Output the OpenApi specs to this directory + #[structopt(long, short, parse(try_from_str = parse_dir))] + output_specs: Option, + /// Trace rest requests to the Jaeger endpoint agent #[structopt(long, short)] jaeger: Option, } +fn parse_dir(src: &str) -> anyhow::Result { + let path = std::path::PathBuf::from_str(src)?; + anyhow::ensure!(path.exists(), "does not exist!"); + anyhow::ensure!(path.is_dir(), "must be a directory!"); + Ok(path) +} + use actix_web_opentelemetry::RequestTracing; use opentelemetry::{ global, sdk::{propagation::TraceContextPropagator, trace::Tracer}, }; use opentelemetry_jaeger::Uninstall; +use std::path::PathBuf; fn init_tracing() -> Option<(Tracer, Uninstall)> { if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { @@ -155,21 +167,28 @@ async fn main() -> anyhow::Result<()> { // need to keep the jaeger pipeline tracer alive, if enabled let _tracer = init_tracing(); - mbus_api::message_bus_init(CliArgs::from_args().nats).await; - - let server = HttpServer::new(move || { + let app = move || { App::new() 
            .wrap(RequestTracing::new())
            .wrap(middleware::Logger::default())
            .configure_api(&v0::configure_api)
-    })
-    .bind_rustls(CliArgs::from_args().https, get_certificates()?)?;
-    if let Some(http) = CliArgs::from_args().http {
-        server.bind(http).map_err(anyhow::Error::from)?
+    };
+
+    if CliArgs::from_args().output_specs.is_some() {
+        // call the app which will write out the api specs to files
+        let _ = app();
+        Ok(())
     } else {
-        server
+        mbus_api::message_bus_init(CliArgs::from_args().nats).await;
+        let server = HttpServer::new(app)
+            .bind_rustls(CliArgs::from_args().https, get_certificates()?)?;
+        if let Some(http) = CliArgs::from_args().http {
+            server.bind(http).map_err(anyhow::Error::from)?
+        } else {
+            server
+        }
+        .run()
+        .await
+        .map_err(|e| e.into())
     }
-    .run()
-    .await
-    .map_err(|e| e.into())
 }
diff --git a/control-plane/rest/service/src/v0/mod.rs b/control-plane/rest/service/src/v0/mod.rs
index 71966ab4b..e89af1777 100644
--- a/control-plane/rest/service/src/v0/mod.rs
+++ b/control-plane/rest/service/src/v0/mod.rs
@@ -21,6 +21,9 @@ use actix_web::{
 };
 use macros::actix::{delete, get, put};
 use paperclip::actix::OpenApiExt;
+use std::io::Write;
+use structopt::StructOpt;
+use tracing::info;
 
 fn version() -> String {
     "v0".into()
@@ -64,6 +67,17 @@ where
 {
     api.wrap_api_with_spec(get_api())
         .configure(configure)
+        .with_raw_json_spec(|app, spec| {
+            if let Some(dir) = super::CliArgs::from_args().output_specs {
+                let file = dir.join(&format!("{}_api_spec.json", version()));
+                info!("Writing {} to {}", spec_uri(), file.to_string_lossy());
+                let mut file = std::fs::File::create(file)
+                    .expect("Should create the spec file");
+                file.write_all(spec.to_string().as_ref())
+                    .expect("Should write the spec to file");
+            }
+            app
+        })
         .with_json_spec_at(&spec_uri())
         .build()
         .configure(swagger_ui::configure)
diff --git a/scripts/openapi-check.sh b/scripts/openapi-check.sh
new file mode 100755
index 000000000..9d52ab3c7
--- /dev/null
+++ b/scripts/openapi-check.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -e
+
+SCRIPTDIR=$(dirname "$0")
+SRC="$SCRIPTDIR/../control-plane/rest"
+SPECS="$SCRIPTDIR/../control-plane/rest/openapi-specs"
+
+# Regenerate the spec only if the rest src changed
+check_rest_src="no"
+
+case "$1" in
+    --changes)
+        check_rest_src="yes"
+        ;;
+esac
+
+if [[ $check_rest_src = "yes" ]]; then
+    git diff --cached --exit-code $SRC 1>/dev/null && exit 0
+fi
+
+cargo run --bin rest -- -d -o $SPECS

From 0994dbc122da3537f0936194470797aaae8a25b3 Mon Sep 17 00:00:00 2001
From: Paul Yoong
Date: Mon, 15 Feb 2021 11:27:52 +0000
Subject: [PATCH 12/78] feat(rest): expose list_block_devices method

Implement a REST call for listing block devices. The REST call URI can
optionally include the 'all' query parameter, which determines whether
all devices are listed or only those deemed usable. Omitting the query
parameter results in all devices being listed.

Also add traits for converting messages between rpc and message bus
formats, as this conversion will be a common operation for control
plane agents.
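As a rough usage sketch from the bus side (assumptions for illustration:
the message bus has been initialised and a node named "mayastor" is
registered; `request()` is provided by the mbus_api Message trait):

    use mbus_api::{v0::GetBlockDevices, Message};

    // ask the node agent for every block device on node "mayastor";
    // set `all: false` to get only the devices deemed usable
    let devices = GetBlockDevices {
        node: "mayastor".into(),
        all: true,
    }
    .request()
    .await?;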
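The new traits are intended to replace the per-call translation helpers
in the agents; a minimal sketch of the pattern (not standalone: the
types come from the mbus_api and rpc crates, and the node id below is an
example of a field only the caller can fill in):

    use common::wrapper::v0::msg_translation::{
        MessageBusToRpc,
        RpcToMessageBus,
    };
    use mbus_api::v0 as mbus;
    use rpc::mayastor as rpc;

    // message bus -> rpc, e.g. just before issuing a gRPC call
    fn to_grpc_request(bus: &mbus::CreateReplica) -> rpc::CreateReplicaRequest {
        bus.to_rpc()
    }

    // rpc -> message bus, e.g. when translating a gRPC reply; the node
    // id is filled in afterwards, as rpc_replica_to_bus now does
    fn from_grpc_reply(reply: &rpc::Replica, node: mbus::NodeId) -> mbus::Replica {
        let mut replica = reply.to_mbus();
        replica.node = node;
        replica
    }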
---
 .../agents/common/src/wrapper/v0/mod.rs       |   1 +
 .../common/src/wrapper/v0/msg_translation.rs  | 271 ++++++++++++++++++
 .../agents/common/src/wrapper/v0/pool.rs      | 101 +------
 .../agents/common/src/wrapper/v0/volume.rs    |  97 +------
 control-plane/agents/node/src/server.rs       |  69 ++++-
 control-plane/mbus-api/src/message_bus/v0.rs  |   8 +
 control-plane/mbus-api/src/v0.rs              |  78 +++++
 .../rest/service/src/v0/block_devices.rs      |  36 +++
 control-plane/rest/service/src/v0/mod.rs      |   2 +
 control-plane/rest/src/versions/v0.rs         |   9 +
 10 files changed, 495 insertions(+), 177 deletions(-)
 create mode 100644 control-plane/agents/common/src/wrapper/v0/msg_translation.rs
 create mode 100644 control-plane/rest/service/src/v0/block_devices.rs

diff --git a/control-plane/agents/common/src/wrapper/v0/mod.rs b/control-plane/agents/common/src/wrapper/v0/mod.rs
index bf5bac2bf..4d1518c7f 100644
--- a/control-plane/agents/common/src/wrapper/v0/mod.rs
+++ b/control-plane/agents/common/src/wrapper/v0/mod.rs
@@ -143,6 +143,7 @@ macro_rules! impl_no_nexus {
     };
 }
 
+pub mod msg_translation;
 mod node_traits;
 mod pool;
 mod volume;
diff --git a/control-plane/agents/common/src/wrapper/v0/msg_translation.rs b/control-plane/agents/common/src/wrapper/v0/msg_translation.rs
new file mode 100644
index 000000000..a511610af
--- /dev/null
+++ b/control-plane/agents/common/src/wrapper/v0/msg_translation.rs
@@ -0,0 +1,271 @@
+//! Converts rpc messages to message bus messages and vice versa.
+
+use mbus_api::{
+    v0 as mbus,
+    v0::{ChildState, NexusState, Protocol},
+};
+use rpc::mayastor as rpc;
+
+/// Trait for converting rpc messages to message bus messages.
+pub trait RpcToMessageBus {
+    /// Message bus message type.
+    type BusMessage;
+    /// Conversion of rpc message to message bus message.
+    fn to_mbus(&self) -> Self::BusMessage;
+}
+
+impl RpcToMessageBus for rpc::block_device::Partition {
+    type BusMessage = mbus::Partition;
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            parent: self.parent.clone(),
+            number: self.number,
+            name: self.name.clone(),
+            scheme: self.scheme.clone(),
+            typeid: self.typeid.clone(),
+            uuid: self.uuid.clone(),
+        }
+    }
+}
+
+impl RpcToMessageBus for rpc::block_device::Filesystem {
+    type BusMessage = mbus::Filesystem;
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            fstype: self.fstype.clone(),
+            label: self.label.clone(),
+            uuid: self.uuid.clone(),
+            mountpoint: self.mountpoint.clone(),
+        }
+    }
+}
+
+/// Node Agent Conversions
+
+impl RpcToMessageBus for rpc::BlockDevice {
+    type BusMessage = mbus::BlockDevice;
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            devname: self.devname.clone(),
+            devtype: self.devtype.clone(),
+            devmajor: self.devmajor,
+            devminor: self.devminor,
+            model: self.model.clone(),
+            devpath: self.devpath.clone(),
+            devlinks: self.devlinks.clone(),
+            size: self.size,
+            partition: match &self.partition {
+                Some(partition) => partition.to_mbus(),
+                None => mbus::Partition {
+                    ..Default::default()
+                },
+            },
+            filesystem: match &self.filesystem {
+                Some(filesystem) => filesystem.to_mbus(),
+                None => mbus::Filesystem {
+                    ..Default::default()
+                },
+            },
+            available: self.available,
+        }
+    }
+}
+
+/// Pool Agent conversions
+
+impl RpcToMessageBus for rpc::Pool {
+    type BusMessage = mbus::Pool;
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            node: Default::default(),
+            id: self.name.clone().into(),
+            disks: self.disks.clone(),
+            state: self.state.into(),
+            capacity: self.capacity,
+            used: self.used,
+        }
+    }
+}
+
+impl RpcToMessageBus for rpc::Replica {
+    type BusMessage = mbus::Replica;
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            node: Default::default(),
+            uuid: self.uuid.clone().into(),
+            pool: self.pool.clone().into(),
+            thin: self.thin,
+            size: self.size,
+            share: self.share.into(),
+            uri: self.uri.clone(),
+        }
+    }
+}
+
+/// Volume Agent conversions
+
+impl RpcToMessageBus for rpc::Nexus {
+    type BusMessage = mbus::Nexus;
+
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            node: Default::default(),
+            uuid: self.uuid.clone().into(),
+            size: self.size,
+            state: NexusState::from(self.state),
+            children: self.children.iter().map(|c| c.to_mbus()).collect(),
+            device_uri: self.device_uri.clone(),
+            rebuilds: self.rebuilds,
+        }
+    }
+}
+
+impl RpcToMessageBus for rpc::Child {
+    type BusMessage = mbus::Child;
+
+    fn to_mbus(&self) -> Self::BusMessage {
+        Self::BusMessage {
+            uri: self.uri.clone().into(),
+            state: ChildState::from(self.state),
+            rebuild_progress: if self.rebuild_progress >= 0 {
+                Some(self.rebuild_progress)
+            } else {
+                None
+            },
+        }
+    }
+}
+
+/// Trait for converting message bus messages to rpc messages.
+pub trait MessageBusToRpc {
+    /// RPC message type.
+    type RpcMessage;
+    /// Conversion of message bus message to rpc message.
+    fn to_rpc(&self) -> Self::RpcMessage;
+}
+
+/// Pool Agent Conversions
+
+impl MessageBusToRpc for mbus::CreateReplica {
+    type RpcMessage = rpc::CreateReplicaRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+            pool: self.pool.clone().into(),
+            thin: self.thin,
+            size: self.size,
+            share: self.share.clone() as i32,
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::ShareReplica {
+    type RpcMessage = rpc::ShareReplicaRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+            share: self.protocol.clone() as i32,
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::UnshareReplica {
+    type RpcMessage = rpc::ShareReplicaRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+            share: Protocol::Off as i32,
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::CreatePool {
+    type RpcMessage = rpc::CreatePoolRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            name: self.id.clone().into(),
+            disks: self.disks.clone(),
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::DestroyReplica {
+    type RpcMessage = rpc::DestroyReplicaRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::DestroyPool {
+    type RpcMessage = rpc::DestroyPoolRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            name: self.id.clone().into(),
+        }
+    }
+}
+
+/// Volume Agent Conversions
+
+impl MessageBusToRpc for mbus::CreateNexus {
+    type RpcMessage = rpc::CreateNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+            size: self.size,
+            children: self.children.iter().map(|c| c.to_string()).collect(),
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::ShareNexus {
+    type RpcMessage = rpc::PublishNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+            key: self.key.clone().unwrap_or_default(),
+            share: self.protocol.clone() as i32,
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::UnshareNexus {
+    type RpcMessage = rpc::UnpublishNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::DestroyNexus {
+    type RpcMessage = rpc::DestroyNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.uuid.clone().into(),
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::AddNexusChild {
+    type RpcMessage = rpc::AddChildNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.nexus.clone().into(),
+            uri: self.uri.clone().into(),
+            norebuild: !self.auto_rebuild,
+        }
+    }
+}
+
+impl MessageBusToRpc for mbus::RemoveNexusChild {
+    type RpcMessage = rpc::RemoveChildNexusRequest;
+    fn to_rpc(&self) -> Self::RpcMessage {
+        Self::RpcMessage {
+            uuid: self.nexus.clone().into(),
+            uri: self.uri.clone().into(),
+        }
+    }
+}
diff --git a/control-plane/agents/common/src/wrapper/v0/pool.rs b/control-plane/agents/common/src/wrapper/v0/pool.rs
index fddbeab1d..4165efa89 100644
--- a/control-plane/agents/common/src/wrapper/v0/pool.rs
+++ b/control-plane/agents/common/src/wrapper/v0/pool.rs
@@ -1,4 +1,5 @@
 use super::{node_traits::*, *};
+use crate::wrapper::v0::msg_translation::{MessageBusToRpc, RpcToMessageBus};
 
 /// Implementation of the trait NodeWrapperPool for the pool service
 #[derive(Debug, Default, Clone)]
@@ -33,7 +34,7 @@ impl NodePoolTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let rpc_pool = ctx
             .client
-            .create_pool(bus_pool_to_rpc(&request))
+            .create_pool(request.to_rpc())
             .await
             .context(GrpcCreatePool {})?;
 
@@ -48,7 +49,7 @@ impl NodePoolTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .destroy_pool(bus_pool_destroy_to_rpc(request))
+            .destroy_pool(request.to_rpc())
             .await
             .context(GrpcDestroyPool {})?;
 
@@ -91,7 +92,7 @@ impl NodeReplicaTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let rpc_replica = ctx
             .client
-            .create_replica(bus_replica_to_rpc(request))
+            .create_replica(request.to_rpc())
             .await
             .context(GrpcCreateReplica {})?;
 
@@ -106,7 +107,7 @@ impl NodeReplicaTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let share = ctx
             .client
-            .share_replica(bus_replica_share_to_rpc(request))
+            .share_replica(request.to_rpc())
             .await
             .context(GrpcShareReplica {})?;
 
@@ -121,7 +122,7 @@ impl NodeReplicaTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .share_replica(bus_replica_unshare_to_rpc(request))
+            .share_replica(request.to_rpc())
             .await
             .context(GrpcUnshareReplica {})?;
 
@@ -136,7 +137,7 @@ impl NodeReplicaTrait for NodeWrapperPool {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .destroy_replica(bus_replica_destroy_to_rpc(request))
+            .destroy_replica(request.to_rpc())
             .await
             .context(GrpcDestroyReplica {})?;
 
@@ -281,15 +282,9 @@ impl_no_nexus!(NodeWrapperPool);
 
 /// convert rpc pool to a message bus pool
 fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: NodeId) -> Pool {
-    let rpc_pool = rpc_pool.clone();
-    Pool {
-        node: id,
-        id: rpc_pool.name.into(),
-        disks: rpc_pool.disks.clone(),
-        state: rpc_pool.state.into(),
-        capacity: rpc_pool.capacity,
-        used: rpc_pool.used,
-    }
+    let mut pool = rpc_pool.to_mbus();
+    pool.node = id;
+    pool
 }
 
 /// convert rpc replica to a message bus replica
@@ -297,77 +292,7 @@ fn rpc_replica_to_bus(
     rpc_replica: &rpc::mayastor::Replica,
     id: NodeId,
 ) -> Replica {
-    let rpc_replica = rpc_replica.clone();
-    Replica {
-        node: id,
-        uuid: rpc_replica.uuid.into(),
-        pool: rpc_replica.pool.into(),
-        thin: rpc_replica.thin,
-        size: rpc_replica.size,
-        share: rpc_replica.share.into(),
-        uri: rpc_replica.uri,
-    }
-}
-
-/// convert a message bus replica to an rpc replica
-fn bus_replica_to_rpc(
-    request: &CreateReplica,
-) -> rpc::mayastor::CreateReplicaRequest {
-    let request = request.clone();
-    rpc::mayastor::CreateReplicaRequest {
-        uuid: request.uuid.into(),
-        pool: request.pool.into(),
-        thin: request.thin,
-        size: request.size,
-        share: request.share as i32,
-    }
-}
-
-/// convert a message bus replica share to an rpc replica share
-fn bus_replica_share_to_rpc(
-    request: &ShareReplica,
-) -> rpc::mayastor::ShareReplicaRequest {
-    let request = request.clone();
-    rpc::mayastor::ShareReplicaRequest {
-        uuid: request.uuid.into(),
-        share: request.protocol as i32,
-    }
-}
-
-/// convert a message bus replica unshare to an rpc replica unshare
-fn bus_replica_unshare_to_rpc(
-    request: &UnshareReplica,
-) -> rpc::mayastor::ShareReplicaRequest {
-    let request = request.clone();
-    rpc::mayastor::ShareReplicaRequest {
-        uuid: request.uuid.into(),
-        share: Protocol::Off as i32,
-    }
-}
-
-/// convert a message bus pool to an rpc pool
-fn bus_pool_to_rpc(request: &CreatePool) -> rpc::mayastor::CreatePoolRequest {
-    let request = request.clone();
-    rpc::mayastor::CreatePoolRequest {
-        name: request.id.into(),
-        disks: request.disks,
-    }
-}
-
-/// convert a message bus replica destroy to an rpc replica destroy
-fn bus_replica_destroy_to_rpc(
-    request: &DestroyReplica,
-) -> rpc::mayastor::DestroyReplicaRequest {
-    rpc::mayastor::DestroyReplicaRequest {
-        uuid: request.uuid.clone().into(),
-    }
-}
-
-/// convert a message bus pool destroy to an rpc pool destroy
-fn bus_pool_destroy_to_rpc(
-    request: &DestroyPool,
-) -> rpc::mayastor::DestroyPoolRequest {
-    rpc::mayastor::DestroyPoolRequest {
-        name: request.id.clone().into(),
-    }
+    let mut replica = rpc_replica.to_mbus();
+    replica.node = id;
+    replica
 }
diff --git a/control-plane/agents/common/src/wrapper/v0/volume.rs b/control-plane/agents/common/src/wrapper/v0/volume.rs
index 96a8f2822..646ced2da 100644
--- a/control-plane/agents/common/src/wrapper/v0/volume.rs
+++ b/control-plane/agents/common/src/wrapper/v0/volume.rs
@@ -1,4 +1,5 @@
 use super::{node_traits::*, *};
+use crate::wrapper::v0::msg_translation::{MessageBusToRpc, RpcToMessageBus};
 use mbus_api::Message;
 
 /// Implementation of the trait NodeWrapperVolume for the pool service
@@ -144,7 +145,7 @@ impl NodeNexusTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let rpc_nexus = ctx
             .client
-            .create_nexus(bus_nexus_to_rpc(request))
+            .create_nexus(request.to_rpc())
             .await
             .context(GrpcCreateNexus {})?;
         Ok(rpc_nexus_to_bus(
@@ -161,7 +162,7 @@ impl NodeNexusTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .destroy_nexus(bus_nexus_destroy_to_rpc(request))
+            .destroy_nexus(request.to_rpc())
             .await
             .context(GrpcDestroyNexus {})?;
         Ok(())
@@ -175,7 +176,7 @@ impl NodeNexusTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let share = ctx
             .client
-            .publish_nexus(bus_nexus_share_to_rpc(request))
+            .publish_nexus(request.to_rpc())
             .await
             .context(GrpcShareNexus {})?;
         Ok(share.into_inner().device_uri)
@@ -189,7 +190,7 @@ impl NodeNexusTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .unpublish_nexus(bus_nexus_unshare_to_rpc(request))
+            .unpublish_nexus(request.to_rpc())
             .await
             .context(GrpcUnshareNexus {})?;
         Ok(())
@@ -224,10 +225,10 @@ impl NodeNexusChildTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let rpc_child = ctx
             .client
-            .add_child_nexus(bus_nexus_child_add_to_rpc(request))
+            .add_child_nexus(request.to_rpc())
             .await
             .context(GrpcDestroyNexus {})?;
-        Ok(rpc_child_to_bus(&rpc_child.into_inner()))
+        Ok(rpc_child.into_inner().to_mbus())
     }
 
     /// Remove a child from its parent nexus via gRPC
@@ -238,7 +239,7 @@ impl NodeNexusChildTrait for NodeWrapperVolume {
         let mut ctx = self.grpc_client().await?;
         let _ = ctx
             .client
-            .remove_child_nexus(bus_nexus_child_remove_to_rpc(request))
+            .remove_child_nexus(request.to_rpc())
             .await
             .context(GrpcDestroyNexus {})?;
         Ok(())
@@ -368,83 +369,7 @@ impl NodeWrapperVolume {
 }
 
 fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: NodeId) -> Nexus {
-    let rpc_nexus = rpc_nexus.clone();
-    Nexus {
-        node: id,
-        uuid: rpc_nexus.uuid.into(),
-        size: rpc_nexus.size,
-        state: NexusState::from(rpc_nexus.state),
-        children: rpc_nexus
-            .children
-            .iter()
-            .map(|c| rpc_child_to_bus(&c))
-            .collect(),
-        device_uri: rpc_nexus.device_uri.clone(),
-        rebuilds: rpc_nexus.rebuilds,
-    }
-}
-fn rpc_child_to_bus(rpc_child: &rpc::mayastor::Child) -> Child {
-    let rpc_child = rpc_child.clone();
-    Child {
-        uri: rpc_child.uri.into(),
-        state: ChildState::from(rpc_child.state),
-        rebuild_progress: if rpc_child.rebuild_progress >= 0 {
-            Some(rpc_child.rebuild_progress)
-        } else {
-            None
-        },
-    }
-}
-fn bus_nexus_to_rpc(
-    request: &CreateNexus,
-) -> rpc::mayastor::CreateNexusRequest {
-    let request = request.clone();
-    rpc::mayastor::CreateNexusRequest {
-        uuid: request.uuid.into(),
-        size: request.size,
-        children: request.children.iter().map(|c| c.to_string()).collect(),
-    }
-}
-fn bus_nexus_share_to_rpc(
-    request: &ShareNexus,
-) -> rpc::mayastor::PublishNexusRequest {
-    let request = request.clone();
-    rpc::mayastor::PublishNexusRequest {
-        uuid: request.uuid.into(),
-        key: request.key.clone().unwrap_or_default(),
-        share: request.protocol as i32,
-    }
-}
-fn bus_nexus_unshare_to_rpc(
-    request: &UnshareNexus,
-) -> rpc::mayastor::UnpublishNexusRequest {
-    rpc::mayastor::UnpublishNexusRequest {
-        uuid: request.uuid.clone().into(),
-    }
-}
-fn bus_nexus_destroy_to_rpc(
-    request: &DestroyNexus,
-) -> rpc::mayastor::DestroyNexusRequest {
-    rpc::mayastor::DestroyNexusRequest {
-        uuid: request.uuid.clone().into(),
-    }
-}
-fn bus_nexus_child_add_to_rpc(
-    request: &AddNexusChild,
-) -> rpc::mayastor::AddChildNexusRequest {
-    let request = request.clone();
-    rpc::mayastor::AddChildNexusRequest {
-        uuid: request.nexus.into(),
-        uri: request.uri.into(),
-        norebuild: !request.auto_rebuild,
-    }
-}
-fn bus_nexus_child_remove_to_rpc(
-    request: &RemoveNexusChild,
-) -> rpc::mayastor::RemoveChildNexusRequest {
-    let request = request.clone();
-    rpc::mayastor::RemoveChildNexusRequest {
-        uuid: request.nexus.into(),
-        uri: request.uri.into(),
-    }
+    let mut nexus = rpc_nexus.to_mbus();
+    nexus.node = id;
+    nexus
 }
diff --git a/control-plane/agents/node/src/server.rs b/control-plane/agents/node/src/server.rs
index 280976d7a..254af7e81 100644
--- a/control-plane/agents/node/src/server.rs
+++ b/control-plane/agents/node/src/server.rs
@@ -1,11 +1,17 @@
-use async_trait::async_trait;
-use common::*;
-use mbus_api::{v0::*, *};
 use std::{collections::HashMap, convert::TryInto, marker::PhantomData};
+
+use async_trait::async_trait;
 use structopt::StructOpt;
 use tokio::sync::Mutex;
 use tracing::{error, info};
 
+use ::rpc::mayastor::{
+    mayastor_client::MayastorClient,
+    ListBlockDevicesRequest,
+};
+use common::{wrapper::v0::msg_translation::RpcToMessageBus, *};
+use mbus_api::{v0::*, *};
+
 #[derive(Debug, StructOpt)]
 struct CliArgs {
     /// The Nats Server URL to connect to
@@ -176,6 +182,62 @@ impl ServiceSubscriber for ServiceHandler<GetNodes> {
     }
 }
 
+#[async_trait]
+impl ServiceSubscriber for ServiceHandler<GetBlockDevices> {
+    async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> {
+        let request: ReceivedMessage<GetBlockDevices> =
+            args.request.try_into()?;
+        let store: &NodeStore = args.context.get_state()?;
+        let nodes = store
+            .get_nodes()
+            .await
+            .into_iter()
+            .filter(|n| n.id == request.inner().node)
+            .collect::<Vec<Node>>();
+
+        if nodes.is_empty() {
+            return Err(Error::ServiceError {
+                message: format!(
+                    "Node with id {} not found",
+                    request.inner().node
+                ),
+            });
+        }
+
+        // Only expect one node to match the given ID.
+        assert_eq!(nodes.len(), 1);
+
+        let mut client = MayastorClient::connect(format!(
+            "http://{}",
+            nodes[0].grpc_endpoint
+        ))
+        .await
+        .unwrap();
+
+        // Issue the gRPC request
+        let response = client
+            .list_block_devices(ListBlockDevicesRequest {
+                all: request.inner().all,
+            })
+            .await
+            .unwrap()
+            .into_inner();
+
+        // Convert the rpc types into message bus types before sending the
+        // reply.
+        let bdevs = response
+            .devices
+            .iter()
+            .map(|rpc_bdev| rpc_bdev.to_mbus())
+            .collect::<Vec<BlockDevice>>();
+        request.reply(BlockDevices(bdevs)).await
+    }
+
+    fn filter(&self) -> Vec<MessageId> {
+        vec![GetBlockDevices::default().id()]
+    }
+}
+
 fn init_tracing() {
     if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
         tracing_subscriber::fmt().with_env_filter(filter).init();
@@ -202,6 +264,7 @@ async fn server(cli_args: CliArgs) {
         .with_channel(ChannelVs::Node)
         .with_default_liveness()
         .with_subscription(ServiceHandler::<GetNodes>::default())
+        .with_subscription(ServiceHandler::<GetBlockDevices>::default())
         .run()
         .await;
 }
diff --git a/control-plane/mbus-api/src/message_bus/v0.rs b/control-plane/mbus-api/src/message_bus/v0.rs
index 7a92a80fb..a418681e3 100644
--- a/control-plane/mbus-api/src/message_bus/v0.rs
+++ b/control-plane/mbus-api/src/message_bus/v0.rs
@@ -248,6 +248,14 @@ pub trait MessageBusTrait: Sized {
     ) -> BusResult {
         Ok(request.request().await?)
     }
+
+    /// Get block devices on a node
+    #[tracing::instrument(level = "debug", err)]
+    async fn get_block_devices(
+        request: GetBlockDevices,
+    ) -> BusResult<BlockDevices> {
+        Ok(request.request().await?)
+    }
 }
 
 /// Implementation of the bus interface trait
diff --git a/control-plane/mbus-api/src/v0.rs b/control-plane/mbus-api/src/v0.rs
index 1d299be78..e4b56ab90 100644
--- a/control-plane/mbus-api/src/v0.rs
+++ b/control-plane/mbus-api/src/v0.rs
@@ -107,6 +107,8 @@ pub enum MessageIdVs {
     RemoveVolumeNexus,
     /// Generic JSON gRPC message
     JsonGrpc,
+    /// Get block devices
+    GetBlockDevices,
 }
 
 // Only V0 should export this macro
@@ -992,3 +994,79 @@ pub struct JsonGrpcRequest {
 }
 
 bus_impl_message_all!(JsonGrpcRequest, JsonGrpc, Value, JsonGrpc);
+
+/// Partition information
+#[derive(
+    Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq, Apiv2Schema,
+)]
+pub struct Partition {
+    /// devname of parent device to which this partition belongs
+    pub parent: String,
+    /// partition number
+    pub number: u32,
+    /// partition name
+    pub name: String,
+    /// partition scheme: gpt, dos, ...
+    pub scheme: String,
+    /// partition type identifier
+    pub typeid: String,
+    /// UUID identifying partition
+    pub uuid: String,
+}
+
+/// Filesystem information
+#[derive(
+    Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq, Apiv2Schema,
+)]
+pub struct Filesystem {
+    /// filesystem type: ext3, ntfs, ...
+    pub fstype: String,
+    /// volume label
+    pub label: String,
+    /// UUID identifying the volume (filesystem)
+    pub uuid: String,
+    /// path where filesystem is currently mounted
+    pub mountpoint: String,
+}
+
+/// Block device information
+#[derive(
+    Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq, Apiv2Schema,
+)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockDevice {
+    /// entry in /dev associated with device
+    pub devname: String,
+    /// currently "disk" or "partition"
+    pub devtype: String,
+    /// major device number
+    pub devmajor: u32,
+    /// minor device number
+    pub devminor: u32,
+    /// device model - useful for identifying mayastor devices
+    pub model: String,
+    /// official device path
+    pub devpath: String,
+    /// list of udev generated symlinks by which device may be identified
+    pub devlinks: Vec<String>,
+    /// size of device in (512 byte) blocks
+    pub size: u64,
+    /// partition information in case where device represents a partition
+    pub partition: Partition,
+    /// filesystem information in case where a filesystem is present
+    pub filesystem: Filesystem,
+    /// identifies if device is available for use (ie. is not "currently" in
+    /// use)
+    pub available: bool,
+}
+/// Get block devices
+#[derive(Serialize, Deserialize, Default, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBlockDevices {
+    /// id of the mayastor instance
+    pub node: NodeId,
+    /// specifies whether to get all devices or only usable devices
+    pub all: bool,
+}
+bus_impl_vector_request!(BlockDevices, BlockDevice);
+bus_impl_message_all!(GetBlockDevices, GetBlockDevices, BlockDevices, Node);
diff --git a/control-plane/rest/service/src/v0/block_devices.rs b/control-plane/rest/service/src/v0/block_devices.rs
new file mode 100644
index 000000000..96d9baff1
--- /dev/null
+++ b/control-plane/rest/service/src/v0/block_devices.rs
@@ -0,0 +1,36 @@
+use super::*;
+use mbus_api::v0::GetBlockDevices;
+
+pub(super) fn configure(cfg: &mut paperclip::actix::web::ServiceConfig) {
+    cfg.service(get_block_devices);
+}
+
+// Get block devices takes a query parameter 'all' which is used to determine
+// whether to return all found devices or only those that are usable.
+// Omitting the query parameter will result in all block devices being shown.
+//
+// # Examples
+// Get only usable block devices with query parameter:
+//    curl -X GET "https://localhost:8080/v0/nodes/mayastor/block_devices?all=false" \
+//    -H "accept: application/json"
+//
+// Get all block devices with query parameter:
+//    curl -X GET "https://localhost:8080/v0/nodes/mayastor/block_devices?all=true" \
+//    -H "accept: application/json" -k
+//
+// Get all block devices without query parameter:
+//    curl -X GET "https://localhost:8080/v0/nodes/mayastor/block_devices" \
+//    -H "accept: application/json" -k
+//
+#[get("/v0", "/nodes/{node}/block_devices", tags(BlockDevices))]
+async fn get_block_devices(
+    web::Query(info): web::Query<GetBlockDeviceQueryParams>,
+    web::Path(node): web::Path<NodeId>,
+) -> Result<Json<Vec<BlockDevice>>, RestError<Error>> {
+    RestRespond::result(Ok(MessageBus::get_block_devices(GetBlockDevices {
+        node,
+        all: info.all.unwrap_or(true),
+    })
+    .await?
+    .into_inner()))
+}
diff --git a/control-plane/rest/service/src/v0/mod.rs b/control-plane/rest/service/src/v0/mod.rs
index 71966ab4b..877fb1820 100644
--- a/control-plane/rest/service/src/v0/mod.rs
+++ b/control-plane/rest/service/src/v0/mod.rs
@@ -2,6 +2,7 @@
 //! Version 0 of the URI's
 //!
Ex: /v0/nodes +pub mod block_devices; pub mod children; pub mod jsongrpc; pub mod nexuses; @@ -47,6 +48,7 @@ fn configure(cfg: &mut paperclip::actix::web::ServiceConfig) { children::configure(cfg); volumes::configure(cfg); jsongrpc::configure(cfg); + block_devices::configure(cfg); } pub(super) fn configure_api( diff --git a/control-plane/rest/src/versions/v0.rs b/control-plane/rest/src/versions/v0.rs index 45c192131..23e57c73d 100644 --- a/control-plane/rest/src/versions/v0.rs +++ b/control-plane/rest/src/versions/v0.rs @@ -624,3 +624,12 @@ impl Into for RestRespond { } } } + +/// Contains the query parameters that can be passed when calling +/// get_block_devices +#[derive(Deserialize, Apiv2Schema)] +#[serde(rename_all = "camelCase")] +pub struct GetBlockDeviceQueryParams { + /// specifies whether to list all devices or only usable ones + pub all: Option, +} From e2f7b8dab9158e8d70c226a99ab7bb5e2136ac56 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Tue, 16 Feb 2021 17:00:59 +0000 Subject: [PATCH 13/78] fix(ci): openapi-check should fail The pre-commit gives us this for free but not on Jenkins so manually test if the spec files changed by using git diff. --- control-plane/rest/openapi-specs/v0_api_spec.json | 2 +- scripts/openapi-check.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/control-plane/rest/openapi-specs/v0_api_spec.json b/control-plane/rest/openapi-specs/v0_api_spec.json index 042c1927d..26027f0fd 100644 --- a/control-plane/rest/openapi-specs/v0_api_spec.json +++ b/control-plane/rest/openapi-specs/v0_api_spec.json @@ -1 +1 @@ -{"swagger":"2.0","definitions":{"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{repl
ica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor RESTful API"}} \ No newline at end of file +{"swagger":"2.0","definitions":{"BlockDevice":{"description":"Block device information","type":"object","properties":{"available":{"description":"identifies if device is available for use (ie. 
is not \"currently\" in\n use)","type":"boolean"},"devlinks":{"description":"list of udev generated symlinks by which device may be identified","type":"array","items":{"type":"string"}},"devmajor":{"description":"major device number","type":"integer","format":"int32"},"devminor":{"description":"minor device number","type":"integer","format":"int32"},"devname":{"description":"entry in /dev associated with device","type":"string"},"devpath":{"description":"official device path","type":"string"},"devtype":{"description":"currently \"disk\" or \"partition\"","type":"string"},"filesystem":{"description":"filesystem information in case where a filesystem is present","type":"object","properties":{"fstype":{"description":"filesystem type: ext3, ntfs, ...","type":"string"},"label":{"description":"volume label","type":"string"},"mountpoint":{"description":"path where filesystem is currently mounted","type":"string"},"uuid":{"description":"UUID identifying the volume (filesystem)","type":"string"}},"required":["fstype","label","mountpoint","uuid"]},"model":{"description":"device model - useful for identifying mayastor devices","type":"string"},"partition":{"description":"partition information in case where device represents a partition","type":"object","properties":{"name":{"description":"partition name","type":"string"},"number":{"description":"partition number","type":"integer","format":"int32"},"parent":{"description":"devname of parent device to which this partition belongs","type":"string"},"scheme":{"description":"partition scheme: gpt, dos, ...","type":"string"},"typeid":{"description":"partition type identifier","type":"string"},"uuid":{"description":"UUID identifying partition","type":"string"}},"required":["name","number","parent","scheme","typeid","uuid"]},"size":{"description":"size of device in (512 byte) blocks","type":"integer","format":"int64"}},"required":["available","devlinks","devmajor","devminor","devname","devpath","devtype","filesystem","model","partition","size"]},"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/block_devices":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/BlockDevice"}}}},"parameters":[{"description":"specifies whether to list all devices or only usable 
ones","in":"query","name":"all","type":"boolean"},{"in":"path","name":"node","required":true,"type":"string"}],"tags":["BlockDevices"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor 
RESTful API"}} \ No newline at end of file diff --git a/scripts/openapi-check.sh b/scripts/openapi-check.sh index 9d52ab3c7..694de11d8 100755 --- a/scripts/openapi-check.sh +++ b/scripts/openapi-check.sh @@ -20,3 +20,6 @@ if [[ $check_rest_src = "yes" ]]; then fi cargo run --bin rest -- -d -o $SPECS + +# If the spec was modified then fail the check +git diff --exit-code $SPECS From c1f87624598a11d08c55b0af2f26725d6d607bcd Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Wed, 17 Feb 2021 10:06:17 +0000 Subject: [PATCH 14/78] feat(rest client): add support for new rest calls Add rest client support and test cases for calling JSON gRPC methods and listing block devices. --- control-plane/rest/src/lib.rs | 5 +++++ control-plane/rest/src/versions/v0.rs | 28 +++++++++++++++++++++++++++ control-plane/rest/tests/v0_test.rs | 24 ++++++++++++++++++++++- 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs index 023420188..697241ed9 100644 --- a/control-plane/rest/src/lib.rs +++ b/control-plane/rest/src/lib.rs @@ -232,4 +232,9 @@ impl JsonGeneric { inner: value, } } + + /// Get inner value + pub fn into_inner(self) -> serde_json::Value { + self.inner + } } diff --git a/control-plane/rest/src/versions/v0.rs b/control-plane/rest/src/versions/v0.rs index 23e57c73d..57f2958d7 100644 --- a/control-plane/rest/src/versions/v0.rs +++ b/control-plane/rest/src/versions/v0.rs @@ -1,5 +1,6 @@ #![allow(clippy::field_reassign_with_default)] use super::super::ActixRestClient; +use crate::JsonGeneric; use actix_web::{ body::Body, http::StatusCode, @@ -228,6 +229,16 @@ pub trait RestClient { -> anyhow::Result; /// Destroy volume async fn destroy_volume(&self, args: DestroyVolume) -> anyhow::Result<()>; + /// Generic JSON gRPC call + async fn json_grpc( + &self, + args: JsonGrpcRequest, + ) -> anyhow::Result; + /// Get block devices + async fn get_block_devices( + &self, + args: GetBlockDevices, + ) -> anyhow::Result>; } #[derive(Display, Debug)] @@ -495,6 +506,23 @@ impl RestClient for ActixRestClient { self.del(urn).await?; Ok(()) } + + async fn json_grpc( + &self, + args: JsonGrpcRequest, + ) -> anyhow::Result { + let urn = format!("/v0/nodes/{}/jsongrpc/{}", args.node, args.method); + self.put(urn, Body::from(args.params.to_string())).await + } + + async fn get_block_devices( + &self, + args: GetBlockDevices, + ) -> anyhow::Result> { + let urn = + format!("/v0/nodes/{}/block_devices?all={}", args.node, args.all); + self.get_vec(urn).await + } } impl Into for CreatePoolBody { diff --git a/control-plane/rest/tests/v0_test.rs b/control-plane/rest/tests/v0_test.rs index 9af46e5ca..8d27a14cb 100644 --- a/control-plane/rest/tests/v0_test.rs +++ b/control-plane/rest/tests/v0_test.rs @@ -12,12 +12,13 @@ async fn wait_for_services() { Liveness {}.request_on(ChannelVs::Node).await.unwrap(); Liveness {}.request_on(ChannelVs::Pool).await.unwrap(); Liveness {}.request_on(ChannelVs::Volume).await.unwrap(); + Liveness {}.request_on(ChannelVs::JsonGrpc).await.unwrap(); } // to avoid waiting for timeouts async fn orderly_start(test: &ComposeTest) { test.start_containers(vec![ - "nats", "node", "pool", "volume", "rest", "jaeger", + "nats", "node", "pool", "volume", "jsongrpc", "rest", "jaeger", ]) .await .unwrap(); @@ -80,6 +81,10 @@ async fn client() { .with_portmap("6831/udp", "6831/udp") .with_portmap("6832/udp", "6832/udp"), ) + .add_container_bin( + "jsongrpc", + Binary::from_dbg("jsongrpc").with_nats("-n"), + ) .with_default_tracing() .autorun(false) 
From c63a7a38dbc0670ffa8713cf7ec0d735dbd894ab Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Wed, 17 Feb 2021 14:24:06 +0000
Subject: [PATCH 15/78] fix: update CSI sidecars used with mayastor

CAS-651 Update for bug fixes:
  csi-provisioner 1.6.0 -> 2.1.0
  csi-attacher 2.2.0 -> 3.1.0

E2E: Re-enable the checks for mayastor pods on uninstall.

Turn off the topology feature gate; this will be addressed in the future.

---
 chart/templates/moac-deployment.yaml |  8 ++++----
 csi/moac/csi.ts                      |  4 ++--
 deploy/moac-deployment.yaml          |  6 +++---
 test/e2e/uninstall/uninstall_test.go | 14 ++------------
 4 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/chart/templates/moac-deployment.yaml b/chart/templates/moac-deployment.yaml
index 40d0d6cbb..83b429566 100644
--- a/chart/templates/moac-deployment.yaml
+++ b/chart/templates/moac-deployment.yaml
@@ -16,11 +16,11 @@ spec:
       serviceAccount: moac
       containers:
       - name: csi-provisioner
-        image: quay.io/k8scsi/csi-provisioner:v1.6.0
+        image: quay.io/k8scsi/csi-provisioner:v2.1.0
         args:
         - "--v=2"
         - "--csi-address=$(ADDRESS)"
-        - "--feature-gates=Topology=true"
+        - "--feature-gates=Topology=false"
         env:
         - name: ADDRESS
           value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -32,7 +32,7 @@ spec:
           mountPath: /var/lib/csi/sockets/pluginproxy/
 
       - name: csi-attacher
-        image: quay.io/k8scsi/csi-attacher:v2.2.0
+        image: quay.io/k8scsi/csi-attacher:v3.1.0
         args:
         - "--v=2"
         - "--csi-address=$(ADDRESS)"
@@ -63,4 +63,4 @@ spec:
           mountPath: /var/lib/csi/sockets/pluginproxy/
       volumes:
       - name: socket-dir
-        emptyDir:
\ No newline at end of file
+        emptyDir:
diff --git a/csi/moac/csi.ts b/csi/moac/csi.ts
index 3c0285baa..06fece962 100644
--- a/csi/moac/csi.ts
+++ b/csi/moac/csi.ts
@@ -481,13 +481,13 @@ class CsiServer {
     }
 
     // This was used in the old days for NBD protocol
-    const accessibleTopology: TopologyKeys[] = [];
+    const topologies: TopologyKeys[] = [];
 
     this._endRequest(request, null, {
       volume: {
         capacityBytes: volume.getSize(),
         volumeId: uuid,
-        accessibleTopology,
+        accessibleTopology: topologies,
         // parameters defined in the storage class are only presented
         // to the CSI driver createVolume method.
         // Propagate them to other CSI driver methods involved in
diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml
index 61450ae78..e4c096b1f 100644
--- a/deploy/moac-deployment.yaml
+++ b/deploy/moac-deployment.yaml
@@ -18,11 +18,11 @@ spec:
       serviceAccount: moac
      containers:
       - name: csi-provisioner
-        image: quay.io/k8scsi/csi-provisioner:v1.6.0
+        image: quay.io/k8scsi/csi-provisioner:v2.1.0
         args:
         - "--v=2"
         - "--csi-address=$(ADDRESS)"
-        - "--feature-gates=Topology=true"
+        - "--feature-gates=Topology=false"
         env:
         - name: ADDRESS
           value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -32,7 +32,7 @@ spec:
           mountPath: /var/lib/csi/sockets/pluginproxy/
 
       - name: csi-attacher
-        image: quay.io/k8scsi/csi-attacher:v2.2.0
+        image: quay.io/k8scsi/csi-attacher:v3.1.0
         args:
         - "--v=2"
         - "--csi-address=$(ADDRESS)"
diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go
index c27f403f6..92b4061a9 100644
--- a/test/e2e/uninstall/uninstall_test.go
+++ b/test/e2e/uninstall/uninstall_test.go
@@ -133,11 +133,6 @@ func teardownMayastor() {
 	if cleanup {
 		// Attempt to forcefully delete mayastor pods
 		forceDeleted := common.ForceDeleteMayastorPods()
-		// FIXME: Temporarily disable this assert CAS-651 has been fixed
-		// Expect(forceDeleted).To(BeFalse())
-		if forceDeleted {
-			logf.Log.Info("WARNING: Mayastor pods were force deleted at uninstall!!!")
-		}
 		deleteNamespace()
 		// delete the namespace prior to possibly failing the uninstall
 		// to yield a reusable cluster on fail.
@@ -145,14 +140,9 @@
 		Expect(podCount).To(BeZero())
 		Expect(pvcsFound).To(BeFalse())
 		Expect(pvcsDeleted).To(BeTrue())
+		Expect(forceDeleted).To(BeFalse())
 	} else {
-		// FIXME: Temporarily disable this assert CAS-651 has been fixed
-		// and force delete lingering mayastor pods.
-		// Expect(common.MayastorUndeletedPodCount()).To(Equal(0))
-		if common.MayastorUndeletedPodCount() != 0 {
-			logf.Log.Info("WARNING: Mayastor pods not deleted at uninstall, forcing deletion.")
-			common.ForceDeleteMayastorPods()
-		}
+		Expect(common.MayastorUndeletedPodCount()).To(Equal(0))
 		// More verbose here as deleting the namespace is often where this
 		// test hangs.
 		logf.Log.Info("Deleting the mayastor namespace")

From 75843d1fb9ebe3ba71ff88ca74b8faeef06f30be Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 16 Feb 2021 17:16:29 +0000
Subject: [PATCH 16/78] chore(deps): bump systeminformation

This fixes a security vulnerability and was originally submitted by
dependabot; however, someone needs to teach it about the new rules for
the format of commit messages ...

---
 test/grpc/package-lock.json | 6 +++---
 test/grpc/package.json      | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/grpc/package-lock.json b/test/grpc/package-lock.json
index 037e3c612..ac4908586 100644
--- a/test/grpc/package-lock.json
+++ b/test/grpc/package-lock.json
@@ -3169,9 +3169,9 @@
       }
     },
     "systeminformation": {
-      "version": "4.31.1",
-      "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.31.1.tgz",
-      "integrity": "sha512-dVCDWNMN8ncMZo5vbMCA5dpAdMgzafK2ucuJy5LFmGtp1cG6farnPg8QNvoOSky9SkFoEX1Aw0XhcOFV6TnLYA=="
+      "version": "5.3.1",
+      "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-5.3.1.tgz",
+      "integrity": "sha512-1xG+6xfKXSowtZsAlUp6jVaV0q+5rq9yUN+ds6Hp2dR3/EdEU5r0v74qi5YF8BCGZfmddiOwDNB0h7ge70fmYA=="
     },
     "table": {
       "version": "5.4.6",
diff --git a/test/grpc/package.json b/test/grpc/package.json
index 57b31aef6..2cc9546d8 100644
--- a/test/grpc/package.json
+++ b/test/grpc/package.json
@@ -24,7 +24,7 @@
     "read": "^1.0.7",
     "semistandard": "^14.2.0",
     "sleep-promise": "^8.0.1",
-    "systeminformation": "^4.31.1",
+    "systeminformation": "^5.3.1",
     "wtfnode": "^0.8.1"
   },
   "author": "Jan Kryl ",

From 002a6652a3b3055798f7fa73182596a02c79ec43 Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Wed, 17 Feb 2021 16:24:51 +0000
Subject: [PATCH 17/78] fix(openapi): parameter names should match

On OpenAPI v2 the name of each resource parameter should be the same
across all the operations (e.g. in this case get/delete).
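
To make the rule concrete: in the spec being replaced below, the GET operation
is registered under /pools/{id} while DELETE sits under /pools/{pool_id}, so a
single resource is split across two path items. A hypothetical plain actix-web
sketch of the same mistake and its fix; the handler names and route macros are
illustrative assumptions, not the actual pools.rs change:

use actix_web::{delete, get, web, HttpResponse, Responder};

// The mismatch: GET names the path parameter {id} ...
#[get("/v0/pools/{id}")]
async fn get_pool(path: web::Path<String>) -> impl Responder {
    HttpResponse::Ok().body(format!("pool {}", path.into_inner()))
}

// ... while DELETE names the same segment {pool_id}. OpenAPI v2 tooling then
// emits two distinct path items, "/pools/{id}" and "/pools/{pool_id}", for
// what is really one resource.
#[delete("/v0/pools/{pool_id}")]
async fn del_pool(path: web::Path<String>) -> impl Responder {
    let _pool_id = path.into_inner();
    HttpResponse::Ok().finish()
}

// The fix is to pick one name, e.g. {pool_id}, and use it for every
// operation, so both end up under a single "/pools/{pool_id}" path item.
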
---
 control-plane/rest/openapi-specs/v0_api_spec.json | 2 +-
 control-plane/rest/service/src/v0/pools.rs        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/control-plane/rest/openapi-specs/v0_api_spec.json b/control-plane/rest/openapi-specs/v0_api_spec.json
index 26027f0fd..441f4118c 100644
--- a/control-plane/rest/openapi-specs/v0_api_spec.json
+++ b/control-plane/rest/openapi-specs/v0_api_spec.json
@@ -1 +1 @@
-{"swagger":"2.0","definitions":{"BlockDevice":{"description":"Block device information","type":"object","properties":{"available":{"description":"identifies if device is available for use (ie.
is not \"currently\" in\n use)","type":"boolean"},"devlinks":{"description":"list of udev generated symlinks by which device may be identified","type":"array","items":{"type":"string"}},"devmajor":{"description":"major device number","type":"integer","format":"int32"},"devminor":{"description":"minor device number","type":"integer","format":"int32"},"devname":{"description":"entry in /dev associated with device","type":"string"},"devpath":{"description":"official device path","type":"string"},"devtype":{"description":"currently \"disk\" or \"partition\"","type":"string"},"filesystem":{"description":"filesystem information in case where a filesystem is present","type":"object","properties":{"fstype":{"description":"filesystem type: ext3, ntfs, ...","type":"string"},"label":{"description":"volume label","type":"string"},"mountpoint":{"description":"path where filesystem is currently mounted","type":"string"},"uuid":{"description":"UUID identifying the volume (filesystem)","type":"string"}},"required":["fstype","label","mountpoint","uuid"]},"model":{"description":"device model - useful for identifying mayastor devices","type":"string"},"partition":{"description":"partition information in case where device represents a partition","type":"object","properties":{"name":{"description":"partition name","type":"string"},"number":{"description":"partition number","type":"integer","format":"int32"},"parent":{"description":"devname of parent device to which this partition belongs","type":"string"},"scheme":{"description":"partition scheme: gpt, dos, ...","type":"string"},"typeid":{"description":"partition type identifier","type":"string"},"uuid":{"description":"UUID identifying partition","type":"string"}},"required":["name","number","parent","scheme","typeid","uuid"]},"size":{"description":"size of device in (512 byte) blocks","type":"integer","format":"int64"}},"required":["available","devlinks","devmajor","devminor","devname","devpath","devtype","filesystem","model","partition","size"]},"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/block_devices":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/BlockDevice"}}}},"parameters":[{"description":"specifies whether to list all devices or only usable 
ones","in":"query","name":"all","type":"boolean"},{"in":"path","name":"node","required":true,"type":"string"}],"tags":["BlockDevices"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor 
RESTful API"}} \ No newline at end of file +{"swagger":"2.0","definitions":{"BlockDevice":{"description":"Block device information","type":"object","properties":{"available":{"description":"identifies if device is available for use (ie. is not \"currently\" in\n use)","type":"boolean"},"devlinks":{"description":"list of udev generated symlinks by which device may be identified","type":"array","items":{"type":"string"}},"devmajor":{"description":"major device number","type":"integer","format":"int32"},"devminor":{"description":"minor device number","type":"integer","format":"int32"},"devname":{"description":"entry in /dev associated with device","type":"string"},"devpath":{"description":"official device path","type":"string"},"devtype":{"description":"currently \"disk\" or \"partition\"","type":"string"},"filesystem":{"description":"filesystem information in case where a filesystem is present","type":"object","properties":{"fstype":{"description":"filesystem type: ext3, ntfs, ...","type":"string"},"label":{"description":"volume label","type":"string"},"mountpoint":{"description":"path where filesystem is currently mounted","type":"string"},"uuid":{"description":"UUID identifying the volume (filesystem)","type":"string"}},"required":["fstype","label","mountpoint","uuid"]},"model":{"description":"device model - useful for identifying mayastor devices","type":"string"},"partition":{"description":"partition information in case where device represents a partition","type":"object","properties":{"name":{"description":"partition name","type":"string"},"number":{"description":"partition number","type":"integer","format":"int32"},"parent":{"description":"devname of parent device to which this partition belongs","type":"string"},"scheme":{"description":"partition scheme: gpt, dos, ...","type":"string"},"typeid":{"description":"partition type identifier","type":"string"},"uuid":{"description":"UUID identifying partition","type":"string"}},"required":["name","number","parent","scheme","typeid","uuid"]},"size":{"description":"size of device in (512 byte) blocks","type":"integer","format":"int64"}},"required":["available","devlinks","devmajor","devminor","devname","devpath","devtype","filesystem","model","partition","size"]},"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/block_devices":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/BlockDevice"}}}},"parameters":[{"description":"specifies whether to list all devices or only usable 
ones","in":"query","name":"all","type":"boolean"},{"in":"path","name":"node","required":true,"type":"string"}],"tags":["BlockDevices"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor RESTful API"}} \ 
No newline at end of file
diff --git a/control-plane/rest/service/src/v0/pools.rs b/control-plane/rest/service/src/v0/pools.rs
index 6d75a49da..631a34640 100644
--- a/control-plane/rest/service/src/v0/pools.rs
+++ b/control-plane/rest/service/src/v0/pools.rs
@@ -14,7 +14,7 @@ pub(super) fn configure(cfg: &mut paperclip::actix::web::ServiceConfig) {
 async fn get_pools() -> Result<Json<Vec<Pool>>, RestError> {
     RestRespond::result(MessageBus::get_pools(Filter::None).await)
 }
-#[get("/v0", "/pools/{id}", tags(Pools))]
+#[get("/v0", "/pools/{pool_id}", tags(Pools))]
 async fn get_pool(
     web::Path(pool_id): web::Path<String>,
 ) -> Result<Json<Pool>, RestError> {
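The one-line route fix above is easier to read with the pieces side by side: the route template, the extractor binding, and the "pool_id" path parameter recorded under "/pools/{pool_id}" in the regenerated spec all have to agree. A condensed sketch, with the handler body elided and the String parameter type an assumption:

    // Sketch: the {pool_id} template segment, the extractor binding and
    // the OpenAPI parameter name must all say "pool_id".
    #[get("/v0", "/pools/{pool_id}", tags(Pools))]
    async fn get_pool(
        web::Path(pool_id): web::Path<String>, // binds the {pool_id} segment
    ) -> Result<Json<Pool>, RestError> {
        // ... look the pool up via the message bus, as in the hunk above ...
    }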
From f1ac63ef30c14186fa34a5302b42e93e5cafe998 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Wed, 17 Feb 2021 13:25:06 +0100
Subject: [PATCH 18/78] test: uring add and removal

This commit updates the test cases to the MayastorTest model and uses
uring for bdevs.

Note: typically, our children are nvmf targets. The IO handling of nvmf
targets is different compared to uring. Test cases that use nvmf
targets can be found in nexus_add_remove.

---
 mayastor/tests/nexus_children_add_remove.rs | 148 ++++++++++++++++++++
 mayastor/tests/remove_child.rs              |  72 ----------
 2 files changed, 148 insertions(+), 72 deletions(-)
 create mode 100644 mayastor/tests/nexus_children_add_remove.rs
 delete mode 100644 mayastor/tests/remove_child.rs

diff --git a/mayastor/tests/nexus_children_add_remove.rs b/mayastor/tests/nexus_children_add_remove.rs
new file mode 100644
index 000000000..35fb54fb3
--- /dev/null
+++ b/mayastor/tests/nexus_children_add_remove.rs
@@ -0,0 +1,148 @@
+//!
+//! This test is roughly the same as the tests in nexus_add_remove. However,
+//! this test does not use nvmf targets but rather uring bdevs.
+
+use mayastor::bdev::{nexus_create, nexus_lookup};
+use once_cell::sync::OnceCell;
+
+static DISKNAME1: &str = "/tmp/disk1.img";
+static DISKNAME2: &str = "/tmp/disk2.img";
+static DISKNAME3: &str = "/tmp/disk3.img";
+
+use crate::common::MayastorTest;
+use mayastor::core::{MayastorCliArgs, Share};
+
+pub mod common;
+
+pub fn mayastor() -> &'static MayastorTest<'static> {
+    static MAYASTOR: OnceCell<MayastorTest<'static>> = OnceCell::new();
+
+    MAYASTOR.get_or_init(|| {
+        MayastorTest::new(MayastorCliArgs {
+            reactor_mask: "0x2".to_string(),
+            no_pci: true,
+            grpc_endpoint: "0.0.0.0".to_string(),
+            ..Default::default()
+        })
+    })
+}
+
+/// create a nexus with two file-based devices and, once created, share it
+/// and then remove one of the children
+#[tokio::test]
+async fn remove_children_from_nexus() {
+    // we can only start mayastor once, since we run it within the same
+    // process; during start, mayastor will create a thread for each of
+    // the cores in the mask (0x2 here).
+    //
+    // grpc is not used in this case; we use channels to send work to
+    // mayastor from the runtime here.
+
+    let ms = mayastor();
+
+    common::truncate_file(DISKNAME1, 64 * 1024);
+    common::truncate_file(DISKNAME2, 64 * 1024);
+
+    // create a nexus with two children
+    ms.spawn(async {
+        nexus_create(
+            "remove_from_nexus",
+            60 * 1024 * 1024,
+            None,
+            &[
+                format!("uring:///{}", DISKNAME1),
+                format!("uring:///{}", DISKNAME2),
+            ],
+        )
+        .await
+    })
+    .await
+    .expect("failed to create nexus");
+
+    // lookup the nexus and share it over nvmf
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("remove_from_nexus").expect("nexus is not found!");
+        nexus.share_nvmf().await
+    })
+    .await
+    .expect("failed to share nexus over nvmf");
+
+    // lookup the nexus, and remove a child
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("remove_from_nexus").expect("nexus is not found!");
+        nexus.remove_child(&format!("uring:///{}", DISKNAME1)).await
+    })
+    .await
+    .expect("failed to remove child from nexus");
+
+    // destroy it
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("remove_from_nexus").expect("nexus is not found!");
+        nexus.destroy().await.unwrap();
+    })
+    .await;
+
+    common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]);
+}
+
+/// similar to the test case above; however, instead of removing a child we
+/// add one
+#[tokio::test]
+async fn nexus_add_child() {
+    let ms = mayastor();
+    // we can only start mayastor once
+    common::truncate_file(DISKNAME1, 64 * 1024);
+    common::truncate_file(DISKNAME2, 64 * 1024);
+    common::truncate_file(DISKNAME3, 64 * 1024);
+
+    ms.spawn(async {
+        nexus_create(
+            "nexus_add_child",
+            60 * 1024 * 1024,
+            None,
+            &[
+                format!("uring:///{}", DISKNAME1),
+                format!("uring:///{}", DISKNAME2),
+            ],
+        )
+        .await
+        .expect("failed to create nexus");
+    })
+    .await;
+
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("nexus_add_child").expect("nexus is not found!");
+        nexus
+            .share_nvmf()
+            .await
+            .expect("failed to share nexus over nvmf");
+    })
+    .await;
+
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("nexus_add_child").expect("nexus is not found!");
+        nexus
+            .add_child(&format!("uring:///{}", DISKNAME3), false)
+            .await
+    })
+    .await
+    .unwrap();
+
+    ms.spawn(async {
+        let nexus =
+            nexus_lookup("nexus_add_child").expect("nexus is not found!");
+        nexus.destroy().await.unwrap();
+    })
+    .await;
+
+    common::delete_file(&[
+        DISKNAME1.into(),
+        DISKNAME2.into(),
+        DISKNAME3.into(),
+    ]);
+}
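As the commit message notes, the usual children are nvmf targets; from the nexus API's point of view the backend is selected purely by the child URI, so the uring variant above differs only in the URI it passes. A minimal sketch of the two forms (the nvmf address is illustrative, not taken from this patch):

    // A nexus child is addressed by URI; swapping backends is a URI change.
    let uring_child = format!("uring:///{}", "/tmp/disk1.img");
    let nvmf_child =
        "nvmf://10.1.0.2:8420/nqn.2019-05.io.openebs:disk0".to_string();
    let children = [uring_child, nvmf_child];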
diff --git a/mayastor/tests/remove_child.rs b/mayastor/tests/remove_child.rs
deleted file mode 100644
index 8e66390ab..000000000
--- a/mayastor/tests/remove_child.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use std::process::Command;
-
-use mayastor::{
-    bdev::{nexus_create, nexus_lookup, NexusStatus},
-    core::{
-        mayastor_env_stop,
-        Bdev,
-        MayastorCliArgs,
-        MayastorEnvironment,
-        Reactor,
-    },
-};
-
-static DISKNAME1: &str = "/tmp/disk1.img";
-static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512";
-
-static DISKNAME2: &str = "/tmp/disk2.img";
-static BDEVNAME2: &str = "aio:///tmp/disk2.img?blk_size=512";
-pub mod common;
-#[test]
-fn remove_child() {
-    common::mayastor_test_init();
-
-    // setup our test files
-
-    let output = Command::new("truncate")
-        .args(&["-s", "64m", DISKNAME1])
-        .output()
-        .expect("failed exec truncate");
-
-    assert_eq!(output.status.success(), true);
-
-    let output = Command::new("truncate")
-        .args(&["-s", "64m", DISKNAME2])
-        .output()
-        .expect("failed exec truncate");
-
-    assert_eq!(output.status.success(), true);
-    let rc = MayastorEnvironment::new(MayastorCliArgs::default())
-        .start(|| Reactor::block_on(works()).unwrap())
-        .unwrap();
-
-    assert_eq!(rc, 0);
-}
-
-async fn works() {
-    let child1 = BDEVNAME1;
-
-    //"nvmf://192.168.1.4:8420/nqn.2019-05.io.openebs:disk1".to_string();
-    let child2 = BDEVNAME2;
-    //"nvmf://192.168.1.4:8420/nqn.2019-05.io.openebs:disk2".to_string();
-
-    let children = vec![child1.into(), child2.into()];
-
-    nexus_create("hello", 512 * 131_072, None, &children)
-        .await
-        .unwrap();
-
-    let nexus = nexus_lookup("hello").unwrap();
-
-    // open the nexus in read write
-    let nd_bdev = Bdev::lookup_by_name("hello").expect("failed to lookup bdev");
-    let _nd = nd_bdev
-        .open(true)
-        .expect("failed open bdev")
-        .into_handle()
-        .unwrap();
-    assert_eq!(nexus.status(), NexusStatus::Online);
-
-    nexus.remove_child(BDEVNAME1).await.unwrap();
-    mayastor_env_stop(0);
-}
From b329f0842a4949179cbebee89561630adf885050 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Thu, 18 Feb 2021 12:19:24 +0100
Subject: [PATCH 19/78] ci: remove failing tests

The tests keep failing; the culprit seems to be that we run multiple
instances. These tests are simply too heavy/complex to run in CI this
way, and we have to come up with something else. The tests are removed
rather than disabled; it does not make sense to keep them around, as
they do get compiled.

---
 mayastor/tests/bdev_test.rs      |  337 --------
 mayastor/tests/child_io_error.rs |  207 -----
 mayastor/tests/io_job.rs         |  198 -----
 mayastor/tests/rebuild.rs        | 1264 ------------------------
 4 files changed, 2006 deletions(-)
 delete mode 100644 mayastor/tests/bdev_test.rs
 delete mode 100644 mayastor/tests/child_io_error.rs
 delete mode 100644 mayastor/tests/io_job.rs
 delete mode 100644 mayastor/tests/rebuild.rs

diff --git a/mayastor/tests/bdev_test.rs b/mayastor/tests/bdev_test.rs
deleted file mode 100644
index 240e8e474..000000000
--- a/mayastor/tests/bdev_test.rs
+++ /dev/null
@@ -1,337 +0,0 @@
-//!
-//! At a high level this is what is tested during
-//! this run. For each core we are assigned we will
-//! start a job
-//!
-//!
-//! +------------+                 +-------------------------+
-//! |            |                 |                         |
-//! |    job     |                 |  +--nvmf----> MS1       |
-//! |            |                 |  |                      |
-//! +------------+                 +-------------------------+
-//!       |
-//! +------------+                 +-------------------------+
-//! |            |                 |  |                      |
-//! |    nvmf    |                 |  +--nvmf----> MS2       |
-//  |            |                 |  |                      |
-//  +------------+                 +-------------------------+
-//!       |                           |
-//!       |                        +-------------------------+
-//!       |                        |  |                      |
-//!       |                        |  |                      |
-//!       +-+nvmf------>+  nexus   +--loop----> MS3          |
-//!                     |                                    |
-//!                     +-------------------------+
-//!
-//!
-//! The idea is that we then "hot remove" targets while
-//! the nexus is still able to process IO.
-//!
-//!
-//! When we encounter an IO problem, we must reconfigure all cores, (unless we
-//! use single cores of course) and this multi core reconfiguration is what we
-//! are trying to test here, and so we require a certain amount of cores to test
-//! this to begin with. Also, typically, no more than one mayastor instance will
-//! be bound to a particular core. As such we "spread" out cores as much as
-//! possible.
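The core-mask bookkeeping described above rewards a worked example: a reactor/cpu mask is a bitmap of core indices, so 0x3 (binary 0011) covers cores 0 and 1, and 0xc (binary 1100) covers cores 2 and 3. A minimal sketch of the mask construction used further down in this file (the exact format width is an assumption):

    // 1 << 1 selects core 1 and 1 << 2 selects core 2; OR-ing them gives 6.
    let mask = format!("{:#x}", (1u32 << 1) | (1u32 << 2));
    assert_eq!(mask, "0x6"); // binary 0110, i.e. cores 1 and 2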
-use std::{ - sync::{atomic::Ordering, Arc}, - time::Duration, -}; - -use once_cell::sync::OnceCell; - -const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; - -use common::compose::{Builder, ComposeTest, MayastorTest}; -use mayastor::{ - core::{ - io_driver, - io_driver::JobQueue, - Bdev, - Cores, - MayastorCliArgs, - SIG_RECEIVED, - }, - nexus_uri::bdev_create, -}; -use rpc::mayastor::{ - BdevShareRequest, - BdevUri, - CreateNexusRequest, - CreateReply, - ListNexusReply, - Null, - PublishNexusRequest, -}; - -use composer::Binary; -use mayastor::subsys::{Config, NvmeBdevOpts}; -use tokio::time::interval; - -pub mod common; - -static MAYASTOR: OnceCell = OnceCell::new(); -static DOCKER_COMPOSE: OnceCell = OnceCell::new(); - -/// create a malloc bdev and export them over nvmf, returns the URI of the -/// constructed target. -async fn create_target(container: &str) -> String { - let mut h = DOCKER_COMPOSE - .get() - .unwrap() - .grpc_handle(container) - .await - .unwrap(); - h.bdev - .create(BdevUri { - uri: "malloc:///disk0?size_mb=64".into(), - }) - .await - .unwrap(); - // share it over nvmf - let ep = h - .bdev - .share(BdevShareRequest { - name: "disk0".into(), - proto: "nvmf".into(), - }) - .await - .unwrap(); - - DOCKER_COMPOSE.get().unwrap().logs_all().await.unwrap(); - ep.into_inner().uri -} - -/// create a local malloc bdev, and then use it to create a nexus with the -/// remote targets added. This reflects the current approach where we have -/// children as: bdev:/// and nvmf:// we really should get rid of this -/// asymmetrical composition if we can. -async fn create_nexus(container: &str, mut kiddos: Vec) -> String { - let mut h = DOCKER_COMPOSE - .get() - .unwrap() - .grpc_handle(container) - .await - .unwrap(); - - let bdev = h - .bdev - .create(BdevUri { - uri: "malloc:///disk0?size_mb=64".into(), - }) - .await - .unwrap(); - - kiddos.push(format!("bdev:///{}", bdev.into_inner().name)); - - h.mayastor - .create_nexus(CreateNexusRequest { - uuid: NEXUS_UUID.to_string(), - size: 60 * 1024 * 1024, - children: kiddos, - }) - .await - .unwrap(); - - let endpoint = h - .mayastor - .publish_nexus(PublishNexusRequest { - uuid: NEXUS_UUID.into(), - share: 1, - ..Default::default() - }) - .await - .unwrap(); - - endpoint.into_inner().device_uri -} - -/// create the work -- which means the nexus, replica's and the jobs. on return -/// IO flows through mayastorTest to all 3 containers -async fn create_topology(queue: Arc) { - let r1 = create_target("ms1").await; - // let r2 = create_target("ms2").await; - let endpoint = create_nexus("ms3", vec![r1]).await; - - // the nexus is running on ms3 we will use a 4th instance of mayastor to - // create a nvmf bdev and push IO to it. 
- - let ms = MAYASTOR.get().unwrap(); - let bdev = ms - .spawn(async move { - let bdev = bdev_create(&endpoint).await.unwrap(); - bdev - }) - .await; - - // start the workload by running a job on each core, this simulates the way - // the targets use multiple cores - ms.spawn(async move { - for c in Cores::count() { - let bdev = Bdev::lookup_by_name(&bdev).unwrap(); - let job = io_driver::Builder::new() - .core(c) - .bdev(bdev) - .qd(8) - .io_size(512) - .build() - .await; - - queue.start(job); - } - }) - .await; -} - -async fn check_nexus(checker: F) { - let mut ms3 = DOCKER_COMPOSE - .get() - .unwrap() - .grpc_handle("ms3") - .await - .unwrap(); - let list = ms3.mayastor.list_nexus(Null {}).await.unwrap().into_inner(); - checker(list) -} - -/// kill replica issues an unshare to the container which more or less amounts -/// to the same thing as killing the container. -async fn kill_replica(container: &str) { - let t = DOCKER_COMPOSE.get().unwrap(); - let mut hdl = t.grpc_handle(container).await.unwrap(); - - hdl.bdev - .unshare(CreateReply { - name: "disk0".to_string(), - }) - .await - .unwrap(); -} - -#[allow(dead_code)] -async fn pause_replica(container: &str) { - let t = DOCKER_COMPOSE.get().unwrap(); - t.pause(container).await.unwrap(); -} - -#[allow(dead_code)] -async fn unpause_replica(container: &str) { - let t = DOCKER_COMPOSE.get().unwrap(); - t.thaw(container).await.unwrap(); -} - -#[allow(dead_code)] -async fn kill_local(container: &str) { - let t = DOCKER_COMPOSE.get().unwrap(); - let mut hdl = t.grpc_handle(container).await.unwrap(); - hdl.bdev - .destroy(BdevUri { - uri: "malloc:///disk0".into(), - }) - .await - .unwrap(); -} - -async fn list_bdevs(container: &str) { - let mut h = DOCKER_COMPOSE - .get() - .unwrap() - .grpc_handle(container) - .await - .unwrap(); - dbg!(h.bdev.list(Null {}).await.unwrap()); -} - -#[tokio::test] -async fn nvmf_bdev_test() { - let queue = Arc::new(JobQueue::new()); - - Config::get_or_init(|| Config { - nvme_bdev_opts: NvmeBdevOpts { - action_on_timeout: 2, - timeout_us: 10_000_000, - retry_count: 5, - ..Default::default() - }, - ..Default::default() - }) - .apply(); - - // create the docker containers each container started with two adjacent CPU - // cores. ms1 will have core mask 0x3, ms3 will have core mask 0xc and so - // on. the justification for this enormous core spreading is we want to - // test and ensure that things do not interfere with one and other and - // yet, still have at least more than one core such that we mimic - // production workloads. - // - - let compose = Builder::new() - .name("cargo-test") - .network("10.1.0.0/16") - .add_container_bin( - "ms1", - Binary::from_dbg("mayastor").with_args(vec!["-l", "1"]), - ) - // .add_container_bin( - // "ms2", - // Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), - // ) - .add_container_bin( - "ms3", - Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), - ) - .with_clean(true) - .with_prune(true) - .build() - .await - .unwrap(); - - DOCKER_COMPOSE.set(compose).unwrap(); - // this is based on the number of containers above. - let mask = format!("{:#01x}", (1 << 1) | (1 << 2)); - let ms = MayastorTest::new(MayastorCliArgs { - reactor_mask: mask, - no_pci: true, - grpc_endpoint: "0.0.0.0".to_string(), - ..Default::default() - }); - - let ms = MAYASTOR.get_or_init(|| ms); - - let mut ticker = interval(Duration::from_millis(1000)); - create_topology(Arc::clone(&queue)).await; - - list_bdevs("ms3").await; - - for i in 1 .. 
10 { - ticker.tick().await; - if i == 5 { - kill_replica("ms1").await; - } - - ms.spawn(async { - let bdev = Bdev::bdev_first().unwrap(); - dbg!(bdev.stats().await.unwrap()); - }) - .await; - // ctrl was hit so exit the loop here - if SIG_RECEIVED.load(Ordering::Relaxed) { - break; - } - } - - check_nexus(|n| { - n.nexus_list.iter().for_each(|n| { - dbg!(n); - }); - }) - .await; - - list_bdevs("ms3").await; - DOCKER_COMPOSE.get().unwrap().logs("ms3").await.unwrap(); - - queue.stop_all().await; - ms.stop().await; - DOCKER_COMPOSE.get().unwrap().down().await; -} diff --git a/mayastor/tests/child_io_error.rs b/mayastor/tests/child_io_error.rs deleted file mode 100644 index 3a29d1a2d..000000000 --- a/mayastor/tests/child_io_error.rs +++ /dev/null @@ -1,207 +0,0 @@ -use composer::{Builder, RpcHandle}; -use crossbeam::channel::{unbounded, Receiver}; -use rpc::mayastor::{ - BdevShareRequest, - BdevUri, - ChildState, - CreateNexusRequest, - CreateReply, - DestroyNexusRequest, - Nexus, - NexusState, - Null, - PublishNexusRequest, - ShareProtocolNexus, -}; -use std::{convert::TryFrom, time::Duration}; - -pub mod common; - -/// Test the states of the nexus and children when an I/O error occurs. -/// A child with a failed I/O is expected to be faulted. -#[tokio::test] -async fn child_io_error() { - let test = Builder::new() - .name("child_io_error") - .network("10.1.0.0/16") - .add_container("ms1") - .add_container("ms2") - .add_container("ms3") - .with_clean(true) - .with_prune(true) - .build() - .await - .unwrap(); - - let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap(); - let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); - let ms2_share_uri = bdev_create_and_share(ms2).await; - let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); - let ms3_share_uri = bdev_create_and_share(ms3).await; - - const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; - const NEXUS_SIZE: u64 = 50 * 1024 * 1024; // 50MiB - - // Create a nexus and run fio against it. - let nexus_uri = nexus_create_and_publish( - nexus_hdl, - NEXUS_UUID.into(), - NEXUS_SIZE, - vec![ms2_share_uri.clone(), ms3_share_uri.clone()], - ) - .await; - let nexus_tgt = nvmf_connect(nexus_uri.clone()); - let fio_receiver = run_fio(nexus_tgt, NEXUS_SIZE); - // Let fio run for a bit. - std::thread::sleep(Duration::from_secs(2)); - - // Cause an I/O error by unsharing a child then wait for fio to complete. - bdev_unshare(ms3).await; - let fio_result = fio_receiver.recv().unwrap(); - assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); - - // Check the state of the nexus and children. - assert_eq!( - get_nexus_state(nexus_hdl, &NEXUS_UUID).await, - NexusState::NexusDegraded as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, &NEXUS_UUID, &ms2_share_uri).await, - ChildState::ChildOnline as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, &NEXUS_UUID, &ms3_share_uri).await, - ChildState::ChildFaulted as i32 - ); - - // Teardown. - nvmf_disconnect(nexus_uri); - nexus_hdl - .mayastor - .destroy_nexus(DestroyNexusRequest { - uuid: NEXUS_UUID.into(), - }) - .await - .expect("Failed to destroy nexus"); -} - -/// Create and publish a nexus with the given uuid and size. -/// The nexus is published over NVMf and the nexus uri is returned. 
-async fn nexus_create_and_publish( - hdl: &mut RpcHandle, - uuid: String, - size: u64, - children: Vec, -) -> String { - hdl.mayastor - .create_nexus(CreateNexusRequest { - uuid: uuid.clone(), - size, - children, - }) - .await - .unwrap(); - hdl.mayastor - .publish_nexus(PublishNexusRequest { - uuid: uuid.clone(), - key: "".into(), - share: ShareProtocolNexus::NexusNvmf as i32, - }) - .await - .unwrap() - .into_inner() - .device_uri -} - -/// Create and share a bdev over NVMf. -async fn bdev_create_and_share(hdl: &mut RpcHandle) -> String { - const DISK_NAME: &str = "disk0"; - hdl.bdev - .create(BdevUri { - uri: format!("malloc:///{}?size_mb=100", DISK_NAME), - }) - .await - .unwrap(); - hdl.bdev - .share(BdevShareRequest { - name: DISK_NAME.into(), - proto: "nvmf".into(), - }) - .await - .unwrap() - .into_inner() - .uri -} - -/// Unshare a bdev. -async fn bdev_unshare(hdl: &mut RpcHandle) { - hdl.bdev - .unshare(CreateReply { - name: "disk0".to_string(), - }) - .await - .unwrap(); -} - -/// Connect to a NVMf target and return the device name. -fn nvmf_connect(uri: String) -> String { - let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); - let devices = target.connect().unwrap(); - devices[0].path.to_string() -} - -// Disconnect from a NVMf target. -fn nvmf_disconnect(uri: String) { - let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); - target.disconnect().unwrap(); -} - -/// Return the state of the nexus with the given uuid. -async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> i32 { - get_nexus(hdl, uuid).await.state -} - -/// Return the nexus with the given uuid. -async fn get_nexus(hdl: &mut RpcHandle, uuid: &str) -> Nexus { - let nexus_list = hdl - .mayastor - .list_nexus(Null {}) - .await - .unwrap() - .into_inner() - .nexus_list; - let n = nexus_list - .iter() - .filter(|n| n.uuid == uuid) - .collect::>(); - assert_eq!(n.len(), 1); - n[0].clone() -} - -/// Return the state of a child. -async fn get_child_state( - hdl: &mut RpcHandle, - nexus_uuid: &str, - child_uri: &str, -) -> i32 { - let n = get_nexus(hdl, nexus_uuid).await; - let c = n - .children - .iter() - .filter(|c| c.uri == child_uri) - .collect::>(); - assert_eq!(c.len(), 1); - c[0].state -} - -/// Run fio in a spawned thread and return a receiver channel which is signalled -/// when fio completes. 
-fn run_fio(target: String, target_size: u64) -> Receiver { - let (s, r) = unbounded::(); - std::thread::spawn(move || { - if let Err(e) = s.send(common::fio_verify_size(&target, target_size)) { - tracing::error!("Failed to send fio complete with error {}", e); - } - }); - r -} diff --git a/mayastor/tests/io_job.rs b/mayastor/tests/io_job.rs deleted file mode 100644 index 94602921f..000000000 --- a/mayastor/tests/io_job.rs +++ /dev/null @@ -1,198 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use tokio::time::Duration; - -use mayastor::{ - bdev::{nexus_create, nexus_lookup}, - core::{io_driver, Bdev, MayastorCliArgs}, -}; -use rpc::mayastor::{BdevShareRequest, BdevUri}; - -pub mod common; -use common::compose::{self, Binary, ComposeTest, MayastorTest}; -use mayastor::core::io_driver::JobQueue; - -static DOCKER_COMPOSE: OnceCell = OnceCell::new(); -static MAYASTOR: OnceCell = OnceCell::new(); - -// this functions runs with in the context of the mayastorTest instance -async fn create_work(queue: Arc) { - // get a vector of grpc clients to all containers that are part of this test - let mut hdls = DOCKER_COMPOSE.get().unwrap().grpc_handles().await.unwrap(); - - // for each grpc client, invoke these methods. - for h in &mut hdls { - // create the bdev - h.bdev - .create(BdevUri { - uri: "malloc:///disk0?size_mb=64".into(), - }) - .await - .unwrap(); - // share it over nvmf - h.bdev - .share(BdevShareRequest { - name: "disk0".into(), - proto: "nvmf".into(), - }) - .await - .unwrap(); - } - - DOCKER_COMPOSE.get().unwrap().logs_all().await.unwrap(); - - // get a reference to mayastor (used later) - let ms = MAYASTOR.get().unwrap(); - - // have ms create our nexus to the targets created above to know the IPs of - // the mayastor instances that run in the container, the handles can be - // used. This avoids hardcoded IPs and having magic constants. - ms.spawn(async move { - nexus_create( - "nexus0", - 1024 * 1024 * 60, - None, - &[ - format!( - "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", - hdls[0].endpoint.ip() - ), - format!( - "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", - hdls[1].endpoint.ip() - ), - ], - ) - .await - .unwrap(); - - let bdev = Bdev::lookup_by_name("nexus0").unwrap(); - - // create a job using the bdev we looked up, we are in the context here - // of the ms instance and not the containers. - let job = io_driver::Builder::new() - .core(1) - .bdev(bdev) - .qd(32) - .io_size(512) - .build() - .await; - - queue.start(job); - }) - .await -} - -async fn stats() { - // we grab an instance to mayastor test - let ms = MAYASTOR.get().unwrap(); - // and spawn a future on it - ms.spawn(async move { - let bdev = Bdev::bdev_first().unwrap().into_iter(); - for b in bdev { - let result = b.stats().await.unwrap(); - println!("{}: {:?}", b.name(), result); - } - }) - .await; -} - -#[tokio::test] -async fn io_driver() { - // - // We are creating 3 mayastor instances in total. Two of them will be - // running in side a container. Once these two instances are running, we - // will create a malloc bdev on each and share that over nvmf. Using - // these targets a 3de mayastor instance will be started. The third one - // however, is started by means of struct MayastorTest. This way, we can - // interact with it using .spawn() and .send(). - // - // The spawn() method returns an awaitable handle and .send() does a fire - // and forget. Using these methods we create a nexus in the mayastor - // test instance (ms). 
As part of the test, we also create a malloc bdev - // on that instance - // - // Finally, we create 2 jobs, one for the nexus and one for the malloc bdev - // and let the test run for 5 seconds. - - // To make it easy to get access to the ComposeTest and MayastorTest - // instances they are, after creation stored in the static globals - // - - // the queue that holds our jobs once started. As we pass this around - // between this thread the mayastor instance we keep a ref count. We - // need to keep track of the Jobs to avoid them from being dropped. - let queue = Arc::new(JobQueue::new()); - - // create the docker containers - // we are pinning them to 3rd and 4th core spectively to improve stability - // of the test. Be aware that default docker container cpuset is 0-3! - let compose = compose::Builder::new() - .name("cargo-test") - .network("10.1.0.0/16") - .add_container_bin( - "nvmf-target1", - Binary::from_dbg("mayastor").with_args(vec!["-l", "2"]), - ) - .add_container_bin( - "nvmf-target2", - Binary::from_dbg("mayastor").with_args(vec!["-l", "3"]), - ) - .with_prune(true) - .with_clean(true) - .build() - .await - .unwrap(); - - // create the mayastor test instance - let mayastor_test = MayastorTest::new(MayastorCliArgs { - log_components: vec!["all".into()], - reactor_mask: "0x3".to_string(), - no_pci: true, - grpc_endpoint: "0.0.0.0".to_string(), - ..Default::default() - }); - - // set the created instances to the globals here such that we can access - // them whenever we want by "getting" them. Because some code is async - // we cannot do this one step as the async runtime cannot be used during - // init. - DOCKER_COMPOSE.set(compose).unwrap(); - - // later down the road we use the ms instance (to spawn futures) so here we - // use get_or_init() it is a shorter way of writing: - // ```rust - // MAYASTOR.set(mayastor); - // let ms = MAYASTOR.get().unwrap(); - // ``` - let ms = MAYASTOR.get_or_init(|| mayastor_test); - - // the creation of the targets -- is done by grpc handles. Subsequently, we - // create the nexus and the malloc bdev (using futures). To keep things - // a bit organised we do that in a single function notice we pass queue - // here as an argument. We could also make a static queue here if we wanted - // too to avoid passing arguments around. - - create_work(queue.clone()).await; - - // the devices have been created and they are pumping IO - tokio::time::delay_for(Duration::from_secs(5)).await; - - // we must stop all jobs otherwise mayastor would never exit (unless we - // signal it) - queue.stop_all().await; - // grab some stats of the bdevs in the ms instance - stats().await; - - // Both ComposeTest and MayastorTest impl Drop. However, we want to control - // the sequence of shut down here, so we destroy the nexus to avoid that - // the system destroys the containers before it destroys mayastor. 
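The shutdown-ordering concern in the comment above follows from Rust's drop rules: locals drop in reverse declaration order, and values parked in static OnceCells are never dropped at all, which is why teardown here is sequenced by hand. A toy sketch of the ordering behaviour (illustrative types, not from this codebase):

    struct Containers; // stands in for ComposeTest
    struct Mayastor; // stands in for MayastorTest
    impl Drop for Containers {
        fn drop(&mut self) {
            println!("containers down");
        }
    }
    impl Drop for Mayastor {
        fn drop(&mut self) {
            println!("mayastor stopped");
        }
    }
    fn main() {
        let _compose = Containers;
        let _ms = Mayastor;
        // drops run in reverse declaration order:
        // "mayastor stopped", then "containers down"
    }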
- ms.spawn(nexus_lookup("nexus0").unwrap().destroy()) - .await - .unwrap(); - // now we manually destroy the docker containers - DOCKER_COMPOSE.get().unwrap().down().await; - - // ms gets dropped and will call mayastor_env_stop() -} diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs deleted file mode 100644 index 49c33af8c..000000000 --- a/mayastor/tests/rebuild.rs +++ /dev/null @@ -1,1264 +0,0 @@ -use composer::{Builder, ComposeTest, RpcHandle}; - -use rpc::mayastor::{ - AddChildNexusRequest, - BdevShareRequest, - BdevUri, - Child, - ChildState, - CreateNexusRequest, - CreateReply, - DestroyNexusRequest, - Nexus, - NexusState, - Null, - PauseRebuildRequest, - PublishNexusRequest, - RebuildProgressRequest, - RebuildStateRequest, - RemoveChildNexusRequest, - ResumeRebuildRequest, - ShareProtocolNexus, - StartRebuildRequest, - StopRebuildRequest, -}; - -use std::time::Duration; - -use crossbeam::channel::unbounded; -use spdk_sys::SPDK_BDEV_LARGE_BUF_MAX_SIZE; -use std::convert::TryFrom; - -pub mod common; - -const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; -const NEXUS_SIZE: u64 = 50 * 1024 * 1024; // 50MiB - -/// Test that a child added to a nexus can be successfully rebuild. -#[tokio::test] -async fn rebuild_basic() { - let test = start_infrastructure("rebuild_basic").await; - let (mut ms1, _, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Check a rebuild is started for a newly added child. - add_child(nexus_hdl, child, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); - - // Check nexus is healthy after rebuild completion. - assert!(wait_for_successful_rebuild(nexus_hdl, child).await); - check_nexus_state(nexus_hdl, NexusState::NexusOnline).await; -} - -/// Test the "norebuild" flag when adding a child. -#[tokio::test] -async fn rebuild_add_flag() { - let test = start_infrastructure("rebuild_add_flag").await; - let (mut ms1, _, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Add child but don't rebuild. - add_child(nexus_hdl, child, false).await; - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - - // Start rebuild. - start_rebuild(nexus_hdl, child).await.unwrap(); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); -} - -/// Test the rebuild progress gets updated. -#[tokio::test] -async fn rebuild_progress() { - let test = start_infrastructure("rebuild_progress").await; - let (mut ms1, _, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Start a rebuild and give it some time to run. - add_child(nexus_hdl, child, true).await; - std::thread::sleep(Duration::from_millis(100)); - - // Pause rebuild and get current progress. - pause_rebuild(nexus_hdl, child).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "paused", - Duration::from_secs(1), - ) - .await - .unwrap()); - let progress1 = get_rebuild_progress(nexus_hdl, child).await; - - // Resume rebuild and give it some time to run. - resume_rebuild(nexus_hdl, child).await.unwrap(); - std::thread::sleep(Duration::from_millis(100)); - - // Pause rebuild and check for further progress. 
- pause_rebuild(nexus_hdl, child).await; - let progress2 = get_rebuild_progress(nexus_hdl, child).await; - assert!(progress2 > progress1); -} - -/// Test cases where a rebuild should not be started. -#[tokio::test] -async fn rebuild_not_required() { - let test = start_infrastructure("rebuild_not_required").await; - let (mut ms1, ms2, ms3) = setup_test(&test, 2).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Attempt to rebuild a healthy child. - start_rebuild(nexus_hdl, child) - .await - .expect_err("Shouldn't rebuild"); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - - // Remove one of the healthy children. - remove_child(nexus_hdl, child).await; - - // Can't rebuild a single child which is healthy. - let last_child = &get_share_uri(&ms2); - start_rebuild(nexus_hdl, last_child) - .await - .expect_err("Shouldn't rebuild"); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); -} - -/// Test removing the source of a rebuild. -#[tokio::test] -async fn rebuild_src_removal() { - let test = start_infrastructure("rebuild_src_removal").await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Pause rebuild for added child. - add_child(nexus_hdl, child, true).await; - pause_rebuild(nexus_hdl, child).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "paused", - Duration::from_secs(1), - ) - .await - .unwrap()); - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - - // Remove the rebuild source. - let src_child = &get_share_uri(&ms2); - remove_child(nexus_hdl, src_child).await; - // Give a little time for the rebuild to fail. - std::thread::sleep(Duration::from_secs(1)); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - // Nexus must be faulted because it doesn't have any healthy children. - check_nexus_state(nexus_hdl, NexusState::NexusFaulted).await; -} - -/// Test removing the destination of a rebuild. -#[tokio::test] -async fn rebuild_dst_removal() { - let test = start_infrastructure("rebuild_dst_removal").await; - let (mut ms1, _, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Pause rebuild for added child. - add_child(nexus_hdl, child, true).await; - pause_rebuild(nexus_hdl, child).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "paused", - Duration::from_secs(1), - ) - .await - .unwrap()); - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - - // Remove the child that is being rebuilt. - remove_child(nexus_hdl, child).await; - // Give a little time for the rebuild to fail. - std::thread::sleep(Duration::from_secs(1)); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - // Nexus must be online because it has a single healthy child. - check_nexus_state(nexus_hdl, NexusState::NexusOnline).await; -} - -/// Test faulting the source of a rebuild. -#[tokio::test] -async fn rebuild_fault_src() { - let test = start_infrastructure("rebuild_fault_src").await; - let (mut ms1, mut ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Check a rebuild is started for the added child. - add_child(nexus_hdl, child, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "running", - Duration::from_millis(500), - ) - .await - .unwrap()); - - // Fault the rebuild source by unsharing the bdev. - bdev_unshare(&mut ms2).await; - - // The rebuild failed so the destination should be faulted. 
- assert!( - wait_for_child_state( - nexus_hdl, - child, - ChildState::ChildFaulted, - Duration::from_millis(500), - ) - .await - ); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); -} - -/// Test faulting the destination of a rebuild. -#[tokio::test] -async fn rebuild_fault_dst() { - let test = start_infrastructure("rebuild_fault_dst").await; - let (mut ms1, _, mut ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let child = &get_share_uri(&ms3); - - // Check a rebuild is started for the added child. - add_child(nexus_hdl, child, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child, - "running", - Duration::from_millis(500), - ) - .await - .unwrap()); - - // Fault the rebuild destination by unsharing the bdev. - bdev_unshare(&mut ms3).await; - - // Check the state of the destination child. - // Give a sufficiently high timeout time as unsharing an NVMf bdev can take - // some time to propagate up as an error from the rebuild job. - assert!( - wait_for_child_state( - nexus_hdl, - child, - ChildState::ChildFaulted, - Duration::from_secs(20), - ) - .await - ); - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); -} - -/// Test rebuild with different sizes of source and destination children. -#[tokio::test] -async fn rebuild_sizes() { - struct TestCase { - child1_size: u64, - child2_size: u64, - child3_size: u64, - } - - // Test cases where the child sizes include space for the metadata. - - const META_SIZE_MB: u64 = 5; - let default_size: u64 = 50 + META_SIZE_MB; - - let mut test_cases = vec![]; - // Children with same size. - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size, - child3_size: default_size, - }); - // 2nd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size * 2, - child3_size: default_size, - }); - // 3rd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size, - child3_size: default_size * 2, - }); - // 2nd and 3rd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size * 2, - child3_size: default_size * 2, - }); - - // Test cases where the metadata size is not included. This will result in - // the nexus size being smaller than requested in order to accommodate the - // metadata on the children. - - let default_size: u64 = 50; - - // Children with same size. - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size, - child3_size: default_size, - }); - // 2nd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size * 2, - child3_size: default_size, - }); - // 3rd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size, - child3_size: default_size * 2, - }); - // 2nd and 3rd child larger - test_cases.push(TestCase { - child1_size: default_size, - child2_size: default_size * 2, - child3_size: default_size * 2, - }); - - let test = start_infrastructure("rebuild_sizes").await; - let ms1 = &mut test.grpc_handle("ms1").await.unwrap(); - let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); - let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); - let nexus_hdl = ms1; - - // Run the tests. 
- for test in test_cases { - let child1 = - bdev_create_and_share(ms2, Some(test.child1_size), None).await; - let child2 = - bdev_create_and_share(ms3, Some(test.child2_size), None).await; - let local_child = - format!("malloc:///disk0?size_mb={}", test.child3_size.to_string()); - - // Create a nexus with 2 remote children. - create_nexus(nexus_hdl, vec![child1.clone(), child2.clone()]).await; - - // Add the local child and wait for rebuild. - add_child(nexus_hdl, &local_child, true).await; - assert!(wait_for_successful_rebuild(nexus_hdl, &local_child).await); - - // Teardown - destroy_nexus(nexus_hdl).await; - bdev_unshare(ms2).await; - bdev_destroy(ms2, "malloc:///disk0".into()).await; - bdev_unshare(ms3).await; - bdev_destroy(ms3, "malloc:///disk0".into()).await; - } -} - -/// Tests the rebuild with different nexus sizes. -#[tokio::test] -async fn rebuild_segment_sizes() { - let test = start_infrastructure("rebuild_segment_sizes").await; - let ms1 = &mut test.grpc_handle("ms1").await.unwrap(); - let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); - let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); - let nexus_hdl = ms1; - - const SEGMENT_SIZE: u64 = SPDK_BDEV_LARGE_BUF_MAX_SIZE as u64; - let test_cases = vec![ - // multiple of SEGMENT_SIZE - SEGMENT_SIZE * 10, - // not multiple of SEGMENT_SIZE - (SEGMENT_SIZE * 10) + 512, - ]; - - // Run the tests. - for test_case in test_cases.iter() { - let child1 = bdev_create_and_share(ms2, None, None).await; - let child2 = bdev_create_and_share(ms3, None, None).await; - - let nexus_size = *test_case; - nexus_hdl - .mayastor - .create_nexus(CreateNexusRequest { - uuid: NEXUS_UUID.into(), - size: nexus_size, - children: vec![child1], - }) - .await - .unwrap(); - - // Wait for rebuild to complete. - add_child(nexus_hdl, &child2, true).await; - assert!(wait_for_successful_rebuild(nexus_hdl, &child2).await); - - // Teardown - destroy_nexus(nexus_hdl).await; - bdev_unshare(ms2).await; - bdev_destroy(ms2, "malloc:///disk0".into()).await; - bdev_unshare(ms3).await; - bdev_destroy(ms3, "malloc:///disk0".into()).await; - } -} - -/// Test the various rebuild operations. -#[tokio::test] -async fn rebuild_operations() { - let test = start_infrastructure("rebuild_operations").await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - - // Rebuilding a healthy child should do nothing. - let child1 = &get_share_uri(&ms2); - resume_rebuild(nexus_hdl, child1) - .await - .expect_err("Should be nothing to rebuild"); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - - // Start a rebuild. - let child2 = &get_share_uri(&ms3); - add_child(nexus_hdl, child2, true).await; - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - - // Resuming a running rebuild should do nothing. - resume_rebuild(nexus_hdl, child2).await.unwrap(); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - - // Pause a running rebuild. - pause_rebuild(nexus_hdl, child2).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child2, - "paused", - Duration::from_secs(1), - ) - .await - .unwrap()); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - - // Pause the paused rebuild. - pause_rebuild(nexus_hdl, child2).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - child2, - "paused", - Duration::from_secs(1), - ) - .await - .unwrap()); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - - // Start another rebuild for the same child. 
- start_rebuild(nexus_hdl, child2) - .await - .expect_err("Should already be rebuilding child"); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 1); - - // Stop rebuild - this will cause the rebuild job to be removed - stop_rebuild(nexus_hdl, child2).await; - - let mut ticker = tokio::time::interval(Duration::from_millis(1000)); - let mut number = u32::MAX; - let mut retries = 5; - loop { - ticker.tick().await; - if get_num_rebuilds(nexus_hdl).await == 0 { - number = 0; - break; - } - - retries -= 1; - if retries == 0 { - break; - } - } - - if number != 0 { - panic!("retries failed"); - } -} - -/// Test multiple rebuilds running at the same time. -#[tokio::test] -async fn rebuild_multiple() { - let child_names = vec!["ms1", "ms2", "ms3", "ms4", "ms5"]; - let test = Builder::new() - .name("rebuild_multiple") - .network("10.1.0.0/16") - .add_container(child_names[0]) - .add_container(child_names[1]) - .add_container(child_names[2]) - .add_container(child_names[3]) - .add_container(child_names[4]) - .with_clean(true) - .with_prune(true) - .build() - .await - .unwrap(); - - #[derive(Clone)] - struct Child { - hdl: RpcHandle, - share_uri: String, - } - - let mut children = vec![]; - for name in child_names { - let share_uri = bdev_create_and_share( - &mut test.grpc_handle(name).await.unwrap(), - None, - None, - ) - .await; - children.push(Child { - hdl: test.grpc_handle(name).await.unwrap(), - share_uri, - }); - } - - // Create a nexus with a single healthy child. - let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap(); - create_nexus(nexus_hdl, vec![children[1].share_uri.clone()]).await; - - let degraded_children = children[2 ..= 4].to_vec(); - // Add children and pause rebuilds. - for child in °raded_children { - add_child(nexus_hdl, &child.share_uri, true).await; - pause_rebuild(nexus_hdl, &child.share_uri).await; - } - assert_eq!( - get_num_rebuilds(nexus_hdl).await as usize, - degraded_children.len() - ); - - // Resume rebuilds and wait for completion then remove the children. - for child in °raded_children { - resume_rebuild(nexus_hdl, &child.share_uri) - .await - .expect("Failed to resume rebuild"); - assert!(wait_for_successful_rebuild(nexus_hdl, &child.share_uri).await); - remove_child(nexus_hdl, &child.share_uri).await; - } - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - - // Add the children back again - for child in °raded_children { - add_child(nexus_hdl, &child.share_uri, true).await; - } - - // Wait for rebuilds to complete - for child in °raded_children { - assert!(wait_for_successful_rebuild(nexus_hdl, &child.share_uri).await); - } -} - -/// Test rebuild while running front-end I/O. -/// Note: This test can take some time to complete because it is running fio and -/// then comparing the contents of the children to make sure they are in-sync. -#[tokio::test] -async fn rebuild_with_load() { - init_tracing(); - let test = start_infrastructure("rebuild_with_load").await; - let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap(); - let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); - let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); - - const CHILD_SIZE_MB: u64 = 100; - - // Create a nexus with 1 child. - let child1 = - bdev_create_and_share(ms2, Some(CHILD_SIZE_MB), Some("disk1".into())) - .await; - create_nexus(nexus_hdl, vec![child1.clone()]).await; - - // Connect to nexus over NVMf. - let nexus_uri = publish_nexus(nexus_hdl).await; - let nexus_tgt = nvmf_connect(nexus_uri.clone()); - - // Run fio against nexus. 
- let (s, r) = unbounded::(); - let nvmf_tgt = nexus_tgt.clone(); - std::thread::spawn(move || { - if let Err(e) = s.send(common::fio_verify_size(&nvmf_tgt, NEXUS_SIZE)) { - tracing::error!("Failed to send fio complete with error {}", e); - } - }); - - // Let fio run for a bit. - std::thread::sleep(Duration::from_secs(2)); - - // Add a child and rebuild. - let child2 = - bdev_create_and_share(ms3, Some(CHILD_SIZE_MB), Some("disk2".into())) - .await; - add_child(nexus_hdl, &child2, true).await; - - // Wait for fio to complete - let fio_result = r.recv().unwrap(); - assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); - - // Wait for rebuild to complete. - assert!(wait_for_successful_rebuild(nexus_hdl, &child2).await); - - // Disconnect and destroy nexus - nvmf_disconnect(nexus_uri); - destroy_nexus(nexus_hdl).await; - - // Check children are in-sync. - let child1_tgt = nvmf_connect(child1.clone()); - let child2_tgt = nvmf_connect(child2.clone()); - common::compare_devices(&child1_tgt, &child2_tgt, CHILD_SIZE_MB, true); - nvmf_disconnect(child1); - nvmf_disconnect(child2); -} - -/// Test rebuild when restarting the source container. -#[tokio::test] -async fn rebuild_restart_src() { - let test = start_infrastructure("rebuild_restart_src").await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let rebuild_dst = &get_share_uri(&ms3); - - // Check a rebuild is started for a newly added child. - add_child(nexus_hdl, rebuild_dst, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - rebuild_dst, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); - - // Restart the rebuild source container and check that the rebuild fails. - test.restart("ms2") - .await - .expect("Failed to restart rebuild source"); - assert_eq!( - wait_for_successful_rebuild(nexus_hdl, rebuild_dst).await, - false - ); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - - // Check the states of the nexus and children. - // Note: A failed rebuild will not change the state of the source child - // (even if it fails to read from it), but it will fault the destination - // child. - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - let rebuild_src = &get_share_uri(&ms2); - assert_eq!( - get_child_state(nexus_hdl, rebuild_src).await, - ChildState::ChildOnline as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, rebuild_dst).await, - ChildState::ChildFaulted as i32 - ); -} - -/// Test rebuild when restarting the destination container. -#[tokio::test] -async fn rebuild_restart_dst() { - let test = start_infrastructure("rebuild_restart_dst").await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let rebuild_dst = &get_share_uri(&ms3); - - // Check a rebuild is started for a newly added child. - add_child(nexus_hdl, rebuild_dst, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - rebuild_dst, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); - - // Restart the rebuild destination container and check the rebuild fails. - test.restart("ms3") - .await - .expect("Failed to restart rebuild destination"); - assert_eq!( - wait_for_successful_rebuild(nexus_hdl, rebuild_dst).await, - false - ); - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); - - // Check the states of the nexus and children. - // Note: A failed rebuild will not change the state of the source child - // (even if it fails to read from it), but it will fault the destination - // child. 
- check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - let rebuild_src = &get_share_uri(&ms2); - assert_eq!( - get_child_state(nexus_hdl, rebuild_src).await, - ChildState::ChildOnline as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, rebuild_dst).await, - ChildState::ChildFaulted as i32 - ); -} - -/// Test rebuild when disconnecting the source container from the network. -#[tokio::test] -async fn rebuild_src_disconnect() { - let test_name = "rebuild_src_disconnect"; - let test = start_infrastructure(test_name).await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let rebuild_dst = &get_share_uri(&ms3); - - // Check a rebuild is started for a newly added child. - add_child(nexus_hdl, rebuild_dst, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - rebuild_dst, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); - - // Disconnect the rebuild source container from the network and check that - // the rebuild terminates. This requires a large timeout because it takes - // some time for the NVMf subsystem to report the error up. - test.disconnect("ms2") - .await - .expect("Failed to disconnect source container from network"); - assert_eq!( - wait_for_num_rebuilds(nexus_hdl, 0, Duration::from_secs(180)).await, - true - ); - - // Check the states of the nexus and children. - // Note: A failed rebuild will not change the state of the source child - // (even if it fails to read from it), but it will fault the destination - // child. - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - let rebuild_src = &get_share_uri(&ms2); - assert_eq!( - get_child_state(nexus_hdl, rebuild_src).await, - ChildState::ChildOnline as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, rebuild_dst).await, - ChildState::ChildFaulted as i32 - ); -} - -/// Test rebuild when disconnecting the destination container from the -/// network. -#[tokio::test] -#[ignore] -async fn rebuild_dst_disconnect() { - let test_name = "rebuild_dst_disconnect"; - let test = start_infrastructure(test_name).await; - let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; - let nexus_hdl = &mut ms1; - let rebuild_dst = &get_share_uri(&ms3); - - // Check a rebuild is started for a newly added child. - add_child(nexus_hdl, rebuild_dst, true).await; - assert!(wait_for_rebuild_state( - nexus_hdl, - rebuild_dst, - "running", - Duration::from_secs(1), - ) - .await - .unwrap()); - - // Disconnect the rebuild destination container from the network and check - // that the rebuild terminates. This requires a large timeout because it - // takes some time for the NVMf subsystem to report the error up. - test.disconnect("ms3") - .await - .expect("Failed to disconnect destination container from network"); - assert_eq!( - wait_for_num_rebuilds(nexus_hdl, 0, Duration::from_secs(180)).await, - true - ); - - // Check the states of the nexus and children. - // Note: A failed rebuild will not change the state of the source child - // (even if it fails to read from it), but it will fault the destination - // child. - check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; - let rebuild_src = &get_share_uri(&ms2); - assert_eq!( - get_child_state(nexus_hdl, rebuild_src).await, - ChildState::ChildOnline as i32 - ); - assert_eq!( - get_child_state(nexus_hdl, rebuild_dst).await, - ChildState::ChildFaulted as i32 - ); -} - -/// Build the infrastructure required to run the tests. 
-async fn start_infrastructure(test_name: &str) -> ComposeTest { - Builder::new() - .name(test_name) - .network("10.1.0.0/16") - .add_container("ms1") - .add_container("ms2") - .add_container("ms3") - .with_clean(true) - .with_prune(true) - .build() - .await - .unwrap() -} - -/// Set up the prerequisites for the tests. -/// Create a nexus on ms1 and create NVMf shares from ms2 & ms3. -/// The number of children to be added to the nexus is passed in as a parameter. -async fn setup_test( - test: &ComposeTest, - num_nexus_children: usize, -) -> (RpcHandle, RpcHandle, RpcHandle) { - // Currently only support creating a nexus with up to 2 children. - assert!(num_nexus_children < 3); - - let mut ms1 = test.grpc_handle("ms1").await.unwrap(); - let mut ms2 = test.grpc_handle("ms2").await.unwrap(); - let mut ms3 = test.grpc_handle("ms3").await.unwrap(); - - let mut replicas = vec![]; - replicas.push(bdev_create_and_share(&mut ms2, None, None).await); - replicas.push(bdev_create_and_share(&mut ms3, None, None).await); - create_nexus(&mut ms1, replicas[0 .. num_nexus_children].to_vec()).await; - (ms1, ms2, ms3) -} - -/// Publish the nexus and return the share uri. -async fn publish_nexus(hdl: &mut RpcHandle) -> String { - let reply = hdl - .mayastor - .publish_nexus(PublishNexusRequest { - uuid: NEXUS_UUID.into(), - key: "".to_string(), - share: ShareProtocolNexus::NexusNvmf as i32, - }) - .await - .unwrap() - .into_inner(); - reply.device_uri -} - -/// Create and share a bdev and return the share uri. -async fn bdev_create_and_share( - hdl: &mut RpcHandle, - child_size_mb: Option, - disk_name: Option, -) -> String { - let size_mb = child_size_mb.unwrap_or(100); - let disk_name = match disk_name { - Some(n) => n, - None => "disk0".to_string(), - }; - bdev_create(hdl, size_mb, disk_name.clone()).await; - bdev_share(hdl, disk_name).await -} - -/// Create a bdev and return the uri. -async fn bdev_create( - hdl: &mut RpcHandle, - size_mb: u64, - disk_name: String, -) -> String { - let uri = format!("malloc:///{}?size_mb={}", disk_name, size_mb,); - hdl.bdev - .create(BdevUri { - uri: uri.clone(), - }) - .await - .unwrap(); - uri -} - -/// Destroy a bdev. -async fn bdev_destroy(hdl: &mut RpcHandle, uri: String) { - hdl.bdev - .destroy(BdevUri { - uri, - }) - .await - .expect("Failed to destroy bdev"); -} - -/// Share a bdev and return the share uri. -async fn bdev_share(hdl: &mut RpcHandle, name: String) -> String { - let result = hdl - .bdev - .share(BdevShareRequest { - name, - proto: "nvmf".into(), - }) - .await - .expect("Failed to share bdev") - .into_inner(); - result.uri -} - -/// Get a bdev share uri. -fn get_share_uri(hdl: &RpcHandle) -> String { - format!( - "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", - hdl.endpoint.ip() - ) -} - -/// Unshare a bdev. -async fn bdev_unshare(hdl: &mut RpcHandle) { - hdl.bdev - .unshare(CreateReply { - name: "disk0".to_string(), - }) - .await - .unwrap(); -} - -/// Create a nexus. -async fn create_nexus(hdl: &mut RpcHandle, children: Vec) { - hdl.mayastor - .create_nexus(CreateNexusRequest { - uuid: NEXUS_UUID.into(), - size: NEXUS_SIZE, - children, - }) - .await - .unwrap(); -} - -/// Delete a nexus. -async fn destroy_nexus(hdl: &mut RpcHandle) { - hdl.mayastor - .destroy_nexus(DestroyNexusRequest { - uuid: NEXUS_UUID.into(), - }) - .await - .expect("Failed to destroy nexus"); -} - -/// Add a child to the nexus. 
-async fn add_child(hdl: &mut RpcHandle, child: &str, rebuild: bool) { - hdl.mayastor - .add_child_nexus(AddChildNexusRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - norebuild: !rebuild, - }) - .await - .unwrap(); -} - -/// Remove a child from the nexus. -async fn remove_child(hdl: &mut RpcHandle, child: &str) { - hdl.mayastor - .remove_child_nexus(RemoveChildNexusRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - .expect("Failed to remove child"); -} - -/// Start a rebuild for the given child. -async fn start_rebuild(hdl: &mut RpcHandle, child: &str) -> Result<(), ()> { - match hdl - .mayastor - .start_rebuild(StartRebuildRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - { - Ok(_) => Ok(()), - Err(_) => Err(()), - } -} - -/// Stop a rebuild for the given child. -async fn stop_rebuild(hdl: &mut RpcHandle, child: &str) { - hdl.mayastor - .stop_rebuild(StopRebuildRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - .expect("Failed to stop rebuild"); -} - -/// Pause a rebuild for the given child. -async fn pause_rebuild(hdl: &mut RpcHandle, child: &str) { - hdl.mayastor - .pause_rebuild(PauseRebuildRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - .expect("Failed to pause rebuild"); -} - -/// Resume a rebuild for the given child. -async fn resume_rebuild(hdl: &mut RpcHandle, child: &str) -> Result<(), ()> { - match hdl - .mayastor - .resume_rebuild(ResumeRebuildRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - { - Ok(_) => Ok(()), - Err(_) => Err(()), - } -} - -/// Get the number of rebuilds. -async fn get_num_rebuilds(hdl: &mut RpcHandle) -> u32 { - let n = get_nexus(hdl, NEXUS_UUID).await; - n.rebuilds -} - -/// Get the rebuild progress for the given child. -/// Return None if the progress cannot be obtained i.e. because the rebuild job -/// has completed. -async fn get_rebuild_progress(hdl: &mut RpcHandle, child: &str) -> Option { - match hdl - .mayastor - .get_rebuild_progress(RebuildProgressRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - { - Ok(reply) => Some(reply.into_inner().progress), - Err(_) => None, - } -} - -/// Wait for the number of rebuilds to reach the desired number. -/// Returns false if a timeout occurs. -async fn wait_for_num_rebuilds( - hdl: &mut RpcHandle, - num_rebuilds: u32, - timeout: Duration, -) -> bool { - let time = std::time::Instant::now(); - while time.elapsed().as_millis() < timeout.as_millis() { - if get_num_rebuilds(hdl).await == num_rebuilds { - return true; - } - std::thread::sleep(Duration::from_millis(10)); - } - false -} - -/// Waits on the given rebuild state or times out. -/// Returns false if a timeout occurs. -async fn wait_for_rebuild_state( - hdl: &mut RpcHandle, - child: &str, - state: &str, - timeout: Duration, -) -> Option { - let time = std::time::Instant::now(); - while time.elapsed().as_millis() < timeout.as_millis() { - match get_rebuild_state(hdl, child).await { - Some(rebuild_state) => { - if rebuild_state == state { - return Some(true); - } - } - None => return None, - } - std::thread::sleep(Duration::from_millis(10)); - } - Some(false) -} - -/// Get the current state of the rebuild for the given child uri. -/// Returns None if the rebuild job isn't found. 
-async fn get_rebuild_state(hdl: &mut RpcHandle, child: &str) -> Option { - match hdl - .mayastor - .get_rebuild_state(RebuildStateRequest { - uuid: NEXUS_UUID.into(), - uri: child.into(), - }) - .await - { - Ok(rebuild_state) => Some(rebuild_state.into_inner().state), - Err(_) => None, - } -} - -/// Returns true if the rebuild has completed successfully i.e. the destination -/// child is 'online'. -/// Returns false if: -/// 1. The rebuild does not make any progress within the progress window -/// 2. The rebuild takes longer than the TIMEOUT time. -async fn wait_for_successful_rebuild(hdl: &mut RpcHandle, child: &str) -> bool { - let mut last_progress = 0; - let mut progress_start_time = std::time::Instant::now(); - let progress_window = std::time::Duration::from_secs(5); - let time = std::time::Instant::now(); - const TIMEOUT: Duration = std::time::Duration::from_secs(30); - - // Keep looping while progress is being made and the rebuild has not timed - // out. - while std::time::Instant::now() - progress_start_time < progress_window - || time.elapsed().as_millis() < TIMEOUT.as_millis() - { - match get_rebuild_progress(hdl, child).await { - Some(progress) => { - if progress - last_progress > 0 { - // Progress has been made, reset the progress window. - progress_start_time = std::time::Instant::now(); - last_progress = progress; - } - } - None => { - // 'None' is returned when the rebuild job cannot be found - - // which can indicate rebuild completion. - // If the child is online, the rebuild completed successfully. - return get_child_state(hdl, child).await - == ChildState::ChildOnline as i32; - } - } - std::thread::sleep(Duration::from_millis(50)); - } - return false; -} - -/// Wait on the given child state or times out. -/// Returns false if a timeout occurs. -async fn wait_for_child_state( - hdl: &mut RpcHandle, - child: &str, - state: ChildState, - timeout: Duration, -) -> bool { - let time = std::time::Instant::now(); - while time.elapsed().as_millis() < timeout.as_millis() { - if get_child_state(hdl, child).await == state as i32 { - return true; - } - std::thread::sleep(Duration::from_millis(10)); - } - false -} - -/// Return the current state of the given child. -async fn get_child_state(hdl: &mut RpcHandle, child: &str) -> i32 { - get_child(hdl, NEXUS_UUID, child).await.state -} - -/// Returns the state of the nexus with the given uuid. -async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> Option { - let list = hdl - .mayastor - .list_nexus(Null {}) - .await - .unwrap() - .into_inner() - .nexus_list; - for nexus in list { - if nexus.uuid == uuid { - return Some(nexus.state); - } - } - None -} - -/// Returns the nexus with the given uuid. -async fn get_nexus(hdl: &mut RpcHandle, uuid: &str) -> Nexus { - let nexus_list = hdl - .mayastor - .list_nexus(Null {}) - .await - .unwrap() - .into_inner() - .nexus_list; - let n = nexus_list - .iter() - .filter(|n| n.uuid == uuid) - .collect::>(); - assert_eq!(n.len(), 1); - n[0].clone() -} - -/// Returns a child with the given URI. -async fn get_child( - hdl: &mut RpcHandle, - nexus_uuid: &str, - child_uri: &str, -) -> Child { - let n = get_nexus(hdl, nexus_uuid).await; - let c = n - .children - .iter() - .filter(|c| c.uri == child_uri) - .collect::>(); - assert_eq!(c.len(), 1); - c[0].clone() -} - -/// Connect to NVMf target and return device name. 
-fn nvmf_connect(uri: String) -> String { - let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); - let devices = target.connect().unwrap(); - devices[0].path.to_string() -} - -// Disconnect from NVMf target. -fn nvmf_disconnect(uri: String) { - let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); - target.disconnect().unwrap(); -} - -/// Initialise tracing. -fn init_tracing() { - if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { - tracing_subscriber::fmt().with_env_filter(filter).init(); - } else { - tracing_subscriber::fmt().with_env_filter("info").init(); - } -} - -/// Checks if the nexus state matches the expected state. -async fn check_nexus_state(nexus_hdl: &mut RpcHandle, state: NexusState) { - assert_eq!( - get_nexus_state(nexus_hdl, NEXUS_UUID).await.unwrap(), - state as i32 - ); -} From e736ceb324ab74ee135ed6ae9a4a402fda086f40 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Wed, 17 Feb 2021 12:03:52 +0000 Subject: [PATCH 20/78] chore(nightly): update nix rust nightly Update the rust nightly toolchain --- .rustfmt.toml | 4 +- composer/src/lib.rs | 6 +- control-plane/agents/common/src/lib.rs | 11 ++- control-plane/mbus-api/src/v0.rs | 23 ++++-- .../rest/service/src/v0/swagger_ui.rs | 4 +- control-plane/rest/src/versions/v0.rs | 80 +++++++++---------- csi/src/nodeplugin_grpc.rs | 3 +- csi/src/nodeplugin_svc.rs | 4 +- csi/src/server.rs | 7 +- jsonrpc/src/test.rs | 16 ++-- mayastor/src/bdev/dev/nvme.rs | 1 + mayastor/src/bdev/nexus/nexus_bdev.rs | 4 +- .../src/bdev/nexus/nexus_bdev_children.rs | 6 +- mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs | 6 +- mayastor/src/bdev/nexus/nexus_channel.rs | 17 ++-- mayastor/src/bdev/nexus/nexus_child.rs | 4 +- .../bdev/nexus/nexus_child_status_config.rs | 4 +- mayastor/src/bdev/nexus/nexus_io.rs | 16 ++-- mayastor/src/bdev/nexus/nexus_label.rs | 5 +- mayastor/src/bdev/nexus/nexus_metadata.rs | 5 +- mayastor/src/bin/casperf.rs | 14 ++-- mayastor/src/bin/mayastor.rs | 10 +-- mayastor/src/core/env.rs | 4 +- mayastor/src/core/io_driver.rs | 14 ++-- mayastor/src/core/nvme.rs | 1 + mayastor/src/ffihelper.rs | 1 + mayastor/src/rebuild/rebuild_api.rs | 4 +- mayastor/src/rebuild/rebuild_impl.rs | 3 +- mayastor/src/subsys/config/opts.rs | 6 +- mayastor/src/subsys/mod.rs | 2 +- mayastor/src/subsys/nvmf/subsystem.rs | 10 +-- mayastor/src/subsys/nvmf/target.rs | 10 +-- mayastor/src/subsys/nvmf/transport.rs | 12 +-- mayastor/tests/nexus_metadata.rs | 66 ++++++++------- nix/lib/rust.nix | 2 +- nvmeadm/src/nvmf_discovery.rs | 14 ++-- rpc/src/lib.rs | 1 + spdk-sys/src/lib.rs | 3 +- 38 files changed, 206 insertions(+), 197 deletions(-) diff --git a/.rustfmt.toml b/.rustfmt.toml index d36819da1..66f195f1c 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -7,8 +7,8 @@ comment_width = 80 struct_lit_single_line = false #changed from Mixed imports_layout = "HorizontalVertical" -# changed from false -merge_imports = true +# changed from Preserve (merge_imports = false) +imports_granularity="Crate" #default false spaces_around_ranges = true # was 2015 diff --git a/composer/src/lib.rs b/composer/src/lib.rs index b25912103..f8f4bc6f8 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -745,7 +745,7 @@ impl ComposeTest { // attached to it are removed. To get a list of attached // containers, use network_list() if let Err(e) = self.docker.remove_network(name).await { - if !matches!(e, Error::DockerResponseNotFoundError{..}) { + if !matches!(e, Error::DockerResponseNotFoundError { .. 
}) { return Err(e); } } @@ -1057,7 +1057,7 @@ impl ComposeTest { .await { // where already stopped - if !matches!(e, Error::DockerResponseNotModifiedError{..}) { + if !matches!(e, Error::DockerResponseNotModifiedError { .. }) { return Err(e); } } @@ -1084,7 +1084,7 @@ impl ComposeTest { .await { // where already stopped - if !matches!(e, Error::DockerResponseNotModifiedError{..}) { + if !matches!(e, Error::DockerResponseNotModifiedError { .. }) { return Err(e); } } diff --git a/control-plane/agents/common/src/lib.rs b/control-plane/agents/common/src/lib.rs index 2fc784d9a..4ba22c69d 100644 --- a/control-plane/agents/common/src/lib.rs +++ b/control-plane/agents/common/src/lib.rs @@ -187,10 +187,13 @@ impl Service { let type_name = std::any::type_name::(); tracing::debug!("Adding shared type: {}", type_name); if !self.shared_state.set(state) { - panic!(format!( - "Shared state for type '{}' has already been set!", - type_name - )); + panic!( + "{}", + format!( + "Shared state for type '{}' has already been set!", + type_name + ) + ); } self } diff --git a/control-plane/mbus-api/src/v0.rs b/control-plane/mbus-api/src/v0.rs index e4b56ab90..8f7b8f7e7 100644 --- a/control-plane/mbus-api/src/v0.rs +++ b/control-plane/mbus-api/src/v0.rs @@ -298,7 +298,16 @@ impl Default for Filter { macro_rules! bus_impl_string_id_inner { ($Name:ident, $Doc:literal) => { #[doc = $Doc] - #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash, Apiv2Schema)] + #[derive( + Serialize, + Deserialize, + Debug, + Clone, + Eq, + PartialEq, + Hash, + Apiv2Schema, + )] pub struct $Name(String); impl std::fmt::Display for $Name { @@ -325,15 +334,15 @@ macro_rules! bus_impl_string_id_inner { } } - impl Into<$Name> for &$Name { - fn into(self) -> $Name { - self.clone() + impl From<&$Name> for $Name { + fn from(id: &$Name) -> $Name { + id.clone() } } - impl Into for $Name { - fn into(self) -> String { - self.to_string() + impl From<$Name> for String { + fn from(id: $Name) -> String { + id.to_string() } } }; diff --git a/control-plane/rest/service/src/v0/swagger_ui.rs b/control-plane/rest/service/src/v0/swagger_ui.rs index de711acf4..19c20b16c 100644 --- a/control-plane/rest/service/src/v0/swagger_ui.rs +++ b/control-plane/rest/service/src/v0/swagger_ui.rs @@ -17,9 +17,9 @@ fn get_swagger_html(spec_uri: &str) -> Result { template .add_template("swagger-ui", TEMPLATE) .map_err(|e| e.to_string())?; - Ok(template + template .render("swagger-ui", &context) - .map_err(|e| e.to_string())?) + .map_err(|e| e.to_string()) } #[derive(Clone)] diff --git a/control-plane/rest/src/versions/v0.rs b/control-plane/rest/src/versions/v0.rs index 23e57c73d..57447fb1f 100644 --- a/control-plane/rest/src/versions/v0.rs +++ b/control-plane/rest/src/versions/v0.rs @@ -166,6 +166,15 @@ impl CreateVolumeBody { } } +/// Contains the query parameters that can be passed when calling +/// get_block_devices +#[derive(Deserialize, Apiv2Schema)] +#[serde(rename_all = "camelCase")] +pub struct GetBlockDeviceQueryParams { + /// specifies whether to list all devices or only usable ones + pub all: Option, +} + /// RestClient interface #[async_trait(?Send)] pub trait RestClient { @@ -232,7 +241,7 @@ pub trait RestClient { #[derive(Display, Debug)] #[allow(clippy::enum_variant_names)] -enum RestURNs { +enum RestUrns { #[strum(serialize = "nodes")] GetNodes(Node), #[strum(serialize = "pools")] @@ -254,7 +263,7 @@ macro_rules! 
get_all { ($S:ident, $T:ident) => { $S.get_vec(format!( "/v0/{}", - RestURNs::$T(Default::default()).to_string() + RestUrns::$T(Default::default()).to_string() )) }; } @@ -262,26 +271,26 @@ macro_rules! get_filter { ($S:ident, $F:ident, $T:ident) => { $S.get_vec(format!( "/v0/{}", - get_filtered_urn($F, &RestURNs::$T(Default::default()))? + get_filtered_urn($F, &RestUrns::$T(Default::default()))? )) }; } -fn get_filtered_urn(filter: Filter, r: &RestURNs) -> anyhow::Result { +fn get_filtered_urn(filter: Filter, r: &RestUrns) -> anyhow::Result { let urn = match r { - RestURNs::GetNodes(_) => match filter { + RestUrns::GetNodes(_) => match filter { Filter::None => "nodes".to_string(), Filter::Node(id) => format!("nodes/{}", id), _ => return Err(anyhow::Error::msg("Invalid filter for Nodes")), }, - RestURNs::GetPools(_) => match filter { + RestUrns::GetPools(_) => match filter { Filter::None => "pools".to_string(), Filter::Node(id) => format!("nodes/{}/pools", id), Filter::Pool(id) => format!("pools/{}", id), Filter::NodePool(n, p) => format!("nodes/{}/pools/{}", n, p), _ => return Err(anyhow::Error::msg("Invalid filter for pools")), }, - RestURNs::GetReplicas(_) => match filter { + RestUrns::GetReplicas(_) => match filter { Filter::None => "replicas".to_string(), Filter::Node(id) => format!("nodes/{}/replicas", id), Filter::Pool(id) => format!("pools/{}/replicas", id), @@ -296,21 +305,21 @@ fn get_filtered_urn(filter: Filter, r: &RestURNs) -> anyhow::Result { Filter::PoolReplica(p, r) => format!("pools/{}/replicas/{}", p, r), _ => return Err(anyhow::Error::msg("Invalid filter for replicas")), }, - RestURNs::GetNexuses(_) => match filter { + RestUrns::GetNexuses(_) => match filter { Filter::None => "nexuses".to_string(), Filter::Node(n) => format!("nodes/{}/nexuses", n), Filter::NodeNexus(n, x) => format!("nodes/{}/nexuses/{}", n, x), Filter::Nexus(x) => format!("nexuses/{}", x), _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), }, - RestURNs::GetChildren(_) => match filter { + RestUrns::GetChildren(_) => match filter { Filter::NodeNexus(n, x) => { format!("nodes/{}/nexuses/{}/children", n, x) } Filter::Nexus(x) => format!("nexuses/{}/children", x), _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), }, - RestURNs::GetVolumes(_) => match filter { + RestUrns::GetVolumes(_) => match filter { Filter::None => "volumes".to_string(), Filter::Node(n) => format!("nodes/{}/volumes", n), Filter::Volume(x) => format!("volumes/{}", x), @@ -497,24 +506,24 @@ impl RestClient for ActixRestClient { } } -impl Into for CreatePoolBody { - fn into(self) -> Body { - Body::from(serde_json::to_value(self).unwrap()) +impl From for Body { + fn from(src: CreatePoolBody) -> Self { + Body::from(serde_json::to_value(src).unwrap()) } } -impl Into for CreateReplicaBody { - fn into(self) -> Body { - Body::from(serde_json::to_value(self).unwrap()) +impl From for Body { + fn from(src: CreateReplicaBody) -> Self { + Body::from(serde_json::to_value(src).unwrap()) } } -impl Into for CreateNexusBody { - fn into(self) -> Body { - Body::from(serde_json::to_value(self).unwrap()) +impl From for Body { + fn from(src: CreateNexusBody) -> Self { + Body::from(serde_json::to_value(src).unwrap()) } } -impl Into for CreateVolumeBody { - fn into(self) -> Body { - Body::from(serde_json::to_value(self).unwrap()) +impl From for Body { + fn from(src: CreateVolumeBody) -> Self { + Body::from(serde_json::to_value(src).unwrap()) } } @@ -578,9 +587,9 @@ impl From for RestError { } } } -impl Into for RestError { - fn 
into(self) -> HttpResponse { - self.get_resp_error() +impl From for HttpResponse { + fn from(src: RestError) -> Self { + src.get_resp_error() } } @@ -611,25 +620,16 @@ impl RestRespond { Ok(Json(object)) } } -impl Into> for Result { - fn into(self) -> RestRespond { - RestRespond(self.map_err(RestError::from)) +impl From> for RestRespond { + fn from(src: Result) -> Self { + RestRespond(src.map_err(RestError::from)) } } -impl Into for RestRespond { - fn into(self) -> HttpResponse { - match self.0 { +impl From> for HttpResponse { + fn from(src: RestRespond) -> Self { + match src.0 { Ok(resp) => HttpResponse::Ok().json(resp), Err(error) => error.into(), } } } - -/// Contains the query parameters that can be passed when calling -/// get_block_devices -#[derive(Deserialize, Apiv2Schema)] -#[serde(rename_all = "camelCase")] -pub struct GetBlockDeviceQueryParams { - /// specifies whether to list all devices or only usable ones - pub all: Option, -} diff --git a/csi/src/nodeplugin_grpc.rs b/csi/src/nodeplugin_grpc.rs index f5292ac55..73081f482 100644 --- a/csi/src/nodeplugin_grpc.rs +++ b/csi/src/nodeplugin_grpc.rs @@ -27,6 +27,7 @@ use nodeplugin_svc::{ }; use tonic::{transport::Server, Code, Request, Response, Status}; +#[allow(clippy::upper_case_acronyms)] pub mod mayastor_node_plugin { tonic::include_proto!("mayastornodeplugin"); } @@ -49,7 +50,7 @@ impl From for Status { ServiceError::InternalFailure { .. } => Status::new(Code::Internal, err.to_string()), - ServiceError::IOError { + ServiceError::IoError { .. } => Status::new(Code::Unknown, err.to_string()), ServiceError::InconsistentMountFs { diff --git a/csi/src/nodeplugin_svc.rs b/csi/src/nodeplugin_svc.rs index 4a9803b4f..0d59283c2 100644 --- a/csi/src/nodeplugin_svc.rs +++ b/csi/src/nodeplugin_svc.rs @@ -25,7 +25,7 @@ pub enum ServiceError { #[snafu(display("Internal failure: volume ID: {}, {}", volid, source))] InternalFailure { source: DeviceError, volid: String }, #[snafu(display("IO error: volume ID: {}, {}", volid, source))] - IOError { + IoError { source: std::io::Error, volid: String, }, @@ -60,7 +60,7 @@ async fn fsfreeze( let args = [freeze_op, &mnt.dest]; let output = Command::new(FSFREEZE).args(&args).output().await.context( - IOError { + IoError { volid: volume_id.to_string(), }, )?; diff --git a/csi/src/server.rs b/csi/src/server.rs index 11a876dee..e091fe3d9 100644 --- a/csi/src/server.rs +++ b/csi/src/server.rs @@ -35,6 +35,7 @@ use crate::{identity::Identity, mount::probe_filesystems, node::Node}; #[allow(clippy::unit_arg)] #[allow(clippy::redundant_closure)] #[allow(clippy::enum_variant_names)] +#[allow(clippy::upper_case_acronyms)] pub mod csi { tonic::include_proto!("csi.v1"); } @@ -192,7 +193,7 @@ async fn main() -> Result<(), String> { }; let _ = tokio::join!( - CSIServer::run(csi_socket, node_name), + CsiServer::run(csi_socket, node_name), MayastorNodePluginGrpcServer::run( sock_addr.parse().expect("Invalid gRPC endpoint") ), @@ -201,9 +202,9 @@ async fn main() -> Result<(), String> { Ok(()) } -struct CSIServer {} +struct CsiServer {} -impl CSIServer { +impl CsiServer { pub async fn run(csi_socket: &str, node_name: &str) -> Result<(), ()> { let mut uds_sock = UnixListener::bind(csi_socket).unwrap(); info!("CSI plugin bound to {}", csi_socket); diff --git a/jsonrpc/src/test.rs b/jsonrpc/src/test.rs index affb19d84..e14aba4ac 100644 --- a/jsonrpc/src/test.rs +++ b/jsonrpc/src/test.rs @@ -118,7 +118,7 @@ async fn normal_request_reply() { assert_eq!(res.code, 123); assert!(!res.flag); } - Err(err) => panic!(format!("{}", 
err)), + Err(err) => panic!("{}", err), }, ) .await; @@ -142,7 +142,7 @@ async fn invalid_json() { |res: Result<(), Error>| match res { Ok(_) => panic!("Expected error and got ok"), Err(Error::ParseError(_)) => (), - Err(err) => panic!(format!("Wrong error type: {}", err)), + Err(err) => panic!("Wrong error type: {}", err), }, ) .await; @@ -186,7 +186,7 @@ async fn invalid_version() { |res: Result<(), Error>| match res { Ok(_) => panic!("Expected error and got ok"), Err(Error::InvalidVersion) => (), - Err(err) => panic!(format!("Wrong error type: {}", err)), + Err(err) => panic!("Wrong error type: {}", err), }, ) .await; @@ -209,7 +209,7 @@ async fn missing_version() { }, |res: Result| match res { Ok(_) => (), - Err(err) => panic!(format!("{}", err)), + Err(err) => panic!("{}", err), }, ) .await; @@ -233,7 +233,7 @@ async fn wrong_reply_id() { |res: Result| match res { Ok(_) => panic!("Expected error and got ok"), Err(Error::InvalidReplyId) => (), - Err(err) => panic!(format!("Wrong error type: {}", err)), + Err(err) => panic!("Wrong error type: {}", err), }, ) .await; @@ -257,7 +257,7 @@ async fn empty_result_unexpected() { |res: Result<(), Error>| match res { Ok(_) => panic!("Expected error and got ok"), Err(Error::ParseError(_)) => (), - Err(err) => panic!(format!("Wrong error type: {}", err)), + Err(err) => panic!("Wrong error type: {}", err), }, ) .await; @@ -280,7 +280,7 @@ async fn empty_result_expected() { }, |res: Result<(), Error>| match res { Ok(_) => (), - Err(err) => panic!(format!("Unexpected error {}", err)), + Err(err) => panic!("Unexpected error {}", err), }, ) .await; @@ -314,7 +314,7 @@ async fn rpc_error() { assert_eq!(code, RpcCode::NotFound); assert_eq!(&msg, "Not found"); } - Err(err) => panic!(format!("Wrong error type: {}", err)), + Err(err) => panic!("Wrong error type: {}", err), }, ) .await; diff --git a/mayastor/src/bdev/dev/nvme.rs b/mayastor/src/bdev/dev/nvme.rs index ab7334085..6e9d480e0 100644 --- a/mayastor/src/bdev/dev/nvme.rs +++ b/mayastor/src/bdev/dev/nvme.rs @@ -26,6 +26,7 @@ use crate::{ }; #[derive(Debug)] +#[allow(clippy::upper_case_acronyms)] pub(super) struct NVMe { /// name of the bdev that should be created name: String, diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index e6212165c..5095bbd77 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -40,7 +40,7 @@ use crate::{ nexus::{ instances, nexus_channel::{ - DREvent, + DrEvent, NexusChannel, NexusChannelInner, ReconfigureCtx, @@ -448,7 +448,7 @@ impl Nexus { } /// reconfigure the child event handler - pub(crate) async fn reconfigure(&self, event: DREvent) { + pub(crate) async fn reconfigure(&self, event: DrEvent) { let (s, r) = oneshot::channel::(); info!( diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index b0a61dba2..e4289b967 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -39,7 +39,7 @@ use crate::{ NexusStatus, OpenChild, }, - nexus_channel::DREvent, + nexus_channel::DrEvent, nexus_child::{ChildState, NexusChild}, nexus_child_status_config::ChildStatusConfig, nexus_label::{ @@ -268,7 +268,7 @@ impl Nexus { }); } - self.reconfigure(DREvent::ChildOffline).await; + self.reconfigure(DrEvent::ChildOffline).await; self.start_rebuild_jobs(cancelled_rebuilding_children).await; Ok(self.status()) @@ -313,7 +313,7 @@ impl Nexus { _ => { child.fault(reason).await; 
NexusChild::save_state_change(); - self.reconfigure(DREvent::ChildFault).await; + self.reconfigure(DrEvent::ChildFault).await; } } Ok(()) diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index 19a296197..3317ded97 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -19,7 +19,7 @@ use crate::{ RebuildOperationError, RemoveRebuildJob, }, - nexus_channel::DREvent, + nexus_channel::DrEvent, nexus_child::{ChildState, NexusChild, Reason}, }, VerboseError, @@ -99,7 +99,7 @@ impl Nexus { // rebuilt would then need to be rebuilt again. // Ensuring that the dst child receives all frontend Write IO keeps all // rebuilt ranges in sync with the other children. - self.reconfigure(DREvent::ChildRebuild).await; + self.reconfigure(DrEvent::ChildRebuild).await; job.as_client().start().context(RebuildOperationError { job: name.to_owned(), @@ -301,7 +301,7 @@ impl Nexus { } } - self.reconfigure(DREvent::ChildRebuild).await; + self.reconfigure(DrEvent::ChildRebuild).await; Ok(()) } diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index a1f0cd724..d3ab5a9ad 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -58,11 +58,9 @@ impl ReconfigureCtx { #[derive(Debug)] /// Dynamic Reconfiguration Events occur when a child is added or removed -pub enum DREvent { +pub enum DrEvent { /// Child offline reconfiguration event ChildOffline, - /// Child online reconfiguration event - ChildOnline, /// mark the child as faulted ChildFault, /// Child remove reconfiguration event @@ -211,15 +209,14 @@ impl NexusChannel { pub extern "C" fn reconfigure( device: *mut c_void, ctx: Box, - event: &DREvent, + event: &DrEvent, ) { match event { - DREvent::ChildOffline - | DREvent::ChildOnline - | DREvent::ChildRemove - | DREvent::ChildFault - | DREvent::ChildRebuild - | DREvent::ChildStatusSync => unsafe { + DrEvent::ChildOffline + | DrEvent::ChildRemove + | DrEvent::ChildFault + | DrEvent::ChildRebuild + | DrEvent::ChildStatusSync => unsafe { spdk_for_each_channel( device, Some(NexusChannel::refresh_io_channels), diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index e05fe1995..e29c5c4d2 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -8,7 +8,7 @@ use crate::{ bdev::{ nexus::{ instances, - nexus_channel::DREvent, + nexus_channel::DrEvent, nexus_child::ChildState::Faulted, nexus_child_status_config::ChildStatusConfig, }, @@ -426,7 +426,7 @@ impl NexusChild { let nexus_name = self.parent.clone(); Reactor::block_on(async move { match nexus_lookup(&nexus_name) { - Some(n) => n.reconfigure(DREvent::ChildRemove).await, + Some(n) => n.reconfigure(DrEvent::ChildRemove).await, None => error!("Nexus {} not found", nexus_name), } }); diff --git a/mayastor/src/bdev/nexus/nexus_child_status_config.rs b/mayastor/src/bdev/nexus/nexus_child_status_config.rs index f51e9dd72..df9b9a10c 100644 --- a/mayastor/src/bdev/nexus/nexus_child_status_config.rs +++ b/mayastor/src/bdev/nexus/nexus_child_status_config.rs @@ -14,7 +14,7 @@ use crate::bdev::nexus::{ instances, - nexus_channel::DREvent, + nexus_channel::DrEvent, nexus_child::{ChildState, NexusChild}, }; use once_cell::sync::OnceCell; @@ -93,7 +93,7 @@ impl ChildStatusConfig { child.set_state(*status); } }); - nexus.reconfigure(DREvent::ChildStatusSync).await; + 
nexus.reconfigure(DrEvent::ChildStatusSync).await; } } diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 041c4ad87..21b76f880 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -18,7 +18,7 @@ use crate::{ bdev::{ nexus::{ nexus_bdev::{Nexus, NEXUS_PRODUCT_ID}, - nexus_channel::DREvent, + nexus_channel::DrEvent, nexus_fn_table::NexusFnTable, }, nexus_lookup, @@ -94,8 +94,8 @@ pub enum IoType { Flush, Reset, NvmeAdmin, - NvmeIO, - NvmeIOMD, + NvmeIo, + NvmeIoMd, WriteZeros, ZeroCopy, ZoneInfo, @@ -117,8 +117,8 @@ impl From for u32 { IoType::Flush => 4, IoType::Reset => 5, IoType::NvmeAdmin => 6, - IoType::NvmeIO => 7, - IoType::NvmeIOMD => 8, + IoType::NvmeIo => 7, + IoType::NvmeIoMd => 8, IoType::WriteZeros => 9, IoType::ZeroCopy => 10, IoType::ZoneInfo => 11, @@ -142,8 +142,8 @@ impl From for IoType { 4 => Self::Flush, 5 => Self::Reset, 6 => Self::NvmeAdmin, - 7 => Self::NvmeIO, - 8 => Self::NvmeIOMD, + 7 => Self::NvmeIo, + 8 => Self::NvmeIoMd, 9 => Self::WriteZeros, 10 => Self::ZeroCopy, 11 => Self::ZoneInfo, @@ -335,7 +335,7 @@ impl Bio { let uri = child.name.clone(); nexus.pause().await.unwrap(); - nexus.reconfigure(DREvent::ChildFault).await; + nexus.reconfigure(DrEvent::ChildFault).await; //nexus.remove_child(&uri).await.unwrap(); // Note, an error can occur here if a separate task, diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index 52e750246..dd38ea31f 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -121,7 +121,7 @@ pub enum LabelError { #[snafu(display("Incorrect GPT partition table checksum"))] PartitionTableChecksum {}, #[snafu(display("Disk GUIDs differ"))] - CompareDiskGUID {}, + CompareDiskGuid {}, #[snafu(display("Disk sizes differ"))] CompareDiskSize {}, #[snafu(display("GPT stored partition table checksums differ"))] @@ -464,6 +464,7 @@ impl GptGuid { } #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)] +#[allow(clippy::upper_case_acronyms)] pub struct GPTHeader { /// GPT signature (must be "EFI PART"). pub signature: [u8; 8], @@ -897,7 +898,7 @@ impl NexusLabel { secondary: &GPTHeader, ) -> Result<(), LabelError> { if primary.guid != secondary.guid { - return Err(LabelError::CompareDiskGUID {}); + return Err(LabelError::CompareDiskGuid {}); } if primary.lba_start != secondary.lba_start || primary.lba_end != secondary.lba_end diff --git a/mayastor/src/bdev/nexus/nexus_metadata.rs b/mayastor/src/bdev/nexus/nexus_metadata.rs index 172271b61..fcde5a8e0 100644 --- a/mayastor/src/bdev/nexus/nexus_metadata.rs +++ b/mayastor/src/bdev/nexus/nexus_metadata.rs @@ -308,8 +308,7 @@ impl NexusMetaData { if checksum != entry.data_checksum { return Err(MetaDataError::ObjectChecksum {}); } - Ok(NexusConfig::from_slice(buf.as_slice()) - .context(DeserializeError {})?) 
+ NexusConfig::from_slice(buf.as_slice()).context(DeserializeError {}) } } @@ -733,7 +732,7 @@ impl NexusChild { == GptGuid::from_str(Nexus::METADATA_PARTITION_TYPE_ID).unwrap() && partition.ent_name.name == "MayaMeta" { - return Ok(self.probe_index(partition.ent_start).await?); + return self.probe_index(partition.ent_start).await; } } diff --git a/mayastor/src/bin/casperf.rs b/mayastor/src/bin/casperf.rs index be02631aa..93c61a0d5 100644 --- a/mayastor/src/bin/casperf.rs +++ b/mayastor/src/bin/casperf.rs @@ -31,10 +31,10 @@ use spdk_sys::{ #[derive(Debug)] enum IoType { /// perform random read operations - READ, + Read, /// perform random write operations #[allow(dead_code)] - WRITE, + Write, } /// default queue depth @@ -148,7 +148,7 @@ impl Job { (0 ..= qd).for_each(|offset| { queue.push(Io { buf: DmaBuf::new(size, bdev.alignment()).unwrap(), - iot: IoType::READ, + iot: IoType::Read, offset, job: NonNull::dangling(), }); @@ -202,16 +202,16 @@ impl Io { fn run(&mut self, job: *mut Job) { self.job = NonNull::new(job).unwrap(); match self.iot { - IoType::READ => self.read(0), - IoType::WRITE => self.write(0), + IoType::Read => self.read(0), + IoType::Write => self.write(0), }; } /// dispatch the next IO, this is called from within the completion callback pub fn next(&mut self, offset: u64) { match self.iot { - IoType::READ => self.read(offset), - IoType::WRITE => self.write(offset), + IoType::Read => self.read(offset), + IoType::Write => self.write(offset), } } diff --git a/mayastor/src/bin/mayastor.rs b/mayastor/src/bin/mayastor.rs index 9556629cc..45a0fe34f 100644 --- a/mayastor/src/bin/mayastor.rs +++ b/mayastor/src/bin/mayastor.rs @@ -57,13 +57,11 @@ fn main() -> Result<(), Box> { let master = Reactors::master(); master.send_future(async { info!("Mayastor started {} ...", '\u{1F680}') }); - let mut futures = Vec::new(); - - futures.push(master.boxed_local()); - futures.push(subsys::Registration::run().boxed_local()); - futures.push( + let futures = vec![ + master.boxed_local(), + subsys::Registration::run().boxed_local(), grpc::MayastorGrpcServer::run(grpc_endpoint, rpc_address).boxed_local(), - ); + ]; rt.block_on(futures::future::try_join_all(futures)) .expect_err("reactor exit in abnormal state"); diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index e66243f76..7a914fa64 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -384,9 +384,7 @@ impl MayastorEnvironment { /// construct an array of options to be passed to EAL and start it fn initialize_eal(&self) { - let mut args: Vec = Vec::new(); - - args.push(CString::new(self.name.clone()).unwrap()); + let mut args = vec![CString::new(self.name.clone()).unwrap()]; if self.mem_channel > 0 { args.push( diff --git a/mayastor/src/core/io_driver.rs b/mayastor/src/core/io_driver.rs index 6530a05b9..58d2b406f 100644 --- a/mayastor/src/core/io_driver.rs +++ b/mayastor/src/core/io_driver.rs @@ -19,14 +19,14 @@ use crate::{ #[derive(Debug, Copy, Clone)] pub enum IoType { /// perform random read operations - READ, + Read, /// perform random write operations - WRITE, + Write, } impl Default for IoType { fn default() -> Self { - Self::READ + Self::Read } } @@ -47,8 +47,8 @@ impl Io { fn run(&mut self, job: *mut Job) { self.job = NonNull::new(job).unwrap(); match self.iot { - IoType::READ => self.read(0), - IoType::WRITE => self.write(0), + IoType::Read => self.read(0), + IoType::Write => self.write(0), }; } @@ -66,8 +66,8 @@ impl Io { } match self.iot { - IoType::READ => self.read(offset), - IoType::WRITE => 
self.write(offset), + IoType::Read => self.read(offset), + IoType::Write => self.write(offset), } } diff --git a/mayastor/src/core/nvme.rs b/mayastor/src/core/nvme.rs index 4715aac58..4d615a294 100644 --- a/mayastor/src/core/nvme.rs +++ b/mayastor/src/core/nvme.rs @@ -32,6 +32,7 @@ impl From for StatusCodeType { } #[derive(Debug, Copy, Clone, Eq, PartialOrd, PartialEq)] +#[allow(clippy::upper_case_acronyms)] pub enum GenericStatusCode { Success, InvalidOpcode, diff --git a/mayastor/src/ffihelper.rs b/mayastor/src/ffihelper.rs index 3fae16e44..2207e8fae 100644 --- a/mayastor/src/ffihelper.rs +++ b/mayastor/src/ffihelper.rs @@ -118,6 +118,7 @@ pub fn errno_result_from_i32(val: T, errno: i32) -> ErrnoResult { } /// Helper routines to convert from FFI functions +#[allow(clippy::wrong_self_convention)] pub(crate) trait FfiResult { type Ok; fn to_result(self, f: F) -> Result diff --git a/mayastor/src/rebuild/rebuild_api.rs b/mayastor/src/rebuild/rebuild_api.rs index 01e653d39..a6c4b2f4f 100644 --- a/mayastor/src/rebuild/rebuild_api.rs +++ b/mayastor/src/rebuild/rebuild_api.rs @@ -71,7 +71,7 @@ pub enum RebuildError { source: nix::errno::Errno, }, #[snafu(display("Failed to get bdev name from URI {}", uri))] - BdevInvalidURI { source: NexusBdevError, uri: String }, + BdevInvalidUri { source: NexusBdevError, uri: String }, } #[derive(Debug, PartialEq, Copy, Clone)] @@ -189,7 +189,7 @@ impl RebuildJob { ) -> Result<&'a mut Self, RebuildError> { Self::new(nexus, source, destination, range, notify_fn)?.store()?; - Ok(Self::lookup(destination)?) + Self::lookup(destination) } /// Lookup a rebuild job by its destination uri and return it diff --git a/mayastor/src/rebuild/rebuild_impl.rs b/mayastor/src/rebuild/rebuild_impl.rs index 0af3c9209..743e1b6d5 100644 --- a/mayastor/src/rebuild/rebuild_impl.rs +++ b/mayastor/src/rebuild/rebuild_impl.rs @@ -1,5 +1,4 @@ #![warn(missing_docs)] -#![allow(clippy::unknown_clippy_lints)] use std::{cell::UnsafeCell, collections::HashMap}; @@ -429,7 +428,7 @@ impl RebuildJob { claim: bool, ) -> Result { BdevHandle::open( - &bdev_get_name(uri).context(BdevInvalidURI { + &bdev_get_name(uri).context(BdevInvalidUri { uri: uri.to_string(), })?, read_write, diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs index b14bd7476..5abb9905a 100644 --- a/mayastor/src/subsys/config/opts.rs +++ b/mayastor/src/subsys/config/opts.rs @@ -35,8 +35,8 @@ pub trait GetOpts { } } -#[serde(default, deny_unknown_fields)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default, deny_unknown_fields)] pub struct NexusOpts { /// enable nvmf target pub nvmf_enable: bool, @@ -85,8 +85,8 @@ impl GetOpts for NexusOpts { } } -#[serde(default, deny_unknown_fields)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default, deny_unknown_fields)] pub struct NvmfTgtConfig { /// name of the target to be created pub name: String, @@ -599,8 +599,8 @@ impl GetOpts for PosixSocketOpts { } } -#[serde(default, deny_unknown_fields)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default, deny_unknown_fields)] pub struct ErrStoreOpts { /// ring buffer size pub err_store_size: usize, diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs index e2709902c..760399b74 100644 --- a/mayastor/src/subsys/mod.rs +++ b/mayastor/src/subsys/mod.rs @@ -1,4 +1,4 @@ -// +//! //! 
Main file to register additional subsystems pub use config::{ diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 3a21913b7..fd3e74acc 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -44,7 +44,7 @@ use crate::{ core::{Bdev, Reactors}, ffihelper::{cb_arg, AsStr, FfiResult, IntoCString}, subsys::{ - nvmf::{transport::TransportID, Error, NVMF_TGT}, + nvmf::{transport::TransportId, Error, NVMF_TGT}, Config, }, }; @@ -262,7 +262,7 @@ impl NvmfSubsystem { // dont yet enable both ports, IOW just add one transportID now - let trid_replica = TransportID::new(cfg.nexus_opts.nvmf_replica_port); + let trid_replica = TransportId::new(cfg.nexus_opts.nvmf_replica_port); let (s, r) = oneshot::channel::(); unsafe { @@ -526,7 +526,7 @@ impl NvmfSubsystem { Bdev::from_ptr(unsafe { spdk_nvmf_ns_get_bdev(ns) }) } - fn listeners_to_vec(&self) -> Option> { + fn listeners_to_vec(&self) -> Option> { unsafe { let mut listener = spdk_nvmf_subsystem_get_first_listener(self.0.as_ptr()); @@ -535,7 +535,7 @@ impl NvmfSubsystem { return None; } - let mut ids = vec![TransportID( + let mut ids = vec![TransportId( *spdk_nvmf_subsystem_listener_get_trid(listener), )]; @@ -545,7 +545,7 @@ impl NvmfSubsystem { listener, ); if !listener.is_null() { - ids.push(TransportID( + ids.push(TransportId( *spdk_nvmf_subsystem_listener_get_trid(listener), )); continue; diff --git a/mayastor/src/subsys/nvmf/target.rs b/mayastor/src/subsys/nvmf/target.rs index 8541eeed7..d83dcfaec 100644 --- a/mayastor/src/subsys/nvmf/target.rs +++ b/mayastor/src/subsys/nvmf/target.rs @@ -35,7 +35,7 @@ use crate::{ poll_groups::PollGroup, subsystem::NvmfSubsystem, transport, - transport::{get_ipv4_address, TransportID}, + transport::{get_ipv4_address, TransportId}, Error, NVMF_PGS, }, @@ -255,7 +255,7 @@ impl Target { /// port fn listen(&mut self) -> Result<()> { let cfg = Config::get(); - let trid_nexus = TransportID::new(cfg.nexus_opts.nvmf_nexus_port); + let trid_nexus = TransportId::new(cfg.nexus_opts.nvmf_nexus_port); let rc = unsafe { spdk_nvmf_tgt_listen(self.tgt.as_ptr(), trid_nexus.as_ptr()) }; @@ -266,7 +266,7 @@ impl Target { }); } - let trid_replica = TransportID::new(cfg.nexus_opts.nvmf_replica_port); + let trid_replica = TransportId::new(cfg.nexus_opts.nvmf_replica_port); let rc = unsafe { spdk_nvmf_tgt_listen(self.tgt.as_ptr(), trid_replica.as_ptr()) }; @@ -387,8 +387,8 @@ impl Target { unsafe { spdk_poller_unregister(&mut self.acceptor_poller.as_ptr()) }; let cfg = Config::get(); - let trid_nexus = TransportID::new(cfg.nexus_opts.nvmf_nexus_port); - let trid_replica = TransportID::new(cfg.nexus_opts.nvmf_replica_port); + let trid_nexus = TransportId::new(cfg.nexus_opts.nvmf_nexus_port); + let trid_replica = TransportId::new(cfg.nexus_opts.nvmf_replica_port); unsafe { spdk_nvmf_tgt_stop_listen(self.tgt.as_ptr(), trid_replica.as_ptr()) diff --git a/mayastor/src/subsys/nvmf/transport.rs b/mayastor/src/subsys/nvmf/transport.rs index ffa80a464..12a478125 100644 --- a/mayastor/src/subsys/nvmf/transport.rs +++ b/mayastor/src/subsys/nvmf/transport.rs @@ -69,8 +69,8 @@ pub async fn add_tcp_transport() -> Result<(), Error> { Ok(()) } -pub struct TransportID(pub(crate) spdk_nvme_transport_id); -impl Deref for TransportID { +pub struct TransportId(pub(crate) spdk_nvme_transport_id); +impl Deref for TransportId { type Target = spdk_nvme_transport_id; fn deref(&self) -> &Self::Target { @@ -78,13 +78,13 @@ impl Deref for TransportID { } } -impl DerefMut for 
TransportID { +impl DerefMut for TransportId { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl TransportID { +impl TransportId { pub fn new(port: u16) -> Self { let address = get_ipv4_address().unwrap(); @@ -125,7 +125,7 @@ impl TransportID { } } -impl Display for TransportID { +impl Display for TransportId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, @@ -136,7 +136,7 @@ impl Display for TransportID { } } -impl Debug for TransportID { +impl Debug for TransportId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("Transport ID") .field("trtype", &self.0.trtype) diff --git a/mayastor/tests/nexus_metadata.rs b/mayastor/tests/nexus_metadata.rs index a20bc9e7c..d3f746aa1 100644 --- a/mayastor/tests/nexus_metadata.rs +++ b/mayastor/tests/nexus_metadata.rs @@ -56,40 +56,38 @@ async fn read_write_metadata() { let child = &mut nexus.children[0]; let now = SystemTime::now(); - let mut data: Vec = Vec::new(); - - data.push(NexusConfig::Version1(NexusConfigVersion1 { - name: "Hello".to_string(), - tags: String::from("How now brown cow") - .split_whitespace() - .map(String::from) - .collect(), - revision: 38, - checksum: 0x3c2d_38ab, - data: String::from("Hello from v1"), - })); - - data.push(NexusConfig::Version2(NexusConfigVersion2 { - name: "Hello".to_string(), - tags: String::from("How now brown cow") - .split_whitespace() - .map(String::from) - .collect(), - revision: 40, - checksum: 0x3c2e_40ab, - data: String::from("Hello from v2"), - count: 100, - })); - - data.push(NexusConfig::Version3(NexusConfigVersion3 { - name: "Hello".to_string(), - revision: 42, - checksum: 0x3c2f_42ab, - data: String::from("Hello from v3") - .split_whitespace() - .map(String::from) - .collect(), - })); + let data = vec![ + NexusConfig::Version1(NexusConfigVersion1 { + name: "Hello".to_string(), + tags: String::from("How now brown cow") + .split_whitespace() + .map(String::from) + .collect(), + revision: 38, + checksum: 0x3c2d_38ab, + data: String::from("Hello from v1"), + }), + NexusConfig::Version2(NexusConfigVersion2 { + name: "Hello".to_string(), + tags: String::from("How now brown cow") + .split_whitespace() + .map(String::from) + .collect(), + revision: 40, + checksum: 0x3c2e_40ab, + data: String::from("Hello from v2"), + count: 100, + }), + NexusConfig::Version3(NexusConfigVersion3 { + name: "Hello".to_string(), + revision: 42, + checksum: 0x3c2f_42ab, + data: String::from("Hello from v3") + .split_whitespace() + .map(String::from) + .collect(), + }), + ]; // create an index and append two objects let mut metadata = child.create_metadata().await.unwrap(); diff --git a/nix/lib/rust.nix b/nix/lib/rust.nix index 870fd5bca..be38ff316 100644 --- a/nix/lib/rust.nix +++ b/nix/lib/rust.nix @@ -3,6 +3,6 @@ let pkgs = import sources.nixpkgs { overlays = [ (import sources.nixpkgs-mozilla) ]; }; in rec { - nightly = pkgs.rustChannelOf { channel = "nightly"; date = "2020-11-24"; }; + nightly = pkgs.rustChannelOf { channel = "nightly"; date = "2021-02-16"; }; stable = pkgs.rustChannelOf { channel = "stable"; }; } diff --git a/nvmeadm/src/nvmf_discovery.rs b/nvmeadm/src/nvmf_discovery.rs index 858280430..188438b28 100644 --- a/nvmeadm/src/nvmf_discovery.rs +++ b/nvmeadm/src/nvmf_discovery.rs @@ -58,11 +58,11 @@ impl fmt::Display for TrType { /// AddressFamily, in case of TCP and RDMA we use IPv6 or IPc4 only #[derive(Debug, Primitive)] pub enum AddressFamily { - PCI = 0, - IPv4 = 1, - IPv6 = 2, - IB = 3, - FC = 4, + Pci = 0, + Ipv4 = 1, + Ipv6 = 2, + Ib = 
3, + Fc = 4, } impl fmt::Display for AddressFamily { @@ -76,8 +76,8 @@ impl fmt::Display for AddressFamily { /// even when we are not actively serving out any devices #[derive(Debug, Primitive)] pub enum SubType { - DISCOVERY = 1, - NVME = 2, + Discovery = 1, + Nvme = 2, } #[derive(Debug)] diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 2ccdefe99..c12fc0375 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -9,6 +9,7 @@ extern crate tonic; #[allow(clippy::type_complexity)] #[allow(clippy::unit_arg)] #[allow(clippy::redundant_closure)] +#[allow(clippy::upper_case_acronyms)] pub mod mayastor { impl From<()> for Null { diff --git a/spdk-sys/src/lib.rs b/spdk-sys/src/lib.rs index 9ec11cf07..46688ac1a 100644 --- a/spdk-sys/src/lib.rs +++ b/spdk-sys/src/lib.rs @@ -9,7 +9,8 @@ non_snake_case, non_upper_case_globals, unknown_lints, - unused + unused, + clippy::upper_case_acronyms )] use std::os::raw::c_char; From f66833f00d967bc3f71a328823c2b382079ec7e7 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Thu, 18 Feb 2021 10:45:03 +0100 Subject: [PATCH 21/78] revert: remove rust-toolchain We use nix-shell to define the default version of rust (stable). If people wish to "bring their own" they can use `--arg norust true` or use `.envrc`. Having a default rust-toolchain in the repo prevents this previous behaviour from working correctly. And would require making changes to the repo if one wants to override it. I've updated the docs to reflect this. --- doc/build.md | 16 +++++++++------- rust-toolchain | 1 - 2 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 rust-toolchain diff --git a/doc/build.md b/doc/build.md index 31c53bbdf..44629d0a0 100644 --- a/doc/build.md +++ b/doc/build.md @@ -18,7 +18,7 @@ you won't need to worry about cross compiler toolchains, and all builds are repr ## Prerequisites -Mayastor **only** builds on modern Linuxes. We'd adore contributions to add support for +Mayastor **only** builds on modern Linuxes. We'd adore contributions to add support for Windows, FreeBSD, OpenWRT, or other server platforms. If you do not have Linux system: @@ -28,7 +28,7 @@ If you do not have Linux system: * **Mac:** We recommend you use [Docker for Mac][docker-install] and follow the Docker process described. Please let us know if you find a way to run it! -* **FreeBSD:** We *think* this might actually work, SPDK is compatible! But, we haven't +* **FreeBSD:** We *think* this might actually work, SPDK is compatible! But, we haven't tried it yet. * **Others:** This is kind of a "Do-it-yourself" situation. Sorry, we can't be more help! @@ -43,7 +43,7 @@ curl -L https://nixos.org/nix/install | sh > **Can't install Nix?** > > That's totally fine. You can use [`docker`][docker-install] just fine for one-off or occasional PRs! 
-> +> > This flow will get you a pre-fetched `nix` store: > ```bash > docker run --name mayastor-nix-prefetch -it -v $(pwd):/scratch:rw --privileged --workdir /scratch nixos/nix nix-shell --run "exit 0" @@ -75,16 +75,16 @@ First, setting the following: Then, updating the channel: ```bash -$ sudo nix-channel --list +$ sudo nix-channel --list nixos https://nixos.org/channels/nixos-20.09 $ sudo nix-channel --remove nixos $ sudo nix-channel --add https://nixos.org/channels/nixos-unstable nixos $ sudo nixos-rebuild switch --update ``` -> If you don't want, you can drop into a +> If you don't want, you can drop into a `nixUnstable` supporting shell with: -> +> > ```bash > nix-shell -I nixpkgs=channel:nixpkgs-unstable -p nixUnstable --command "nix --experimental-features 'nix-command flakes' develop -f . mayastor" > ``` @@ -96,7 +96,7 @@ $ sudo nixos-rebuild switch --update You can use a tool like [`direnv`][direnv] to automate `nix shell` entry. - +If you are unable to use the Nix provided Rust for some reason, there are `norust` and `nospdk` arguments to Nix shell. `nix-shell --arg norust true` ## Iterative Builds Contributors often build Mayastor repeatedly during the development process. @@ -120,6 +120,8 @@ cargo build --release **Want to run or hack on Mayastor?** *You need more configuration!* See [running][doc-running], then [testing][doc-testing]. +To ensure you are aware of this, we greet you with a nice cow. + ## Artifacts There are a few ways to build Mayastor! If you're hacking on Mayastor, it's best to use diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 870bbe4e5..000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -stable \ No newline at end of file From 672147cddba8f507f465b7c50ef6ec903f7fd2df Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Wed, 3 Feb 2021 19:44:07 +0000 Subject: [PATCH 22/78] refactor(nexus): clean up label parsing code - dispensed with NexusChildLabel struct entirely - added extra sanity checking for valid labels - create labels correctly for disks of varying size - allow changing of disk GUID if creating nexus with different UUID Resolves CAS-720 --- mayastor/src/bdev/mod.rs | 2 +- mayastor/src/bdev/nexus/nexus_bdev.rs | 64 +- .../src/bdev/nexus/nexus_bdev_children.rs | 96 -- mayastor/src/bdev/nexus/nexus_label.rs | 1512 +++++++++++------ mayastor/tests/nexus_label.rs | 10 +- mayastor/tests/nexus_rebuild.rs | 15 +- 6 files changed, 1034 insertions(+), 665 deletions(-) diff --git a/mayastor/src/bdev/mod.rs b/mayastor/src/bdev/mod.rs index a8060bece..5f69569a5 100644 --- a/mayastor/src/bdev/mod.rs +++ b/mayastor/src/bdev/mod.rs @@ -13,7 +13,7 @@ pub use nexus::{ nexus_child_error_store::{ActionType, NexusErrStore, QueryType}, nexus_child_status_config, nexus_io::Bio, - nexus_label::{GPTHeader, GptEntry}, + nexus_label::{GptEntry, GptHeader}, nexus_metadata_content::{ NexusConfig, NexusConfigVersion1, diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 5095bbd77..9bf7dfe0a 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -6,6 +6,7 @@ use std::{ convert::TryFrom, + env, fmt::{Display, Formatter}, os::raw::c_void, }; @@ -51,7 +52,7 @@ use crate::{ nexus_nbd::{NbdDisk, NbdError}, }, }, - core::{Bdev, CoreError, DmaError, Protocol, Reactor, Share}, + core::{Bdev, CoreError, Protocol, Reactor, Share}, ffihelper::errno_result_from_i32, lvs::Lvol, nexus_uri::{bdev_destroy, NexusBdevError}, @@ -113,19 +114,25 @@ pub enum Error { ShareNvmfNexus { 
source: CoreError, name: String }, #[snafu(display("Failed to unshare nexus {}", name))] UnshareNexus { source: CoreError, name: String }, - #[snafu(display("Failed to allocate label of nexus {}", name))] - AllocLabel { source: DmaError, name: String }, - #[snafu(display("Failed to write label of nexus {}", name))] + #[snafu(display( + "Failed to read child label of nexus {}: {}", + name, + source + ))] + ReadLabel { source: LabelError, name: String }, + #[snafu(display( + "Failed to write child label of nexus {}: {}", + name, + source + ))] WriteLabel { source: LabelError, name: String }, - #[snafu(display("Failed to read label from a child of nexus {}", name))] - ReadLabel { source: ChildError, name: String }, - #[snafu(display("Labels of the nexus {} are not the same", name))] - CheckLabels { name: String }, - #[snafu(display("Failed to write protective MBR of nexus {}", name))] - WritePmbr { source: LabelError, name: String }, - #[snafu(display("Failed to register IO device nexus {}", name))] + #[snafu(display( + "Failed to register IO device nexus {}: {}", + name, + source + ))] RegisterNexus { source: Errno, name: String }, - #[snafu(display("Failed to create child of nexus {}", name))] + #[snafu(display("Failed to create child of nexus {}: {}", name, source))] CreateChild { source: NexusBdevError, name: String, @@ -481,21 +488,26 @@ impl Nexus { } pub async fn sync_labels(&mut self) -> Result<(), Error> { - let label = self.update_child_labels().await.context(WriteLabel { - name: self.name.clone(), - })?; + if env::var("NEXUS_DONT_READ_LABELS").is_ok() { + // This is to allow for the specific case where the underlying + // child devices are NULL bdevs, which may be written to + // but cannot be read from. Just write out new labels, + // and don't attempt to read them back afterwards. + warn!("NOT reading disk labels on request"); + return self.create_child_labels().await.context(WriteLabel { + name: self.name.clone(), + }); + } - // Now register the bdev but update its size first - // to ensure we adhere to the partitions. - self.data_ent_offset = label.offset(); - let size_blocks = self.size / self.bdev.block_len() as u64; + // update child labels as necessary + if let Err(error) = self.update_child_labels().await { + warn!("error updating child labels: {}", error); + } - self.bdev.set_block_count(std::cmp::min( - // nexus is allowed to be smaller than the children - size_blocks, - // label might be smaller than expected due to the on disk metadata - label.get_block_count(), - )); + // check if we can read the labels back + self.validate_child_labels().await.context(ReadLabel { + name: self.name.clone(), + })?; Ok(()) } @@ -1045,6 +1057,7 @@ pub async fn nexus_create( for child in children { if let Err(err) = ni.create_and_register(child).await { + error!("failed to create child {}: {}", child, err); ni.destroy_children().await; return Err(err).context(CreateChild { name: ni.name.clone(), @@ -1072,6 +1085,7 @@ pub async fn nexus_create( } Err(e) => { + error!("failed to open nexus {}: {}", ni.name, e); ni.destroy_children().await; return Err(e); } diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index e4289b967..49be55e6e 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -23,8 +23,6 @@ //! When reconfiguring the nexus, we traverse all our children, create new IO //! channels for all children that are in the open state. 
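
Stepping back from the hunks for a moment: the rewritten `sync_labels` in `nexus_bdev.rs` above reduces to a three-step policy — skip the read-back entirely when `NEXUS_DONT_READ_LABELS` is set (for children such as null bdevs that can be written but not read), treat the label update as best-effort, and fail only if the final validation pass fails. A minimal standalone sketch of that control flow, with stub `update_labels`/`validate_labels` functions standing in for the crate's real methods:

```rust
use std::env;

#[derive(Debug)]
#[allow(dead_code)]
enum LabelError {
    Invalid,
}

// Best-effort step: failures are logged, not fatal.
fn update_labels() -> Result<(), LabelError> {
    Ok(())
}

// Read-back validation: this step must succeed.
fn validate_labels() -> Result<(), LabelError> {
    Ok(())
}

fn sync_labels() -> Result<(), LabelError> {
    // Escape hatch for children that can be written but not read back.
    if env::var("NEXUS_DONT_READ_LABELS").is_ok() {
        eprintln!("NOT reading disk labels on request");
        return update_labels();
    }
    // Updating is advisory ...
    if let Err(error) = update_labels() {
        eprintln!("error updating child labels: {:?}", error);
    }
    // ... but the labels must validate afterwards.
    validate_labels()
}

fn main() {
    sync_labels().expect("labels did not validate");
}
```
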
-use std::env; - use futures::future::join_all; use snafu::ResultExt; @@ -42,12 +40,6 @@ use crate::{ nexus_channel::DrEvent, nexus_child::{ChildState, NexusChild}, nexus_child_status_config::ChildStatusConfig, - nexus_label::{ - LabelError, - NexusChildLabel, - NexusLabel, - NexusLabelStatus, - }, }, Reason, VerboseError, @@ -457,94 +449,6 @@ impl Nexus { Ok(()) } - /// Read labels from all child devices - async fn get_child_labels(&self) -> Vec> { - let mut futures = Vec::new(); - self.children - .iter() - .map(|child| futures.push(child.get_label())) - .for_each(drop); - join_all(futures).await - } - - /// Update labels of child devices as required: - /// (1) Update any child that does not have valid label. - /// (2) Upate all children with a new label if existing (valid) labels - /// are not all identical. - /// - /// Return the resulting label. - pub async fn update_child_labels( - &mut self, - ) -> Result { - // Get a list of all children and their labels - let list = self.get_child_labels().await; - - // Search for first "valid" label - if let Some(target) = NexusChildLabel::find_target_label(&list) { - // Check that all "valid" labels are equal - if NexusChildLabel::compare_labels(&target, &list) { - let (_valid, invalid): ( - Vec, - Vec, - ) = list.into_iter().partition(|label| { - label.get_label_status() == NexusLabelStatus::Both - }); - - if invalid.is_empty() { - info!( - "{}: All child disk labels are valid and consistent", - self.name - ); - } else { - // Write out (only) those labels that require updating - info!( - "{}: Replacing missing/invalid child disk labels", - self.name - ); - self.write_labels(&target, &invalid).await? - } - - // TODO: When the GUID does not match the given UUID. - // it means that the PVC has been recreated. - // We should consider also updating the labels in such a case. - - info!("{}: existing label: {}", self.name, target.primary.guid); - trace!("{}: existing label:\n {}", self.name, target); - - return Ok(target); - } - - info!("{}: Child disk labels do not match, writing new label to all children", self.name); - } else { - info!("{}: Child disk labels invalid or absent, writing new label to all children", self.name); - } - - // Either there are no valid labels or there - // are some valid labels that do not agree. - // Generate a new label ... - let label = self.generate_label(); - - // ... and write it out to ALL children. - let label = match self.write_all_labels(&label).await { - Ok(_) => Ok(label), - Err(LabelError::ReReadError { - .. - }) => { - if env::var("NEXUS_LABEL_IGNORE_ERRORS").is_ok() { - warn!("ignoring label error on request"); - Ok(label) - } else { - Err(LabelError::ProbeError {}) - } - } - Err(e) => Err(e), - }?; - - info!("{}: new label: {}", self.name, label.primary.guid); - trace!("{}: new label:\n{}", self.name, label); - Ok(label) - } - /// The nexus is allowed to be smaller then the underlying child devices /// this function returns the smallest blockcnt of all online children as /// they MAY vary in size. diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index dd38ea31f..5abc52484 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -54,13 +54,14 @@ //! 
``` use bincode::{deserialize_from, serialize, serialize_into, Error}; use crc::{crc32, Hasher32}; -use futures::future::join_all; use serde::{ de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor}, ser::{Serialize, SerializeTuple, Serializer}, }; use snafu::{ResultExt, Snafu}; use std::{ + cmp::min, + convert::From, fmt::{self, Display}, io::{Cursor, Seek, SeekFrom}, str::FromStr, @@ -68,43 +69,55 @@ use std::{ use uuid::{self, parser, Uuid}; use crate::{ - bdev::nexus::{ - nexus_bdev::Nexus, - nexus_child::{ChildError, NexusChild}, - }, + bdev::nexus::{nexus_bdev::Nexus, nexus_child::NexusChild}, core::{CoreError, DmaBuf, DmaError}, }; #[derive(Debug, Snafu)] pub enum LabelError { - #[snafu(display("{}", source))] - NexusChildError { source: ChildError }, - #[snafu(display("Error reading {}", name))] - ReadError { name: String, source: CoreError }, - #[snafu(display("Write error"))] - WriteError { name: String, source: CoreError }, + #[snafu(display("Serialization error: {}", source))] + SerializeError { source: Error }, #[snafu(display( - "Failed to allocate buffer for reading {}: {}", + "Failed to allocate buffer for reading from child {}: {}", name, source ))] - ReadAlloc { name: String, source: DmaError }, + ReadAlloc { source: DmaError, name: String }, #[snafu(display( - "Failed to allocate buffer for writing {}: {}", + "Failed to allocate buffer for writing to child {}: {}", name, source ))] - WriteAlloc { name: String, source: DmaError }, - #[snafu(display("Serialization error: {}", source))] - SerializeError { source: Error }, + WriteAlloc { source: DmaError, name: String }, + #[snafu(display("Error reading from child {}: {}", name, source))] + ReadError { source: CoreError, name: String }, + #[snafu(display("Error writing to child {}: {}", name, source))] + WriteError { source: CoreError, name: String }, + #[snafu(display("Label is invalid: {}", source))] + InvalidLabel { source: ProbeError }, + #[snafu(display( + "Failed to obtain BdevHandle for child {}: {}", + name, + source + ))] + HandleError { source: CoreError, name: String }, + #[snafu(display( + "Device is too small to accomodate Metadata partition: blocks={}", + blocks + ))] + DeviceTooSmall { blocks: u64 }, + #[snafu(display("The written label could not be read from disk, likely the child {} is a null device", name))] + ReReadError { name: String }, +} + +#[derive(Debug, Snafu)] +pub enum ProbeError { #[snafu(display("Deserialization error: {}", source))] DeserializeError { source: Error }, - #[snafu(display("Label probe error"))] - ProbeError {}, - #[snafu(display("Label is invalid"))] - LabelInvalid {}, #[snafu(display("Incorrect MBR signature"))] MbrSignature {}, + #[snafu(display("Disk size in MBR does not match size in GPT header"))] + MbrSize {}, #[snafu(display("Incorrect GPT header signature"))] GptSignature {}, #[snafu(display( @@ -126,353 +139,210 @@ pub enum LabelError { CompareDiskSize {}, #[snafu(display("GPT stored partition table checksums differ"))] ComparePartitionTableChecksum {}, - #[snafu(display("Alternate GPT locations are incorrect"))] - BackupLocation {}, #[snafu(display("GPT partition table location is incorrect"))] PartitionTableLocation {}, - #[snafu(display("Could not get handle for child bdev {}", name,))] - HandleCreate { name: String, source: ChildError }, - #[snafu(display("The written label could not be read from disk, likely the child {} is a null device", name))] - ReReadError { name: String }, + #[snafu(display("Missing partition: {}", name))] + MissingPartition { name: 
String },
+    #[snafu(display("Primary GPT header location is incorrect"))]
+    PrimaryLocation {},
+    #[snafu(display("Secondary GPT header location is incorrect"))]
+    SecondaryLocation {},
+    #[snafu(display("Location of first usable block is incorrect"))]
+    FirstUsableBlock {},
+    #[snafu(display("Location of last usable block is incorrect"))]
+    LastUsableBlock {},
+    #[snafu(display("Partition table exceeds maximum size"))]
+    PartitionTableSize {},
+    #[snafu(display("Insufficient space reserved for partition table"))]
+    PartitionTableSpace {},
+    #[snafu(display("Partition starts before first usable block"))]
+    PartitionStart {},
+    #[snafu(display("Partition ends after last usable block"))]
+    PartitionEnd {},
+    #[snafu(display("Partition has negative size"))]
+    NegativePartitionSize {},
+    #[snafu(display("GPT header locations are inconsistent"))]
+    CompareHeaderLocation {},
+    #[snafu(display("Number of partition table entries differ"))]
+    ComparePartitionEntryCount {},
+    #[snafu(display("Partition table entry sizes differ"))]
+    ComparePartitionEntrySize {},
+    #[snafu(display("Incorrect partition layout"))]
+    IncorrectPartitions {},
+    #[snafu(display("Label redundancy has been lost"))]
+    LabelRedundancy {},
 }
 
-struct LabelData {
-    offset: u64,
-    buf: DmaBuf,
+pub struct LabelConfig {
+    disk_guid: GptGuid,
+    meta_guid: GptGuid,
+    data_guid: GptGuid,
+}
+
+impl LabelConfig {
+    fn new(guid: GptGuid) -> LabelConfig {
+        LabelConfig {
+            disk_guid: guid,
+            meta_guid: GptGuid::new_random(),
+            data_guid: GptGuid::new_random(),
+        }
+    }
 }
 
 impl Nexus {
     /// Partition Type GUID for our "MayaMeta" partition.
     pub const METADATA_PARTITION_TYPE_ID: &'static str =
         "27663382-e5e6-11e9-81b4-ca5ca5ca5ca5";
+    pub const METADATA_PARTITION_SIZE: u64 = 4 * 1024 * 1024;
 
     /// Generate a new nexus label based on the nexus configuration.
-    /// The meta partition is fixed in size and aligned to a 1MB boundary.
-    pub(crate) fn generate_label(&mut self) -> NexusLabel {
-        let block_size: u32 = self.bdev.block_len();
-        let num_blocks: u64 = self.min_num_blocks();
-
-        //
+    pub(crate) fn generate_label(
+        config: &LabelConfig,
+        block_size: u32,
+        data_blocks: u64,
+        total_blocks: u64,
+    ) -> Result<NexusLabel, LabelError> {
         // (Protective) MBR
         let mut pmbr = Pmbr::default();
+        pmbr.entries[0].protect(total_blocks);
 
-        pmbr.entries[0].ent_type = 0xee; // indicates this is a protective MBR partition
-        pmbr.entries[0].attributes = 0x00;
-        pmbr.entries[0].chs_start = [0x00, 0x02, 0x00];
-        pmbr.entries[0].chs_last = [0xff, 0xff, 0xff];
-
-        // the partition must accurately reflect the disk size where possible.
- // if the size (in blocks) is too large to fit into 32 bits, - // we set the size to 0xffff_ffff - - pmbr.entries[0].lba_start = 1; // "partition" starts immediately after the MBR - pmbr.entries[0].num_sectors = if num_blocks > u32::max_value().into() { - u32::max_value() - } else { - (num_blocks as u32) - 1 // do not count the first block that - // contains the MBR - }; - - pmbr.signature = [0x55, 0xaa]; - - // // Primary GPT header - let mut header = GPTHeader::new( - block_size, - num_blocks, - Uuid::from_bytes(self.bdev.uuid().as_bytes()), - ); + let mut header = + GptHeader::new(block_size, total_blocks, config.disk_guid); - // // Partition table - let mut entries = - vec![GptEntry::default(); header.num_entries as usize]; - - entries[0] = GptEntry { - ent_type: GptGuid::from_str(Nexus::METADATA_PARTITION_TYPE_ID) - .unwrap(), - ent_guid: GptGuid::new_random(), - // 1MB aligned - ent_start: header.lba_start, - // 4MB - ent_end: header.lba_start + u64::from((4 << 20) / block_size) - 1, - ent_attr: 0, - ent_name: GptName { - name: "MayaMeta".into(), - }, - }; - - entries[1] = GptEntry { - ent_type: GptGuid::from_str(Nexus::METADATA_PARTITION_TYPE_ID) - .unwrap(), - ent_guid: GptGuid::new_random(), - ent_start: entries[0].ent_end + 1, - ent_end: header.lba_end, - ent_attr: 0, - ent_name: GptName { - name: "MayaData".into(), - }, - }; + let partitions = Nexus::create_maya_partitions( + config, + &header, + block_size, + data_blocks, + )?; - header.table_crc = GptEntry::checksum(&entries); + header.table_crc = GptEntry::checksum(&partitions, header.num_entries); header.checksum(); - // // Secondary GPT header let backup = header.to_backup(); - NexusLabel { + Ok(NexusLabel { status: NexusLabelStatus::Neither, mbr: pmbr, primary: header, - partitions: entries, + partitions, secondary: backup, - } - } - - fn get_primary_data( - &self, - label: &NexusLabel, - ) -> Result { - let block_size = self.bdev.block_len() as u64; - let blocks = Aligned::get_blocks( - GPTHeader::PARTITION_TABLE_SIZE as u64, - block_size, - ); - // allocate 2 extra blocks for the MBR and GPT header respectively - let mut buf = - DmaBuf::new((blocks + 2) * block_size, self.bdev.alignment()) - .context(WriteAlloc { - name: String::from("primary"), - })?; - let mut writer = Cursor::new(buf.as_mut_slice()); - - // Protective MBR - writer.seek(SeekFrom::Start(440)).unwrap(); - serialize_into(&mut writer, &label.mbr).context(SerializeError {})?; - - // Primary GPT header - writer - .seek(SeekFrom::Start(label.primary.lba_self * block_size)) - .unwrap(); - serialize_into(&mut writer, &label.primary) - .context(SerializeError {})?; - - // Primary partition table - writer - .seek(SeekFrom::Start(label.primary.lba_table * block_size)) - .unwrap(); - for entry in &label.partitions { - serialize_into(&mut writer, &entry).context(SerializeError {})?; - } - - Ok(LabelData { - offset: 0, - buf, }) } - fn get_secondary_data( - &self, - label: &NexusLabel, - ) -> Result { - let block_size = self.bdev.block_len() as u64; - let blocks = Aligned::get_blocks( - GPTHeader::PARTITION_TABLE_SIZE as u64, - block_size, + /// Create partition table entries for the MayaMeta and + /// MayaData partitions based on the nexus configuration. 
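+    ///
+    /// For example, with 512-byte blocks the first usable LBA is
+    /// (1 MiB / 512) = 2048, MayaMeta occupies the next 8192 blocks
+    /// (4 MiB, LBAs 2048..=10239), and MayaData starts at LBA 10240.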
+ #[allow(clippy::vec_init_then_push)] + fn create_maya_partitions( + config: &LabelConfig, + header: &GptHeader, + block_size: u32, + data_blocks: u64, + ) -> Result, LabelError> { + let metadata_size = Aligned::get_blocks( + Nexus::METADATA_PARTITION_SIZE, + u64::from(block_size), ); - // allocate 1 extra block for the GPT header - let mut buf = - DmaBuf::new((blocks + 1) * block_size, self.bdev.alignment()) - .context(WriteAlloc { - name: String::from("secondary"), - })?; - let mut writer = Cursor::new(buf.as_mut_slice()); - - // Secondary partition table - for entry in &label.partitions { - serialize_into(&mut writer, &entry).context(SerializeError {})?; - } - - // Secondary GPT header - writer - .seek(SeekFrom::Start(GPTHeader::PARTITION_TABLE_SIZE)) - .unwrap(); - serialize_into(&mut writer, &label.secondary) - .context(SerializeError {})?; - - Ok(LabelData { - offset: label.secondary.lba_table * block_size, - buf, - }) - } + let data = header.lba_start + metadata_size; - pub async fn write_labels( - &self, - target: &NexusLabel, - list: &[NexusChildLabel<'_>], - ) -> Result<(), LabelError> { - let primary = self.get_primary_data(target)?; - let secondary = self.get_secondary_data(target)?; - - let mut futures = Vec::new(); - - for label in list { - match label.get_label_status() { - NexusLabelStatus::Both => { - // Nothing to do as both labels are already valid. - } - NexusLabelStatus::Primary => { - // Only write out secondary as primary is already valid. - futures.push( - label.child.write_at(secondary.offset, &secondary.buf), - ); - } - NexusLabelStatus::Secondary => { - // Only write out primary as secondary is already valid. - futures.push( - label.child.write_at(primary.offset, &primary.buf), - ); - } - NexusLabelStatus::Neither => { - futures.push( - label.child.write_at(primary.offset, &primary.buf), - ); - futures.push( - label.child.write_at(secondary.offset, &secondary.buf), - ); - } - } - } - - for result in join_all(futures).await { - if let Err(error) = result { - // return the first error - return Err(error); - } - } - - // check if we can read the labels - for label in list { - if let Err(error) = label.child.probe_label().await { - warn!( - "{}: {}: Error validating newly written disk label: {}", - label.child.parent, label.child.name, error - ); - return Err(LabelError::ProbeError {}); - } - info!( - "{}: {}: Disk label written", - label.child.parent, label.child.name - ); + if data > header.lba_end { + // Device is too small to accomodate Metadata partition + return Err(LabelError::DeviceTooSmall { + blocks: header.lba_alt + 1, + }); } - Ok(()) - } - - pub async fn write_all_labels( - &self, - label: &NexusLabel, - ) -> Result<(), LabelError> { - let primary = self.get_primary_data(label)?; - let secondary = self.get_secondary_data(label)?; - - let mut futures = Vec::new(); - - for child in &self.children { - futures.push(child.write_at(primary.offset, &primary.buf)); - futures.push(child.write_at(secondary.offset, &secondary.buf)); - } + let mut partitions: Vec = Vec::with_capacity(2); - for result in join_all(futures).await { - if let Err(error) = result { - // return the first error - return Err(error); - } - } + partitions.push(GptEntry { + ent_type: GptGuid::from_str(Nexus::METADATA_PARTITION_TYPE_ID) + .unwrap(), + ent_guid: config.meta_guid, + ent_start: header.lba_start, + ent_end: data - 1, + ent_attr: 0, + ent_name: "MayaMeta".into(), + }); - // check if we can read the labels - for child in &self.children { - if let Err(error) = 
child.probe_label().await { - warn!( - "{}: {}: Error validating newly written disk label: {}", - child.parent, child.name, error - ); - return Err(LabelError::ReReadError { - name: child.name.clone(), - }); - } - info!("{}: {}: Disk label written", child.parent, child.name); - } + partitions.push(GptEntry { + ent_type: GptGuid::from_str(Nexus::METADATA_PARTITION_TYPE_ID) + .unwrap(), + ent_guid: config.data_guid, + ent_start: data, + ent_end: min(data + data_blocks - 1, header.lba_end), + ent_attr: 0, + ent_name: "MayaData".into(), + }); - Ok(()) + Ok(partitions) } } -#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)] /// based on RFC4122 +#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)] pub struct GptGuid { pub time_low: u32, pub time_mid: u16, pub time_high: u16, pub node: [u8; 8], } -impl std::str::FromStr for GptGuid { - type Err = parser::ParseError; - - fn from_str(uuid: &str) -> Result { - let fields = uuid::Uuid::from_str(uuid)?; - let fields = fields.as_fields(); - Ok(GptGuid { +impl From for GptGuid { + fn from(uuid: Uuid) -> GptGuid { + let fields = uuid.as_fields(); + GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, - }) + } + } +} + +impl From for Uuid { + fn from(guid: GptGuid) -> Uuid { + Uuid::from_fields( + guid.time_low, + guid.time_mid, + guid.time_high, + &guid.node, + ) + .unwrap() + } +} + +impl FromStr for GptGuid { + type Err = parser::ParseError; + + fn from_str(uuid: &str) -> Result { + Ok(GptGuid::from(Uuid::from_str(uuid)?)) } } impl std::fmt::Display for GptGuid { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{}", - uuid::Uuid::from_fields( - self.time_low, - self.time_mid, - self.time_high, - &self.node, - ) - .unwrap() - .to_string() - ) + write!(f, "{}", Uuid::from(*self).to_string()) } } impl GptGuid { pub(crate) fn new_random() -> Self { - let fields = uuid::Uuid::new_v4(); - let fields = fields.as_fields(); - GptGuid { - time_low: fields.0, - time_mid: fields.1, - time_high: fields.2, - node: *fields.3, - } + GptGuid::from(Uuid::new_v4()) } } #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)] -#[allow(clippy::upper_case_acronyms)] -pub struct GPTHeader { +pub struct GptHeader { /// GPT signature (must be "EFI PART"). pub signature: [u8; 8], /// 00 00 01 00 up til version 2.17 pub revision: [u8; 4], /// GPT header size (92 bytes) pub header_size: u32, - /// CRC32 of the header. + /// CRC32 of the header. 
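+    /// (The CRC is computed with this field zeroed, per the GPT spec.)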
pub self_checksum: u32, pub reserved: [u8; 4], /// primary lba where the header @@ -495,17 +365,17 @@ pub struct GPTHeader { pub table_crc: u32, } -impl GPTHeader { +impl GptHeader { pub const PARTITION_TABLE_SIZE: u64 = 128 * 128; /// converts a slice into a gpt header and verifies the validity of the data - pub fn from_slice(slice: &[u8]) -> Result { + pub fn from_slice(slice: &[u8]) -> Result { let mut reader = Cursor::new(slice); - let mut gpt: GPTHeader = + let mut gpt: GptHeader = deserialize_from(&mut reader).context(DeserializeError {})?; if gpt.header_size != 92 { - return Err(LabelError::GptHeaderSize { + return Err(ProbeError::GptHeaderSize { actual_size: gpt.header_size, expected_size: 92, }); @@ -514,13 +384,13 @@ impl GPTHeader { if gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54] || gpt.revision != [0x00, 0x00, 0x01, 0x00] { - return Err(LabelError::GptSignature {}); + return Err(ProbeError::GptSignature {}); } let checksum = gpt.self_checksum; if gpt.checksum() != checksum { - return Err(LabelError::GptChecksum {}); + return Err(ProbeError::GptChecksum {}); } Ok(gpt) @@ -533,9 +403,16 @@ impl GPTHeader { self.self_checksum } - pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self { - let fields = guid.as_fields(); - GPTHeader { + // Create a new GPT header for a device with specified size + pub fn new(block_size: u32, num_blocks: u64, guid: GptGuid) -> Self { + let partition_size = Aligned::get_blocks( + GptHeader::PARTITION_TABLE_SIZE, + u64::from(block_size), + ); + + let start = u64::from((1 << 20) / block_size); + + GptHeader { signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54], revision: [0x00, 0x00, 0x01, 0x00], header_size: 92, @@ -543,16 +420,44 @@ impl GPTHeader { reserved: [0; 4], lba_self: 1, lba_alt: num_blocks - 1, - lba_start: u64::from((1 << 20) / blk_size), - lba_end: (num_blocks - 1) - - (GPTHeader::PARTITION_TABLE_SIZE / u64::from(blk_size)) - - 1, - guid: GptGuid { - time_low: fields.0, - time_mid: fields.1, - time_high: fields.2, - node: *fields.3, - }, + lba_start: start, + lba_end: num_blocks - partition_size - 2, + guid, + lba_table: 2, + num_entries: 2, + entry_size: 128, + table_crc: 0, + } + } + + // Create a reference GPT header for a device of sufficient + // size to have the requisite number of data blocks + pub fn reference(block_size: u32, data_blocks: u64, guid: GptGuid) -> Self { + let partition_size = Aligned::get_blocks( + GptHeader::PARTITION_TABLE_SIZE, + u64::from(block_size), + ); + + let metadata_size = Aligned::get_blocks( + Nexus::METADATA_PARTITION_SIZE, + u64::from(block_size), + ); + + let start = u64::from((1 << 20) / block_size); + let table = start + metadata_size + data_blocks; + let last = table + partition_size; + + GptHeader { + signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54], + revision: [0x00, 0x00, 0x01, 0x00], + header_size: 92, + self_checksum: 0, + reserved: [0; 4], + lba_self: 1, + lba_alt: last, + lba_start: start, + lba_end: table - 1, + guid, lba_table: 2, num_entries: 2, entry_size: 128, @@ -591,32 +496,39 @@ pub struct GptEntry { pub ent_end: u64, /// entry attributes, according to do the docs bit 0 MUST be zero pub ent_attr: u64, - /// utf16 name of the partition entry, do not confuse this fs labels! + /// UTF-16 name of the partition entry, + /// DO NOT confuse this with filesystem labels! 
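+    /// Stored on disk as exactly 36 UTF-16 code units (72 bytes),
+    /// zero padded.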
pub ent_name: GptName, } impl GptEntry { - /// converts a slice into a partition array + /// converts a slice into a partition table pub fn from_slice( slice: &[u8], - parts: u32, - ) -> Result, LabelError> { + count: u32, + ) -> Result, ProbeError> { let mut reader = Cursor::new(slice); - let mut part_vec = Vec::new(); - // TODO 128 should be passed in as a argument - for _ in 0 .. parts { - part_vec.push( + let mut partitions: Vec = Vec::with_capacity(count as usize); + for _ in 0 .. count { + partitions.push( deserialize_from(&mut reader).context(DeserializeError {})?, ); } - Ok(part_vec) + Ok(partitions) } - /// calculate the checksum over the partitions table - pub fn checksum(parts: &[GptEntry]) -> u32 { + /// calculate the checksum over the partition table + pub fn checksum(partitions: &[GptEntry], size: u32) -> u32 { let mut digest = crc32::Digest::new(crc32::IEEE); - for p in parts { - digest.write(&serialize(p).unwrap()); + let count = partitions.len() as u32; + for entry in partitions { + digest.write(&serialize(entry).unwrap()); + } + if count < size { + let pad = serialize(&GptEntry::default()).unwrap(); + for _ in count .. size { + digest.write(&pad); + } } digest.sum32() } @@ -624,13 +536,13 @@ impl GptEntry { #[derive(Clone, Copy, Debug, PartialEq, Serialize)] pub enum NexusLabelStatus { - /// Both primary and secondary labels are valid + /// Both primary and secondary labels are synced with disk. Both, - /// Only primary label is valid + /// Only primary label is synced with disk. Primary, - /// Only secondary label is valid + /// Only secondary label is synced with disk. Secondary, - /// Neither primary or secondary labels are valid + /// Neither primary or secondary labels are synced with disk. Neither, } @@ -646,22 +558,84 @@ pub struct NexusLabel { /// The protective MBR pub mbr: Pmbr, /// The main GPT header - pub primary: GPTHeader, + pub primary: GptHeader, /// Vector of GPT entries where the first element is considered to be ours pub partitions: Vec, /// The backup GPT header - pub secondary: GPTHeader, + pub secondary: GptHeader, } impl NexusLabel { - /// returns the offset to the first data segment - pub(crate) fn offset(&self) -> u64 { - self.partitions[1].ent_start + /// update label with new disk guid + fn set_guid(&mut self, guid: GptGuid) { + self.primary.guid = guid; + self.primary.checksum(); + self.secondary = self.primary.to_backup(); + self.status = NexusLabelStatus::Neither; } - /// returns the number of total blocks in this segment - pub(crate) fn get_block_count(&self) -> u64 { - self.partitions[1].ent_end - self.partitions[1].ent_start + 1 + /// locate a partition by name + fn get_partition(&self, name: &str) -> Option<&GptEntry> { + self.partitions + .iter() + .find(|entry| entry.ent_name.name == name) + } + + #[allow(dead_code)] + /// returns the offset of the first metadata block + pub(crate) fn metadata_offset(&self) -> Result { + match self.get_partition("MayaMeta") { + Some(entry) => Ok(entry.ent_start), + None => Err(ProbeError::MissingPartition { + name: "MayaMeta".into(), + }), + } + } + + #[allow(dead_code)] + /// returns the offset of the first data block + pub(crate) fn data_offset(&self) -> Result { + match self.get_partition("MayaData") { + Some(entry) => Ok(entry.ent_start), + None => Err(ProbeError::MissingPartition { + name: "MayaData".into(), + }), + } + } + + #[allow(dead_code)] + /// returns the total number of metadata blocks + pub(crate) fn metadata_block_count(&self) -> Result { + match self.get_partition("MayaMeta") { + 
Some(entry) => Ok(entry.ent_end - entry.ent_start + 1), + None => Err(ProbeError::MissingPartition { + name: "MayaMeta".into(), + }), + } + } + + /// returns the total number of data blocks + pub(crate) fn data_block_count(&self) -> Result { + match self.get_partition("MayaData") { + Some(entry) => Ok(entry.ent_end - entry.ent_start + 1), + None => Err(ProbeError::MissingPartition { + name: "MayaData".into(), + }), + } + } + + /// get current label config + pub fn get_label_config(&self) -> Option { + if let Some(meta) = self.get_partition("MayaMeta") { + if let Some(data) = self.get_partition("MayaData") { + return Some(LabelConfig { + disk_guid: self.primary.guid, + meta_guid: meta.ent_guid, + data_guid: data.ent_guid, + }); + } + } + None } } @@ -714,16 +688,20 @@ impl Display for NexusLabel { } } -// for arrays bigger than 32 elements, things start to get unimplemented -// in terms of derive and what not. So we create a struct with a string, -// and tell serde how to use it during (de)serializing +// For arrays bigger than 32 elements, things start to get unimplemented +// in terms of derive and what not. So we create our own "newtype" struct, +// and tell serde how to use it during serializing/deserializing. +#[derive(Debug, PartialEq, Default, Clone)] +pub struct GptName { + pub name: String, +} struct GpEntryNameVisitor; -impl<'de> Deserialize<'de> for GptName { +impl<'a> Deserialize<'a> for GptName { fn deserialize(deserializer: D) -> std::result::Result where - D: Deserializer<'de>, + D: Deserializer<'a>, { deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor) } @@ -738,7 +716,6 @@ impl Serialize for GptName { S: Serializer, { // we can't use serialize_type_struct here as we want exactly 72 bytes - let mut s = serializer.serialize_tuple(36)?; let mut out: Vec = vec![0; 36]; for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) { @@ -749,7 +726,7 @@ impl Serialize for GptName { s.end() } } -impl<'de> Visitor<'de> for GpEntryNameVisitor { +impl<'a> Visitor<'a> for GpEntryNameVisitor { type Value = GptName; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { @@ -758,7 +735,7 @@ impl<'de> Visitor<'de> for GpEntryNameVisitor { fn visit_seq(self, mut seq: A) -> std::result::Result where - A: SeqAccess<'de>, + A: SeqAccess<'a>, { let mut out = Vec::new(); let mut end = false; @@ -773,21 +750,28 @@ impl<'de> Visitor<'de> for GpEntryNameVisitor { } if end { - Ok(GptName { - name: String::from_utf16_lossy(&out), - }) + Ok(GptName::from(String::from_utf16_lossy(&out))) } else { Err(serde::de::Error::invalid_value(Unexpected::Seq, &self)) } } } -#[derive(Debug, PartialEq, Default, Clone)] -pub struct GptName { - pub name: String, +impl From for GptName { + fn from(name: String) -> GptName { + GptName { + name, + } + } +} + +impl From<&str> for GptName { + fn from(name: &str) -> GptName { + GptName::from(String::from(name)) + } } -/// although we don't use it, we must have a protective MBR to avoid systems +/// Although we don't use it, we must have a protective MBR to avoid systems /// to get confused about what's on the disk. Utils like sgdisk work fine /// without an MBR (but will warn) but as we want to be able to access the /// partitions with the nexus out of the data path, will create one here. 
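
The `MbrEntry::protect` helper introduced in the next hunk clamps the protective partition's size to what the 32-bit `num_sectors` field can represent. A standalone sketch of the same arithmetic (the free function is illustrative, not the crate's API):

```rust
// The protective partition spans the whole disk except block 0,
// which holds the MBR itself; disks too large for 32 bits are
// recorded with the conventional 0xffff_ffff marker.
fn protective_sectors(num_blocks: u64) -> u32 {
    if num_blocks > u64::from(u32::max_value()) {
        u32::max_value()
    } else {
        (num_blocks - 1) as u32
    }
}

fn main() {
    // 1 MiB disk with 512-byte blocks: 2048 blocks, 2047 sectors.
    assert_eq!(protective_sectors(2048), 2047);
    // 4 TiB disk with 512-byte blocks exceeds the 32-bit field.
    assert_eq!(protective_sectors(1 << 33), u32::max_value());
}
```
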
@@ -801,7 +785,7 @@ pub struct Pmbr { reserved: u16, /// number of partition entries entries: [MbrEntry; 4], - /// must be set to [0x55, 0xAA] + /// must be set to [0x55, 0xaa] signature: [u8; 2], } @@ -823,15 +807,40 @@ struct MbrEntry { num_sectors: u32, } +impl MbrEntry { + // Set this MBR partition entry to represent + // a protective MBR partition of given size. + fn protect(&mut self, num_blocks: u64) { + self.attributes = 0x00; // NOT bootable + self.ent_type = 0xee; // protective MBR partition + self.chs_start = [0x00, 0x02, 0x00]; // CHS address 0/0/2 + self.chs_last = [0xff, 0xff, 0xff]; // CHS address 1023/255/63 + + // The partition starts immediately after the MBR + self.lba_start = 1; + + // The partition size must accurately reflect + // the disk size where possible. + if num_blocks > u32::max_value().into() { + // If the size (in blocks) is too large to fit into 32 bits, + // then set the size to 0xffff_ffff + self.num_sectors = u32::max_value(); + } else { + // Do not count the first block that contains the MBR + self.num_sectors = (num_blocks - 1) as u32; + } + } +} + impl Pmbr { /// converts a slice into a MBR and validates the signature - pub fn from_slice(slice: &[u8]) -> Result { + pub fn from_slice(slice: &[u8]) -> Result { let mut reader = Cursor::new(slice); let mbr: Pmbr = deserialize_from(&mut reader).context(DeserializeError {})?; if mbr.signature != [0x55, 0xaa] { - return Err(LabelError::MbrSignature {}); + return Err(ProbeError::MbrSignature {}); } Ok(mbr) @@ -844,74 +853,172 @@ impl Default for Pmbr { disk_signature: 0, reserved: 0, entries: [MbrEntry::default(); 4], - signature: [0x55, 0xAA], + signature: [0x55, 0xaa], } } } impl NexusLabel { /// construct a Pmbr from raw data - fn read_mbr(buf: &DmaBuf) -> Result { + fn read_mbr(buf: &DmaBuf) -> Result { Pmbr::from_slice(&buf.as_slice()[440 .. 
512]) } - /// construct a GPTHeader from raw data - fn read_header(buf: &DmaBuf) -> Result { - GPTHeader::from_slice(buf.as_slice()) + /// construct a GPT header from raw data + fn read_header(buf: &DmaBuf) -> Result { + GptHeader::from_slice(buf.as_slice()) } - /// construct and validate primary GPTHeader - fn read_primary_header(buf: &DmaBuf) -> Result { - let primary = NexusLabel::read_header(buf)?; - if primary.lba_table != primary.lba_self + 1 { - return Err(LabelError::PartitionTableLocation {}); - } - Ok(primary) + /// construct and validate primary GPT header + fn read_primary_header( + buf: &DmaBuf, + block_size: u64, + num_blocks: u64, + ) -> Result { + let header = NexusLabel::read_header(buf)?; + NexusLabel::validate_primary_header(&header, block_size, num_blocks)?; + Ok(header) } - /// construct and validate secondary GPTHeader - fn read_secondary_header(buf: &DmaBuf) -> Result { - let secondary = NexusLabel::read_header(buf)?; - if secondary.lba_table != secondary.lba_end + 1 { - return Err(LabelError::PartitionTableLocation {}); - } - Ok(secondary) + /// construct and validate secondary GPT header + fn read_secondary_header( + buf: &DmaBuf, + block_size: u64, + num_blocks: u64, + ) -> Result { + let header = NexusLabel::read_header(buf)?; + NexusLabel::validate_secondary_header(&header, block_size, num_blocks)?; + Ok(header) } - /// construct partition table from raw data + /// construct and validate partition table fn read_partitions( buf: &DmaBuf, - header: &GPTHeader, - ) -> Result, LabelError> { + header: &GptHeader, + ) -> Result, ProbeError> { let partitions = GptEntry::from_slice(buf.as_slice(), header.num_entries)?; - if GptEntry::checksum(&partitions) != header.table_crc { - return Err(LabelError::PartitionTableChecksum {}); - } + NexusLabel::validate_partitions(&partitions, header)?; Ok(partitions) } - /// check that primary and secondary GPTHeaders - /// are consistent with each other - fn check_consistency( - primary: &GPTHeader, - secondary: &GPTHeader, - ) -> Result<(), LabelError> { - if primary.guid != secondary.guid { - return Err(LabelError::CompareDiskGuid {}); + /// check that primary GPT header is valid and consistent + fn validate_primary_header( + primary: &GptHeader, + block_size: u64, + num_blocks: u64, + ) -> Result<(), ProbeError> { + if primary.lba_self != 1 { + return Err(ProbeError::PrimaryLocation {}); + } + if primary.lba_alt + 1 != num_blocks { + return Err(ProbeError::SecondaryLocation {}); + } + if primary.lba_end >= primary.lba_alt { + return Err(ProbeError::LastUsableBlock {}); + } + if primary.lba_table != primary.lba_self + 1 { + return Err(ProbeError::PartitionTableLocation {}); + } + if (primary.num_entries * primary.entry_size) as u64 + > GptHeader::PARTITION_TABLE_SIZE + { + return Err(ProbeError::PartitionTableSize {}); + } + if primary.lba_table + + Aligned::get_blocks(GptHeader::PARTITION_TABLE_SIZE, block_size) + > primary.lba_start + { + return Err(ProbeError::PartitionTableSpace {}); + } + Ok(()) + } + + /// check that secondary GPT header is valid and consistent + fn validate_secondary_header( + secondary: &GptHeader, + block_size: u64, + num_blocks: u64, + ) -> Result<(), ProbeError> { + if secondary.lba_alt != 1 { + return Err(ProbeError::PrimaryLocation {}); + } + if secondary.lba_self + 1 != num_blocks { + return Err(ProbeError::SecondaryLocation {}); + } + if secondary.lba_alt >= secondary.lba_start { + return Err(ProbeError::FirstUsableBlock {}); } - if primary.lba_start != secondary.lba_start - || primary.lba_end != 
secondary.lba_end + if secondary.lba_table != secondary.lba_end + 1 { + return Err(ProbeError::PartitionTableLocation {}); + } + if (secondary.num_entries * secondary.entry_size) as u64 + > GptHeader::PARTITION_TABLE_SIZE { - return Err(LabelError::CompareDiskSize {}); + return Err(ProbeError::PartitionTableSize {}); } - if primary.lba_alt != secondary.lba_self - || secondary.lba_alt != primary.lba_self + if secondary.lba_table + + Aligned::get_blocks(GptHeader::PARTITION_TABLE_SIZE, block_size) + > secondary.lba_self { - return Err(LabelError::BackupLocation {}); + return Err(ProbeError::PartitionTableSpace {}); + } + Ok(()) + } + + /// check that partition table entries are valid and consistent + fn validate_partitions( + partitions: &[GptEntry], + header: &GptHeader, + ) -> Result<(), ProbeError> { + for entry in partitions { + if 0 < entry.ent_start && entry.ent_start < header.lba_start { + return Err(ProbeError::PartitionStart {}); + } + if entry.ent_start > entry.ent_end { + return Err(ProbeError::NegativePartitionSize {}); + } + if entry.ent_end > header.lba_end { + return Err(ProbeError::PartitionEnd {}); + } + } + if GptEntry::checksum(partitions, header.num_entries) + != header.table_crc + { + return Err(ProbeError::PartitionTableChecksum {}); + } + Ok(()) + } + + /// check that primary and secondary GPT headers + /// are consistent with each other + fn consistency_check( + primary: &GptHeader, + secondary: &GptHeader, + ) -> Result<(), ProbeError> { + if primary.lba_self != secondary.lba_alt { + return Err(ProbeError::CompareHeaderLocation {}); + } + if primary.lba_alt != secondary.lba_self { + return Err(ProbeError::CompareHeaderLocation {}); + } + if primary.lba_start != secondary.lba_start { + return Err(ProbeError::FirstUsableBlock {}); + } + if primary.lba_end != secondary.lba_end { + return Err(ProbeError::LastUsableBlock {}); + } + if primary.guid != secondary.guid { + return Err(ProbeError::CompareDiskGuid {}); + } + if primary.num_entries != secondary.num_entries { + return Err(ProbeError::ComparePartitionEntryCount {}); + } + if primary.entry_size != secondary.entry_size { + return Err(ProbeError::ComparePartitionEntrySize {}); } if primary.table_crc != secondary.table_crc { - return Err(LabelError::ComparePartitionTableChecksum {}); + return Err(ProbeError::ComparePartitionTableChecksum {}); } Ok(()) } @@ -920,235 +1027,568 @@ impl NexusLabel { impl NexusChild { /// read and validate this child's label pub async fn probe_label(&self) -> Result { - let hndl = self.handle().context(ReadError { + let handle = self.handle().context(HandleError { name: self.name.clone(), })?; - let bdev = hndl.get_bdev(); - let block_size = hndl.get_bdev().block_len() as u64; + + let bdev = handle.get_bdev(); + let block_size = u64::from(bdev.block_len()); + let num_blocks = bdev.num_blocks(); // Protective MBR - let mut buf = hndl.dma_malloc(block_size).context(ReadAlloc { + let mut buf = handle.dma_malloc(block_size).context(ReadAlloc { name: String::from("header"), })?; - hndl.read_at(0, &mut buf).await.context(ReadError { + handle.read_at(0, &mut buf).await.context(ReadError { name: String::from("MBR"), })?; - let mbr = NexusLabel::read_mbr(&buf)?; + let mbr = NexusLabel::read_mbr(&buf).context(InvalidLabel {})?; - let status: NexusLabelStatus; - let primary: GPTHeader; - let secondary: GPTHeader; - let active: &GPTHeader; + // GPT headers - // - // GPT header(s) + let status: NexusLabelStatus; + let primary: GptHeader; + let secondary: GptHeader; + let active: &GptHeader; - // Get 
primary. - hndl.read_at(block_size, &mut buf) + // Get primary GPT header. + handle + .read_at(block_size, &mut buf) .await .context(ReadError { name: String::from("primary GPT header"), })?; - match NexusLabel::read_primary_header(&buf) { + match NexusLabel::read_primary_header(&buf, block_size, num_blocks) { Ok(header) => { primary = header; active = &primary; - // Get secondary. - let offset = (bdev.num_blocks() - 1) * block_size; - hndl.read_at(offset, &mut buf).await.context(ReadError { + // Get secondary GPT header. + let offset = (num_blocks - 1) * block_size; + handle.read_at(offset, &mut buf).await.context(ReadError { name: String::from("secondary GPT header"), })?; - match NexusLabel::read_secondary_header(&buf) { + match NexusLabel::read_secondary_header( + &buf, block_size, num_blocks, + ) { Ok(header) => { - // Both primary and secondary GPT headers are valid. - // Check if they are consistent with each other. - match NexusLabel::check_consistency(&primary, &header) { - Ok(()) => { - // All good. - secondary = header; - status = NexusLabelStatus::Both; - } - Err(error) => { - warn!("{}: {}: The primary and secondary GPT headers are inconsistent: {}", self.parent, self.name, error); - warn!("{}: {}: Recreating secondary GPT header from primary!", self.parent, self.name); - secondary = primary.to_backup(); - status = NexusLabelStatus::Primary; - } - } + NexusLabel::consistency_check(&primary, &header) + .context(InvalidLabel {})?; + // All good - primary and secondary GTP headers + // are valid and consistent with each other. + secondary = header; + status = NexusLabelStatus::Both; } - Err(error) => { - warn!( - "{}: {}: The secondary GPT header is invalid: {}", - self.parent, self.name, error - ); - warn!("{}: {}: Recreating secondary GPT header from primary!", self.parent, self.name); + Err(_) => { + // Secondary GPT header is either not present + // or invalid. Construct new secondary + // GPT header from primary. secondary = primary.to_backup(); status = NexusLabelStatus::Primary; } } } Err(error) => { - warn!( - "{}: {}: The primary GPT header is invalid: {}", - self.parent, self.name, error - ); - // Get secondary and see if we are able to proceed. - let offset = (bdev.num_blocks() - 1) * block_size; - hndl.read_at(offset, &mut buf).await.context(ReadError { + // Primary GPT header is either not present or invalid. + // See if we can obtain a valid secondary GPT header. + let offset = (num_blocks - 1) * block_size; + handle.read_at(offset, &mut buf).await.context(ReadError { name: String::from("secondary GPT header"), })?; - match NexusLabel::read_secondary_header(&buf) { + match NexusLabel::read_secondary_header( + &buf, block_size, num_blocks, + ) { Ok(header) => { secondary = header; active = &secondary; - warn!("{}: {}: Recreating primary GPT header from secondary!", self.parent, self.name); + // Construct new primary GPT header from secondary. primary = secondary.to_primary(); status = NexusLabelStatus::Secondary; } - Err(error) => { - warn!( - "{}: {}: The secondary GPT header is invalid: {}", - self.parent, self.name, error - ); - warn!("{}: {}: Both primary and secondary GPT headers are invalid!", self.parent, self.name); - return Err(LabelError::LabelInvalid {}); + Err(_) => { + // Neither primary or secondary GPT header + // is present or valid. + return Err(LabelError::InvalidLabel { + source: error, + }); } } } } + // The disk size recorded in protective MBR + // must be consistent with GPT header. 
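+        // e.g. a disk of 0x2000 blocks records num_sectors 0x1fff in
+        // the MBR and lba_alt 0x1fff in the GPT header; disks too large
+        // for 32 bits record the 0xffff_ffff marker instead.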
if mbr.entries[0].num_sectors != 0xffff_ffff - && mbr.entries[0].num_sectors as u64 != primary.lba_alt + && u64::from(mbr.entries[0].num_sectors) != primary.lba_alt { - warn!("{}: {}: The protective MBR disk size does not match the GPT disk size!", self.parent, self.name); - return Err(LabelError::LabelInvalid {}); + return Err(LabelError::InvalidLabel { + source: ProbeError::MbrSize {}, + }); } - // // Partition table let blocks = Aligned::get_blocks( - (active.entry_size * active.num_entries) as u64, + u64::from(active.entry_size * active.num_entries), block_size, ); let mut buf = - hndl.dma_malloc(blocks * block_size).context(ReadAlloc { + handle.dma_malloc(blocks * block_size).context(ReadAlloc { name: String::from("partition table"), })?; let offset = active.lba_table * block_size; - hndl.read_at(offset, &mut buf).await.context(ReadError { + handle.read_at(offset, &mut buf).await.context(ReadError { name: String::from("partition table"), })?; - let mut partitions = NexusLabel::read_partitions(&buf, active)?; + let mut partitions = NexusLabel::read_partitions(&buf, active) + .context(InvalidLabel {})?; - // Some tools always write 128 partition entries, even though most - // are not used. In any case we are only ever interested - // in the first two partitions, so we drain the others. - let entries = partitions.drain(.. 2).collect::>(); + // There can be up to 128 partition entries stored on disk, + // even though most are not used. Retain only those entries + // that actually define partitions. + partitions.retain(|entry| entry.ent_start > 0 && entry.ent_end > 0); Ok(NexusLabel { status, mbr, primary, - partitions: entries, + partitions, secondary, }) } - /// return this child and its label - pub async fn get_label(&self) -> NexusChildLabel<'_> { - let label = match self.probe_label().await { - Ok(label) => Some(label), - Err(error) => { - warn!( - "{}: {}: Error probing label: {}", - self.parent, self.name, error - ); - None + // Check for the presence of "MayaMeta" and "MayaData" partitions + fn check_maya_partitions( + reference: &[GptEntry], + label: &NexusLabel, + block_size: u32, + ) -> bool { + match label.get_partition("MayaMeta") { + Some(entry) => { + if entry.ent_start != reference[0].ent_start { + return false; + } + if entry.ent_end != reference[0].ent_end { + return false; + } + if (entry.ent_end - entry.ent_start + 1) * u64::from(block_size) + < Nexus::METADATA_PARTITION_SIZE + { + return false; + } } - }; + None => { + return false; + } + } - NexusChildLabel { - child: self, - label, + if let Some(entry) = label.get_partition("MayaData") { + if entry.ent_start == reference[1].ent_start { + return true; + } } + + false } - /// write the contents of the buffer to this child - async fn write_at( + /// Create a new label on this child + async fn create_label( + &mut self, + config: &LabelConfig, + block_size: u32, + data_blocks: u64, + total_blocks: u64, + ) -> Result { + info!("creating new label for child {}", self.name); + let label = Nexus::generate_label( + config, + block_size, + data_blocks, + total_blocks, + )?; + self.write_label(&label).await?; + Ok(label) + } + + /// Create or Update label on this child as and when necessary + async fn update_label( + &mut self, + reference: &[GptEntry], + config: &LabelConfig, + block_size: u32, + data_blocks: u64, + total_blocks: u64, + ) -> Result { + match self.probe_label().await { + Ok(mut label) + if NexusChild::check_maya_partitions( + reference, &label, block_size, + ) => + { + // Use existing label + if 
label.primary.guid != config.disk_guid { + info!("updating existing label for child {}: setting guid to {}", self.name, config.disk_guid); + label.set_guid(config.disk_guid); + } + self.write_label(&label).await?; + Ok(label) + } + Ok(_) => { + // Replace existing label + self.create_label(config, block_size, data_blocks, total_blocks) + .await + } + Err(LabelError::InvalidLabel { + .. + }) => { + // Create new label + self.create_label(config, block_size, data_blocks, total_blocks) + .await + } + Err(error) => Err(error), + } + } + + /// Validate label on this child + async fn validate_label( &self, - offset: u64, - buf: &DmaBuf, - ) -> Result { - let hdl = self.handle().context(WriteError { - name: self.name.clone(), - })?; - Ok(hdl.write_at(offset, buf).await.context(WriteError { - name: self.name.clone(), - })?) + reference: &[GptEntry], + block_size: u32, + ) -> Result { + let label = self.probe_label().await?; + + if !NexusChild::check_maya_partitions(reference, &label, block_size) { + return Err(LabelError::InvalidLabel { + source: ProbeError::IncorrectPartitions {}, + }); + } + + if label.status != NexusLabelStatus::Both { + return Err(LabelError::InvalidLabel { + source: ProbeError::LabelRedundancy {}, + }); + } + + Ok(label) } } -pub struct NexusChildLabel<'a> { - pub child: &'a NexusChild, - pub label: Option, -} +impl Nexus { + /// Validate label on each child device + pub(crate) async fn validate_child_labels( + &mut self, + ) -> Result<(), LabelError> { + let guid = GptGuid::from(Uuid::from_bytes(self.bdev.uuid().as_bytes())); + let config = LabelConfig::new(guid); + + let block_size = self.bdev.block_len(); + let nexus_blocks = self.size / u64::from(block_size); + let mut min_blocks = nexus_blocks; + + // Generate "reference" partition table entries + let header = GptHeader::reference(block_size, nexus_blocks, guid); + let reference = Nexus::create_maya_partitions( + &config, + &header, + block_size, + nexus_blocks, + )?; + let data_offset = reference[1].ent_start; + + for child in self.children.iter_mut() { + let handle = child.handle().context(HandleError { + name: child.name.clone(), + })?; -impl NexusChildLabel<'_> { - /// Return the current status of this NexusChildLabel. - pub fn get_label_status(&self) -> NexusLabelStatus { - match &self.label { - Some(label) => label.status, - None => NexusLabelStatus::Neither, + let bdev = handle.get_bdev(); + let label = + child.validate_label(&reference, bdev.block_len()).await?; + let data_blocks = + label.data_block_count().context(InvalidLabel {})?; + + // Adjust size of data partition if necessary + if data_blocks < min_blocks { + min_blocks = data_blocks; + } } + + // Update the nexus size + self.data_ent_offset = data_offset; + self.bdev.set_block_count(min_blocks); + + Ok(()) } - /// Search for the first "valid" NexusLabel. - /// Prefer a target label where the primary and secondary GPT headers - /// are both valid (on disk), but a target with at least one valid - /// GPT header (on disk) is considered acceptable. 
- pub fn find_target_label( - list: &[NexusChildLabel<'_>], - ) -> Option { - for status in &[ - NexusLabelStatus::Both, - NexusLabelStatus::Primary, - NexusLabelStatus::Secondary, - ] { - for label in list { - if let Some(target) = &label.label { - if target.status == *status { - return Some(target.clone()); + // Get configuration from first valid label with specified disk guid + async fn find_label_config( + &self, + guid: GptGuid, + ) -> Result, LabelError> { + for child in self.children.iter() { + match child.probe_label().await { + Ok(label) => { + if label.primary.guid != guid { + continue; } + if let Some(config) = label.get_label_config() { + return Ok(Some(config)); + } + } + Err(LabelError::InvalidLabel { + .. + }) => { + // Label is most likely not present or possibly invalid. + continue; + } + Err(error) => { + // Any other errors are fatal. + return Err(error); } } } - None + Ok(None) } - /// Compare all (existing) labels in list against the target NexusLabel. - /// Return true (only) if all are identical. - pub fn compare_labels( - target: &NexusLabel, - list: &[NexusChildLabel<'_>], - ) -> bool { - for label in list { - if let Some(entry) = &label.label { - if entry.mbr != target.mbr - || entry.primary != target.primary - || entry.secondary != target.secondary - || entry.partitions != target.partitions - { - return false; - } + /// Create or Update label on each child device as and when necessary + pub(crate) async fn update_child_labels( + &mut self, + ) -> Result<(), LabelError> { + let guid = GptGuid::from(Uuid::from_bytes(self.bdev.uuid().as_bytes())); + let config = self + .find_label_config(guid) + .await? + .unwrap_or_else(|| LabelConfig::new(guid)); + + let block_size = self.bdev.block_len(); + let nexus_blocks = self.size / u64::from(block_size); + + // Generate "reference" partition table entries + let header = GptHeader::reference(block_size, nexus_blocks, guid); + let reference = Nexus::create_maya_partitions( + &config, + &header, + block_size, + nexus_blocks, + )?; + + for child in self.children.iter_mut() { + let handle = child.handle().context(HandleError { + name: child.name.clone(), + })?; + + let bdev = handle.get_bdev(); + child + .update_label( + &reference, + &config, + bdev.block_len(), + nexus_blocks, + bdev.num_blocks(), + ) + .await?; + } + + Ok(()) + } + + /// Create a new label on each child device. + /// DO NOT check for existing labels and ALWAYS write a new label. 
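+    /// (Used by `sync_labels` when `NEXUS_DONT_READ_LABELS` is set.)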
+ pub(crate) async fn create_child_labels( + &mut self, + ) -> Result<(), LabelError> { + let guid = GptGuid::from(Uuid::from_bytes(self.bdev.uuid().as_bytes())); + let config = LabelConfig::new(guid); + + let block_size = self.bdev.block_len(); + let nexus_blocks = self.size / u64::from(block_size); + let mut min_blocks = nexus_blocks; + + // Generate "reference" partition table entries + let header = GptHeader::reference(block_size, nexus_blocks, guid); + let reference = Nexus::create_maya_partitions( + &config, + &header, + block_size, + nexus_blocks, + )?; + let data_offset = reference[1].ent_start; + + for child in self.children.iter_mut() { + let handle = child.handle().context(HandleError { + name: child.name.clone(), + })?; + + let bdev = handle.get_bdev(); + let label = child + .create_label( + &config, + bdev.block_len(), + nexus_blocks, + bdev.num_blocks(), + ) + .await?; + let data_blocks = + label.data_block_count().context(InvalidLabel {})?; + + // Adjust size of data partition if necessary + if data_blocks < min_blocks { + min_blocks = data_blocks; } } - true + + // Update the nexus size + self.data_ent_offset = data_offset; + self.bdev.set_block_count(min_blocks); + + Ok(()) + } +} + +struct LabelData { + offset: u64, + buf: DmaBuf, +} + +impl NexusChild { + /// generate raw data for (primary) label ready to be written to disk + fn get_primary_data( + &self, + label: &NexusLabel, + ) -> Result { + let handle = self.handle().context(HandleError { + name: self.name.clone(), + })?; + + let bdev = handle.get_bdev(); + let block_size = u64::from(bdev.block_len()); + + let mut buf = + DmaBuf::new(label.primary.lba_start * block_size, bdev.alignment()) + .context(WriteAlloc { + name: String::from("primary"), + })?; + + let mut writer = Cursor::new(buf.as_mut_slice()); + + // Protective MBR + writer.seek(SeekFrom::Start(440)).unwrap(); + serialize_into(&mut writer, &label.mbr).context(SerializeError {})?; + + // Primary GPT header + writer + .seek(SeekFrom::Start(label.primary.lba_self * block_size)) + .unwrap(); + serialize_into(&mut writer, &label.primary) + .context(SerializeError {})?; + + // Primary partition table + writer + .seek(SeekFrom::Start(label.primary.lba_table * block_size)) + .unwrap(); + for entry in label.partitions.iter() { + serialize_into(&mut writer, &entry).context(SerializeError {})?; + } + + Ok(LabelData { + offset: 0, + buf, + }) + } + + /// generate raw data for (secondary) label ready to be written to disk + fn get_secondary_data( + &self, + label: &NexusLabel, + ) -> Result { + let handle = self.handle().context(HandleError { + name: self.name.clone(), + })?; + + let bdev = handle.get_bdev(); + let block_size = u64::from(bdev.block_len()); + + let mut buf = DmaBuf::new( + (label.secondary.lba_self - label.secondary.lba_table + 1) + * block_size, + bdev.alignment(), + ) + .context(WriteAlloc { + name: String::from("secondary"), + })?; + + let mut writer = Cursor::new(buf.as_mut_slice()); + + // Secondary partition table + for entry in label.partitions.iter() { + serialize_into(&mut writer, &entry).context(SerializeError {})?; + } + + // Secondary GPT header + writer + .seek(SeekFrom::Start( + (label.secondary.lba_self - label.secondary.lba_table) + * block_size, + )) + .unwrap(); + serialize_into(&mut writer, &label.secondary) + .context(SerializeError {})?; + + Ok(LabelData { + offset: label.secondary.lba_table * block_size, + buf, + }) + } + + /// write the contents of the buffer to this child + async fn write_at( + &self, + offset: u64, + buf: 
&DmaBuf,
+ ) -> Result<u64, LabelError> {
+ let handle = self.handle().context(HandleError {
+ name: self.name.clone(),
+ })?;
+
+ Ok(handle.write_at(offset, buf).await.context(WriteError {
+ name: self.name.clone(),
+ })?)
+ }
+
+ pub async fn write_label(
+ &self,
+ label: &NexusLabel,
+ ) -> Result<(), LabelError> {
+ match label.status {
+ NexusLabelStatus::Both => {
+ // Nothing to do as both labels on disk are valid.
+ }
+ NexusLabelStatus::Primary => {
+ // Only write out secondary as disk already has valid primary.
+ info!("writing secondary label to child {}", self.name);
+ let secondary = self.get_secondary_data(label)?;
+ self.write_at(secondary.offset, &secondary.buf).await?;
+ }
+ NexusLabelStatus::Secondary => {
+ // Only write out primary as disk already has valid secondary.
+ info!("writing primary label to child {}", self.name);
+ let primary = self.get_primary_data(label)?;
+ self.write_at(primary.offset, &primary.buf).await?;
+ }
+ NexusLabelStatus::Neither => {
+ // Write out both labels.
+ info!("writing label to child {}", self.name);
+ let primary = self.get_primary_data(label)?;
+ let secondary = self.get_secondary_data(label)?;
+ self.write_at(primary.offset, &primary.buf).await?;
+ self.write_at(secondary.offset, &secondary.buf).await?;
+ }
+ }
+
+ Ok(())
 }
 }

 pub trait Aligned {
- /// Return the (appropriately aligned) number of blocks representing this
- /// size.
+ /// Return the (appropriately aligned) number of blocks
+ /// representing this size.
 fn get_blocks(size: Self, block_size: Self) -> Self;
 }

diff --git a/mayastor/tests/nexus_label.rs b/mayastor/tests/nexus_label.rs
index efb698484..5909d452a 100644
--- a/mayastor/tests/nexus_label.rs
+++ b/mayastor/tests/nexus_label.rs
@@ -6,7 +6,7 @@ use std::{
 use bincode::serialize_into;
 use mayastor::{
- bdev::{nexus_create, nexus_lookup, GPTHeader, GptEntry},
+ bdev::{nexus_create, nexus_lookup, GptEntry, GptHeader},
 core::{
 mayastor_env_stop,
 DmaBuf,
@@ -74,7 +74,7 @@ fn test_known_label() {
 let mut hdr_buf: [u8; 512] = [0; 512];
 file.read_exact(&mut hdr_buf).unwrap();

- let mut hdr: GPTHeader = GPTHeader::from_slice(&hdr_buf).unwrap();
+ let mut hdr: GptHeader = GptHeader::from_slice(&hdr_buf).unwrap();
 assert_eq!(hdr.self_checksum, CRC32);
 assert_eq!(hdr.guid.to_string(), HDR_GUID,);
@@ -91,7 +91,7 @@ fn test_known_label() {

 assert_eq!(hdr.checksum(), CRC32);

- let array_checksum = GptEntry::checksum(&partitions);
+ let array_checksum = GptEntry::checksum(&partitions, hdr.num_entries);

 assert_eq!(array_checksum, hdr.table_crc);
@@ -105,9 +105,9 @@ fn test_known_label() {
 }

 let partitions =
- GptEntry::from_slice(&buf.as_slice(), hdr.num_entries).unwrap();
+ GptEntry::from_slice(buf.as_slice(), hdr.num_entries).unwrap();

- let array_checksum = GptEntry::checksum(&partitions);
+ let array_checksum = GptEntry::checksum(&partitions, hdr.num_entries);

 assert_eq!(array_checksum, hdr.table_crc);
 }
diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs
index bd930060e..e0b52ce5d 100644
--- a/mayastor/tests/nexus_rebuild.rs
+++ b/mayastor/tests/nexus_rebuild.rs
@@ -1,4 +1,4 @@
-use std::sync::Mutex;
+use std::{sync::Mutex, time::Duration};

 use crossbeam::channel::unbounded;
 use once_cell::sync::Lazy;
@@ -7,11 +7,12 @@ use tracing::error;
 use mayastor::{
 bdev::nexus_lookup,
 core::{MayastorCliArgs, MayastorEnvironment, Mthread, Reactor},
- rebuild::RebuildJob,
+ rebuild::{RebuildJob, RebuildState},
 };
 use rpc::mayastor::ShareProtocolNexus;

 pub mod common;
+use common::wait_for_rebuild;

// each test `should` use a 
different nexus name to prevent clashing with // one another. This allows the failed tests to `panic gracefully` improving @@ -165,6 +166,16 @@ fn rebuild_lookup() { .count(), 1 ); + + // wait for the rebuild to start - and then pause it + wait_for_rebuild( + get_dev(children), + RebuildState::Running, + Duration::from_secs(1), + ); + nexus.pause_rebuild(&get_dev(children)).await.unwrap(); + assert_eq!(RebuildJob::lookup_src(&src).len(), 1); + nexus.add_child(&get_dev(children + 1), true).await.unwrap(); let _ = nexus.start_rebuild(&get_dev(children + 1)).await.unwrap(); assert_eq!(RebuildJob::lookup_src(&src).len(), 2); From da84ccb23acaa52c9d5ccdf24abff0ac0ce02a41 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Fri, 19 Feb 2021 10:56:05 +0000 Subject: [PATCH 23/78] chore: upgrade bindgen to 0.57 Compared to 0.54, this brings fixes to bitfield layouts, enums and cross-compilation. Also do a cargo update and fix the use of serde::export, which was a bug in serde and meant for its internal use. --- Cargo.lock | 973 +++++++++--------- mayastor/src/bdev/nexus/nexus_child.rs | 8 +- .../src/bdev/nexus/nexus_child_error_store.rs | 4 +- mayastor/src/core/channel.rs | 4 +- mayastor/src/core/descriptor.rs | 7 +- mayastor/src/core/handle.rs | 3 +- mayastor/src/core/reactor.rs | 3 +- mayastor/src/subsys/nvmf/subsystem.rs | 4 +- mayastor/src/subsys/nvmf/transport.rs | 3 +- nix/pkgs/mayastor/default.nix | 2 +- spdk-sys/Cargo.toml | 2 +- 11 files changed, 520 insertions(+), 493 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2e2772d3..7b727328b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -85,7 +87,7 @@ dependencies = [ "log", "mime", "percent-encoding 2.1.0", - "pin-project 1.0.2", + "pin-project 1.0.5", "rand 0.7.3", "regex", "serde", @@ -93,17 +95,17 @@ dependencies = [ "serde_urlencoded 0.7.0", "sha-1", "slab", - "time 0.2.23", + "time 0.2.25", ] [[package]] name = "actix-macros" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60f9ba7c4e6df97f3aacb14bb5c0cd7d98a49dcbaed0d7f292912ad9a6a3ed2" +checksum = "b4ca8ce00b267af8ccebbd647de0d61e0674b6e61185cc7a592ff88772bed655" dependencies = [ - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -111,15 +113,15 @@ name = "actix-openapi-macros" version = "0.1.0" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "actix-router" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd1f7dbda1645bf7da33554db60891755f6c01c1b2169e2f4c492098d30c235" +checksum = "2ad299af73649e1fc893e333ccf86f377751eb95ff875d095131574c6f43452c" dependencies = [ "bytestring", "http 0.2.3", @@ -240,9 +242,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "3.3.0" +version = "3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a89a7b133e734f6d1e555502d450408ae04105826aef7e3605019747d3ac732" +checksum = "e641d4a172e7faa0862241a20ff4f1f5ab0ab7c279f00c2d4587b77483477b86" dependencies = [ "actix-codec", "actix-http", @@ -266,14 +268,14 @@ dependencies = [ "fxhash", "log", "mime", - "pin-project 1.0.2", + "pin-project 1.0.5", "regex", "rustls", "serde", "serde_json", "serde_urlencoded 
0.7.0", "socket2", - "time 0.2.23", + "time 0.2.25", "tinyvec", "url", ] @@ -285,8 +287,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad26f77093333e0e7c6ffe54ebe3582d908a104e448723eec6d43d08b07143fb" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -305,9 +307,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -327,7 +329,7 @@ dependencies = [ "dyn-clonable", "futures", "http 0.2.3", - "humantime 2.0.1", + "humantime 2.1.0", "lazy_static", "mbus_api", "nats", @@ -384,9 +386,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.34" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" [[package]] name = "array_tool" @@ -408,15 +410,15 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assert_matches" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-channel" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" dependencies = [ "concurrent-queue", "event-listener", @@ -425,15 +427,15 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1ff21a63d3262af46b9f33a826a8d134e2d0d9b2179c86034948b732ea8b2a" +checksum = "b72c1f1154e234325b50864a349b9c8e56939e266a4c307c0f159812df2f9537" dependencies = [ "bytes 0.5.6", "flate2", "futures-core", "memchr", - "pin-project-lite 0.1.11", + "pin-project-lite 0.2.4", ] [[package]] @@ -463,9 +465,9 @@ dependencies = [ [[package]] name = "async-io" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a0b2bb8ae20fede194e779150fe283f65a4a08461b496de546ec366b174ad9" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" dependencies = [ "concurrent-queue", "fastrand", @@ -504,17 +506,17 @@ dependencies = [ [[package]] name = "async-process" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" +checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b" dependencies = [ "async-io", "blocking", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "event-listener", "futures-lite", "once_cell", - "signal-hook", + "signal-hook 0.3.4", "winapi 0.3.9", ] @@ -546,8 +548,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ "proc-macro2 1.0.24", - 
"quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -563,8 +565,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -598,9 +600,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "awc" -version = "2.0.2" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9056f5e27b0d56bedd82f78eceaba0bcddcbbcbbefae3cd0a53994b28c96ff5" +checksum = "b381e490e7b0cfc37ebc54079b0413d8093ef43d14a4e4747083f7fa47a9e691" dependencies = [ "actix-codec", "actix-http", @@ -623,9 +625,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -683,22 +685,21 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.54.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" +checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" dependencies = [ "bitflags", "cexpr", - "cfg-if 0.1.10", "clang-sys", "clap", - "env_logger", + "env_logger 0.8.3", "lazy_static", "lazycell", "log", "peeking_take_while", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "regex", "rustc-hash", "shlex", @@ -838,9 +839,9 @@ checksum = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" [[package]] name = "byte-unit" @@ -850,9 +851,9 @@ checksum = "415301c9de11005d4b92193c0eb7ac7adc37e5a49e0ac9bed0a42343512744b8" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -884,11 +885,11 @@ checksum = "81a18687293a1546b67c246452202bbbf143d239cb43494cc163da14979082da" [[package]] name = "bytestring" -version = "0.1.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7c05fa5172da78a62d9949d662d2ac89d4cc7355d7b49adee5163f1fb3f363" +checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", ] [[package]] @@ -899,9 +900,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15" +checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" [[package]] name = "cexpr" @@ -940,9 +941,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "0.29.3" 
+version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" +checksum = "5cb92721cb37482245ed88428f72253ce422b3b4ee169c70a0642521bb5db4cc" dependencies = [ "glob", "libc", @@ -973,15 +974,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "cloudabi" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" -dependencies = [ - "bitflags", -] - [[package]] name = "colored_json" version = "2.1.0" @@ -1020,21 +1012,11 @@ dependencies = [ "cache-padded", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const-random" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486d435a7351580347279f374cb8a3c16937485441db80181357b7c4d70f17ed" +checksum = "f590d95d011aa80b063ffe3253422ed5aa462af4e9867d43ce8337562bac77c4" dependencies = [ "const-random-macro", "proc-macro-hack", @@ -1042,11 +1024,11 @@ dependencies = [ [[package]] name = "const-random-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a84d8ff70e3ec52311109b019c27672b4c1929e4cf7c18bcf0cd9fb5e230be" +checksum = "615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.2", "lazy_static", "proc-macro-hack", "tiny-keccak", @@ -1054,9 +1036,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -1071,7 +1053,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784ad0fbab4f3e9cef09f20e0aea6000ae08d2cb98ac4c0abc53df18803d702f" dependencies = [ "percent-encoding 2.1.0", - "time 0.2.23", + "time 0.2.25", "version_check", ] @@ -1242,7 +1224,7 @@ dependencies = [ "bytesize", "chrono", "clap", - "env_logger", + "env_logger 0.7.1", "failure", "futures", "git-version", @@ -1306,9 +1288,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" dependencies = [ "byteorder", "digest", @@ -1329,12 +1311,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" +checksum = "11947000d710ff98138229f633039982f0fef2d9a3f546c21d610fee5f8631d5" dependencies = [ - "darling_core 0.10.2", - "darling_macro 0.10.2", + "darling_core 0.12.0", + "darling_macro 0.12.0", ] [[package]] @@ -1353,16 +1335,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +checksum = "ae53b4d9cc89c40314ccf2bf9e6ff1eb19c31e3434542445a41893dbf041aec2" dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.24", - "quote 1.0.8", - "strsim 0.9.3", - "syn 1.0.51", + "quote 1.0.9", + "strsim 0.10.0", + "syn 1.0.60", ] [[package]] @@ -1378,13 +1360,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" +checksum = "e9cd9ac4d50d023af5e710cae1501afb063efcd917bd3fc026e8ed6493cc9755" dependencies = [ - "darling_core 0.10.2", - "quote 1.0.8", - "syn 1.0.51", + "darling_core 0.12.0", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -1400,9 +1382,9 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b937cd1fbd1f194ac842196bd2529f21618088ee6d8bff6a46ece611451c96b" +checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" dependencies = [ "cfg-if 1.0.0", "num_cpus", @@ -1410,9 +1392,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" [[package]] name = "deployer" @@ -1432,13 +1414,13 @@ dependencies = [ [[package]] name = "derivative" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -1473,8 +1455,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -1542,9 +1524,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "dyn-clonable" @@ -1563,15 +1545,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "dyn-clone" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" [[package]] name = "ed25519" @@ -1604,9 +1586,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if 1.0.0", ] @@ -1619,8 +1601,8 @@ checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ "heck", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -1647,6 +1629,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +dependencies = [ + "atty", + "humantime 2.1.0", + "log", + "regex", + "termcolor", +] + [[package]] name = "err-derive" version = "0.2.4" @@ -1655,9 +1650,9 @@ checksum = "22deed3a8124cff5fa835713fa105621e43bbdc46690c3a6b68328a012d350d4" dependencies = [ "proc-macro-error", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "rustversion", - "syn 1.0.51", + "syn 1.0.60", "synstructure", ] @@ -1705,8 +1700,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", "synstructure", ] @@ -1727,9 +1722,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129" +checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ "cfg-if 1.0.0", "crc32fast", @@ -1760,9 +1755,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding 2.1.0", @@ -1802,9 +1797,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -1817,9 +1812,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -1827,15 +1822,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -1844,48 +1839,48 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-lite" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6c079abfac3ab269e2927ec048dabc89d009ebfdda6b8ee86624f30c689658" +checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 0.1.11", + "pin-project-lite 0.2.4", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] @@ -1898,9 +1893,9 @@ checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -1909,7 +1904,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1931,19 +1926,6 @@ version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" -[[package]] -name = "generator" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - [[package]] name = "generic-array" version = "0.14.4" @@ -1956,24 +1938,24 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = 
"8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -2000,8 +1982,8 @@ checksum = "34a97a52fdee1870a34fa6e4b77570cba531b27d1838874fef4429a791a3d657" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -2037,18 +2019,18 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -2114,9 +2096,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -2135,15 +2117,15 @@ dependencies = [ [[package]] name = "humantime" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.13.9" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ "bytes 0.5.6", "futures-channel", @@ -2155,7 +2137,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.5", "socket2", "tokio", "tower-service", @@ -2216,9 +2198,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -2227,9 +2209,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = 
"4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg 1.0.1", "hashbrown", @@ -2246,9 +2228,9 @@ dependencies = [ [[package]] name = "integer-encoding" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4ebd0bd29be0f11973e9b3e219005661042a019fd757798c36a47c87852625" +checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" [[package]] name = "io-uring" @@ -2322,15 +2304,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -2343,12 +2325,12 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" [[package]] name = "jsonpath_lib" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8727f6987896c010ec9add275f59de2ae418b672fafa77bc3673b4cee1f09ca" +checksum = "61352ec23883402b7d30b3313c16cbabefb8907361c4eb669d990cbb87ceee5a" dependencies = [ "array_tool", - "env_logger", + "env_logger 0.7.1", "log", "serde", "serde_json", @@ -2422,7 +2404,7 @@ dependencies = [ "serde_yaml", "static_assertions", "thiserror", - "time 0.2.23", + "time 0.2.25", "tokio", "url", ] @@ -2435,9 +2417,9 @@ checksum = "cd71bf282e5551ac0852afcf25352b7fb8dd9a66eed7b6e66a6ebbf6b5b2f475" dependencies = [ "Inflector", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "serde_json", - "syn 1.0.51", + "syn 1.0.60", ] [[package]] @@ -2478,17 +2460,17 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.80" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "libloading" -version = "0.5.2" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" +checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883" dependencies = [ - "cc", + "cfg-if 1.0.0", "winapi 0.3.9", ] @@ -2504,9 +2486,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -2520,24 +2502,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", -] - -[[package]] -name = "loom" -version = "0.3.6" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" -dependencies = [ - "cfg-if 0.1.10", - "generator", - "scoped-tls", - "serde", - "serde_json", + "cfg-if 1.0.0", ] [[package]] @@ -2608,7 +2577,7 @@ dependencies = [ "crossbeam", "crossbeam-sync", "dns-lookup", - "env_logger", + "env_logger 0.7.1", "futures", "futures-timer", "git-version", @@ -2634,7 +2603,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "signal-hook", + "signal-hook 0.1.17", "smol", "snafu", "spdk-sys", @@ -2666,7 +2635,7 @@ dependencies = [ "async-trait", "composer", "dyn-clonable", - "env_logger", + "env_logger 0.7.1", "log", "nats", "once_cell", @@ -2730,9 +2699,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ "cfg-if 0.1.10", "fuchsia-zircon", @@ -2741,7 +2710,7 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow 0.2.1", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", @@ -2772,9 +2741,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -2800,9 +2769,9 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -2844,19 +2813,19 @@ dependencies = [ [[package]] name = "nb-connect" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" +checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f" dependencies = [ "libc", - "winapi 0.3.9", + "socket2", ] [[package]] name = "net2" -version = "0.2.35" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -2981,9 +2950,9 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "once_cell" @@ -2999,12 +2968,12 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 
0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -3019,9 +2988,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg 1.0.1", "cc", @@ -3037,7 +3006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3434e2a9d2aec539d91f4251bf9047cd53b4d3f386f9d336f4c8076c72a5256" dependencies = [ "async-trait", - "dashmap 4.0.1", + "dashmap 4.0.2", "fnv", "futures", "js-sys", @@ -3080,7 +3049,7 @@ dependencies = [ "actix-web", "anyhow", "either", - "humantime 2.0.1", + "humantime 2.1.0", "k8s-openapi", "kube", "kube-derive", @@ -3111,9 +3080,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fe9037165d7023b1228bc4ae9a2fa1a2b0095eca6c2998c624723dfd01314a5" +checksum = "766f840da25490628d8e63e529cd21c014f6600c6b8517add12a6fa6167a6218" dependencies = [ "num-traits 0.2.14", ] @@ -3131,7 +3100,7 @@ dependencies = [ "paperclip-core", "paperclip-macros", "parking_lot", - "semver", + "semver 0.11.0", "serde", "serde_derive", "serde_json", @@ -3167,7 +3136,7 @@ dependencies = [ "once_cell", "paperclip-macros", "parking_lot", - "pin-project 1.0.2", + "pin-project 1.0.5", "regex", "serde", "serde_json", @@ -3187,10 +3156,10 @@ dependencies = [ "mime", "proc-macro-error", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "strum", "strum_macros", - "syn 1.0.51", + "syn 1.0.60", ] [[package]] @@ -3212,15 +3181,14 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if 0.1.10", - "cloudabi 0.1.0", + "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.5", "smallvec", "winapi 0.3.9", ] @@ -3247,9 +3215,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ "base64 0.13.0", "once_cell", @@ -3268,6 +3236,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + [[package]] name = "petgraph" version = "0.5.1" @@ -3289,11 +3266,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" 
dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.5", ] [[package]] @@ -3303,19 +3280,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -3326,9 +3303,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -3369,8 +3346,8 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", "version_check", ] @@ -3381,7 +3358,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "version_check", ] @@ -3393,9 +3370,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -3462,8 +3439,8 @@ dependencies = [ "anyhow", "itertools 0.8.2", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -3499,9 +3476,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2 1.0.24", ] @@ -3531,7 +3508,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -3539,6 +3516,18 @@ dependencies = [ "rand_pcg 0.2.1", ] +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", +] + [[package]] name = "rand_chacha" version = "0.1.1" @@ -3559,6 +3548,16 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.2", +] + [[package]] name = "rand_core" version = "0.3.1" @@ -3580,7 +3579,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom 0.2.2", ] [[package]] @@ -3601,6 +3609,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.2", +] + [[package]] name = "rand_isaac" version = "0.1.1" @@ -3627,7 +3644,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi 0.0.3", + "cloudabi", "fuchsia-cprng", "libc", "rand_core 0.4.2", @@ -3678,22 +3695,31 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", - "redox_syscall", + "getrandom 0.1.16", + "redox_syscall 0.1.57", "rust-argon2", ] [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -3713,9 +3739,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -3728,9 +3754,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "async-compression", "base64 0.13.0", @@ -3750,7 +3776,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding 2.1.0", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.4", "serde", "serde_json", "serde_urlencoded 0.7.0", @@ -3759,7 +3785,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -3811,9 
+3836,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.18" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70017ed5c555d79ee3538fc63ca09c70ad8f317dcadc1adc2c496b60c22bb24f" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -3878,7 +3903,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver", + "semver 0.9.0", ] [[package]] @@ -3928,12 +3953,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -4002,7 +4021,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] @@ -4011,11 +4039,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "serde" -version = "1.0.117" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] @@ -4026,26 +4063,26 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ - "ordered-float 2.0.0", + "ordered-float 2.1.1", "serde", ] [[package]] name = "serde_derive" -version = "1.0.117" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "serde_json" -version = "1.0.59" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" dependencies = [ "indexmap", "itoa", @@ -4079,9 +4116,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "1.6.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f6201e064705553ece353a736a64be975680bd244908cf63e8fa71e478a51a" +checksum = "b44be9227e214a0420707c9ca74c2d4991d9955bae9415a8f93f05cebf561be5" dependencies = [ "serde", "serde_with_macros", @@ -4089,21 +4126,21 
@@ dependencies = [ [[package]] name = "serde_with_macros" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1197ff7de45494f290c1e3e1a6f80e108974681984c87a3e480991ef3d0f1950" +checksum = "e48b35457e9d855d3dc05ef32a73e0df1e2c0fd72c38796a4ee909160c8eeec2" dependencies = [ - "darling 0.10.2", + "darling 0.12.0", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" dependencies = [ "dtoa", "linked-hash-map", @@ -4113,9 +4150,9 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -4132,9 +4169,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -4145,12 +4182,11 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" dependencies = [ "lazy_static", - "loom", ] [[package]] @@ -4161,9 +4197,19 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook" -version = "0.1.16" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604508c1418b99dfe1925ca9224829bb2a8a9a04dda655cc01fcad46f4ab05ed" +checksum = "780f5e3fe0c66f67197236097d89de1e86216f1f6fdeaf47c442f854ab46c240" dependencies = [ "libc", "signal-hook-registry", @@ -4171,9 +4217,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -4184,7 +4230,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eaebd4be561a7d8148803baa108092f85090189c4b8c3ffb81602b15b5c1771" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "signature", "subtle-encoding", "zeroize", @@ -4192,9 +4238,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" [[package]] name = "slab" @@ -4204,15 +4250,15 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "smol" -version = "1.2.4" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf8ded16994c0ae59596c6e4733c76faeb0533c26fd5ca1b1bc89271a049a66" +checksum = "85cf3b5351f3e783c1d79ab5fc604eeed8b8ae9abd36b166e8b87a089efd85e4" dependencies = [ "async-channel", "async-executor", @@ -4228,9 +4274,9 @@ dependencies = [ [[package]] name = "snafu" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09" +checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" dependencies = [ "doc-comment", "futures-core", @@ -4240,24 +4286,23 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5" +checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -4277,9 +4322,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -4317,10 +4362,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "serde", "serde_derive", - "syn 1.0.51", + "syn 1.0.60", ] [[package]] @@ -4331,12 +4376,12 @@ checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" dependencies = [ "base-x", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "serde", "serde_derive", "serde_json", "sha1", - "syn 1.0.51", + "syn 1.0.60", ] [[package]] @@ -4359,15 +4404,15 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "strsim" -version = "0.9.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structopt" -version = 
"0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126d630294ec449fae0b16f964e35bf3c74f940da9dca17ee9b905f7b3112eb8" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" dependencies = [ "clap", "lazy_static", @@ -4376,15 +4421,15 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e51c492f9e23a220534971ff5afc14037289de430e3c83f9daf6a1b6ae91e8" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" dependencies = [ "heck", "proc-macro-error", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -4401,15 +4446,15 @@ checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" dependencies = [ "heck", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "subtle" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "subtle-encoding" @@ -4444,12 +4489,12 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.51" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b4f34193997d92804d359ed09953e25d5138df6bcc055a71bf68ee89fdf9223" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "unicode-xid 0.2.1", ] @@ -4469,8 +4514,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", "unicode-xid 0.2.1", ] @@ -4491,14 +4536,14 @@ version = "0.1.0" [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.3", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] @@ -4523,31 +4568,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = 
"8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -4585,9 +4630,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -4616,9 +4661,9 @@ checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.24", - "quote 1.0.8", + "quote 1.0.9", "standback", - "syn 1.0.51", + "syn 1.0.60", ] [[package]] @@ -4642,9 +4687,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -4657,9 +4702,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ "bytes 0.5.6", "fnv", @@ -4686,8 +4731,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -4778,8 +4823,8 @@ checksum = "0436413ba71545bcc6c2b9a0f9d78d72deb0123c6a75ccdfe7c056f9930f5e52" dependencies = [ "proc-macro2 1.0.24", "prost-build", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -4849,9 +4894,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35d656f2638b288b33495d1053ea74c40dc05ec0b92084dd71ca5566c4ed1dc" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-limit" @@ -4932,9 +4977,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tower-timeout" @@ -4962,26 +5007,26 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "f77d3842f76ca899ff2dbcf231c5c65813dea431301d6eb686279c15c4464f12" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +checksum = 
"a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", ] [[package]] @@ -4995,11 +5040,11 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project 1.0.5", "tracing", ] @@ -5111,6 +5156,12 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" + [[package]] name = "udev" version = "0.4.0" @@ -5141,9 +5192,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -5186,9 +5237,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", "idna", @@ -5217,9 +5268,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "vec-arena" @@ -5275,11 +5326,11 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.68" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -5287,26 +5338,26 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.68" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.18" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 
1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -5314,62 +5365,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.68" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ - "quote 1.0.8", + "quote 1.0.9", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.68" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.68" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" - -[[package]] -name = "wasm-bindgen-test" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d1cdc8b98a557f24733d50a1199c4b0635e465eecba9c45b214544da197f64" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fb9c67be7439ee8ab1b7db502a49c05e51e2835b66796c705134d9b8e1a585" -dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.8", -] +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -5377,9 +5404,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", @@ -5492,18 +5519,18 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] [[package]] name = "zeroize" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" dependencies = [ "zeroize_derive", ] @@ -5515,7 +5542,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" dependencies = [ "proc-macro2 1.0.24", - "quote 1.0.8", - "syn 1.0.51", + "quote 1.0.9", + "syn 1.0.60", 
"synstructure", ] diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index e29c5c4d2..2506205be 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -1,7 +1,11 @@ -use std::{convert::TryFrom, fmt::Display, sync::Arc}; +use std::{ + convert::TryFrom, + fmt::{Display, Formatter}, + sync::Arc, +}; use nix::errno::Errno; -use serde::{export::Formatter, Serialize}; +use serde::Serialize; use snafu::{ResultExt, Snafu}; use crate::{ diff --git a/mayastor/src/bdev/nexus/nexus_child_error_store.rs b/mayastor/src/bdev/nexus/nexus_child_error_store.rs index 539fdd6dc..93086759f 100644 --- a/mayastor/src/bdev/nexus/nexus_child_error_store.rs +++ b/mayastor/src/bdev/nexus/nexus_child_error_store.rs @@ -1,10 +1,8 @@ use std::{ - fmt::{Debug, Display}, + fmt::{Debug, Display, Error, Formatter}, time::{Duration, Instant}, }; -use serde::export::{fmt::Error, Formatter}; - use spdk_sys::spdk_bdev; use crate::{ diff --git a/mayastor/src/core/channel.rs b/mayastor/src/core/channel.rs index 8f8ec1f72..7b31ff9d7 100644 --- a/mayastor/src/core/channel.rs +++ b/mayastor/src/core/channel.rs @@ -1,6 +1,4 @@ -use std::fmt::Debug; - -use serde::export::{fmt::Error, Formatter}; +use std::fmt::{Debug, Error, Formatter}; use spdk_sys::{spdk_io_channel, spdk_put_io_channel}; diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs index 72905d0fe..a81dc2ade 100644 --- a/mayastor/src/core/descriptor.rs +++ b/mayastor/src/core/descriptor.rs @@ -1,7 +1,10 @@ -use std::{convert::TryFrom, fmt::Debug, os::raw::c_void}; +use std::{ + convert::TryFrom, + fmt::{Debug, Error, Formatter}, + os::raw::c_void, +}; use futures::channel::oneshot; -use serde::export::{fmt::Error, Formatter}; use spdk_sys::{ bdev_lock_lba_range, diff --git a/mayastor/src/core/handle.rs b/mayastor/src/core/handle.rs index ae2da726f..e77ef84ae 100644 --- a/mayastor/src/core/handle.rs +++ b/mayastor/src/core/handle.rs @@ -1,6 +1,6 @@ use std::{ convert::TryFrom, - fmt::Debug, + fmt::{Debug, Error, Formatter}, mem::ManuallyDrop, os::raw::c_void, sync::Arc, @@ -8,7 +8,6 @@ use std::{ use futures::channel::oneshot; use nix::errno::Errno; -use serde::export::{fmt::Error, Formatter}; use spdk_sys::{ spdk_bdev_desc, diff --git a/mayastor/src/core/reactor.rs b/mayastor/src/core/reactor.rs index bc7aaccee..fb5bdef18 100644 --- a/mayastor/src/core/reactor.rs +++ b/mayastor/src/core/reactor.rs @@ -33,7 +33,7 @@ use std::{ cell::RefCell, collections::VecDeque, fmt, - fmt::Display, + fmt::{Display, Formatter}, os::raw::c_void, pin::Pin, slice::Iter, @@ -46,7 +46,6 @@ use futures::{ Future, }; use once_cell::sync::OnceCell; -use serde::export::Formatter; use spdk_sys::{ spdk_cpuset_get_cpu, diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index fd3e74acc..10e5a3689 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -1,7 +1,8 @@ use std::{ + convert::TryFrom, ffi::{c_void, CString}, fmt, - fmt::{Debug, Display}, + fmt::{Debug, Display, Formatter}, mem::size_of, ptr, ptr::NonNull, @@ -9,7 +10,6 @@ use std::{ use futures::channel::oneshot; use nix::errno::Errno; -use serde::export::{Formatter, TryFrom}; use spdk_sys::{ spdk_bdev_nvme_opts, diff --git a/mayastor/src/subsys/nvmf/transport.rs b/mayastor/src/subsys/nvmf/transport.rs index 12a478125..14543b614 100644 --- a/mayastor/src/subsys/nvmf/transport.rs +++ b/mayastor/src/subsys/nvmf/transport.rs @@ -1,7 +1,7 @@ use std::{ 
env, ffi::CString, - fmt::{Debug, Display}, + fmt::{Debug, Display, Formatter}, net::Ipv4Addr, ops::{Deref, DerefMut}, ptr::copy_nonoverlapping, @@ -10,7 +10,6 @@ use std::{ use futures::channel::oneshot; use nix::errno::Errno; use once_cell::sync::Lazy; -use serde::export::Formatter; use spdk_sys::{ spdk_nvme_transport_id, diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index c490e4703..0f91c8ebb 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -56,7 +56,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1ynd6fmdr89f0g9vqsbz2rfl6ld23qv92lqcma5m4xcyhblbv5g0"; + cargoSha256 = "1s64shmbrihxqaz26iv69j9izjnhn2mprmzivpdn4s7262i60c3y"; inherit version; src = whitelistSource ../../../. src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/spdk-sys/Cargo.toml b/spdk-sys/Cargo.toml index 961198f6d..5c435e200 100644 --- a/spdk-sys/Cargo.toml +++ b/spdk-sys/Cargo.toml @@ -12,5 +12,5 @@ authors = [ ] [build-dependencies] -bindgen = "0.54" +bindgen = "0.57" cc = "1.0" From 3bd87b0944f0a96b5af1cdbc634c888b786db9b4 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Thu, 18 Feb 2021 14:10:12 +0000 Subject: [PATCH 24/78] refactor: e2e, split util.go Split util.go along resource and function lines into a bunch of smaller files. --- test/e2e/common/util.go | 929 +------------------------- test/e2e/common/util_cleanup.go | 221 ++++++ test/e2e/common/util_mayastor_crds.go | 323 +++++++++ test/e2e/common/util_node.go | 73 ++ test/e2e/common/util_pvc.go | 259 +++++++ test/e2e/common/util_testpods.go | 115 ++++ test/e2e/uninstall/uninstall_test.go | 2 +- 7 files changed, 993 insertions(+), 929 deletions(-) create mode 100644 test/e2e/common/util_cleanup.go create mode 100644 test/e2e/common/util_mayastor_crds.go create mode 100644 test/e2e/common/util_node.go create mode 100644 test/e2e/common/util_pvc.go create mode 100644 test/e2e/common/util_testpods.go diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index ab6a5ccd4..6339ee70f 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -7,30 +7,20 @@ import ( "os/exec" "regexp" "strconv" - "strings" "time" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/runtime/schema" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" . 
"github.com/onsi/gomega" - "reflect" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" logf "sigs.k8s.io/controller-runtime/pkg/log" ) -var defTimeoutSecs = "90s" - func ApplyDeployYaml(filename string) { cmd := exec.Command("kubectl", "apply", "-f", filename) cmd.Dir = "" @@ -45,497 +35,6 @@ func DeleteDeployYaml(filename string) { Expect(err).ToNot(HaveOccurred()) } -// Status part of the mayastor volume CRD -type MayastorVolStatus struct { - State string - Node string - Replicas []string -} - -func GetMSV(uuid string) *MayastorVolStatus { - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", - } - msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) - if err != nil { - fmt.Println(err) - return nil - } - if msv == nil { - return nil - } - status, found, err := unstructured.NestedFieldCopy(msv.Object, "status") - if err != nil { - fmt.Println(err) - return nil - } - - if !found { - return nil - } - msVol := MayastorVolStatus{} - - msVol.Replicas = make([]string, 0, 4) - - v := reflect.ValueOf(status) - if v.Kind() == reflect.Map { - for _, key := range v.MapKeys() { - sKey := key.Interface().(string) - val := v.MapIndex(key) - switch sKey { - case "state": - msVol.State = val.Interface().(string) - case "nexus": - nexusInt := val.Interface().(map[string]interface{}) - if node, ok := nexusInt["node"].(string); ok { - msVol.Node = node - } - case "replicas": - replicas := val.Interface().([]interface{}) - for _, replica := range replicas { - replicaMap := reflect.ValueOf(replica) - if replicaMap.Kind() == reflect.Map { - for _, field := range replicaMap.MapKeys() { - switch field.Interface().(string) { - case "node": - value := replicaMap.MapIndex(field) - msVol.Replicas = append(msVol.Replicas, value.Interface().(string)) - } - } - } - } - } - } - // Note: msVol.Node can be unassigned here if the volume is not mounted - Expect(msVol.State).NotTo(Equal("")) - Expect(len(msVol.Replicas)).To(BeNumerically(">", 0)) - return &msVol - } - return nil -} - -// Check for a deleted Mayastor Volume, -// the object does not exist if deleted -func IsMSVDeleted(uuid string) bool { - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", - } - - msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) - - if err != nil { - // Unfortunately there is no associated error code so we resort to string comparison - if strings.HasPrefix(err.Error(), "mayastorvolumes.openebs.io") && - strings.HasSuffix(err.Error(), " not found") { - return true - } - } - - Expect(err).To(BeNil()) - Expect(msv).ToNot(BeNil()) - return false -} - -func DeleteMSV(uuid string) error { - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", - } - - err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Delete(context.TODO(), uuid, metav1.DeleteOptions{}) - return err -} - -// Check for a deleted Persistent Volume Claim, -// either the object does not exist -// or the status phase is invalid. 
-func IsPVCDeleted(volName string) bool { - pvc, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{}) - if err != nil { - // Unfortunately there is no associated error code so we resort to string comparison - if strings.HasPrefix(err.Error(), "persistentvolumeclaims") && - strings.HasSuffix(err.Error(), " not found") { - return true - } - } - // After the PVC has been deleted it may still accessible, but status phase will be invalid - Expect(err).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - switch pvc.Status.Phase { - case - corev1.ClaimBound, - corev1.ClaimPending, - corev1.ClaimLost: - return false - default: - return true - } -} - -// Check for a deleted Persistent Volume, -// either the object does not exist -// or the status phase is invalid. -func IsPVDeleted(volName string) bool { - pv, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{}) - if err != nil { - // Unfortunately there is no associated error code so we resort to string comparison - if strings.HasPrefix(err.Error(), "persistentvolumes") && - strings.HasSuffix(err.Error(), " not found") { - return true - } - } - // After the PV has been deleted it may still accessible, but status phase will be invalid - Expect(err).To(BeNil()) - Expect(pv).ToNot(BeNil()) - switch pv.Status.Phase { - case - corev1.VolumeBound, - corev1.VolumeAvailable, - corev1.VolumeFailed, - corev1.VolumePending, - corev1.VolumeReleased: - return false - default: - return true - } -} - -// IsPvcBound returns true if a PVC with the given name is bound otherwise false is returned. -func IsPvcBound(pvcName string) bool { - return GetPvcStatusPhase(pvcName) == corev1.ClaimBound -} - -// Retrieve status phase of a Persistent Volume Claim -func GetPvcStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) { - pvc, getPvcErr := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{}) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - return pvc.Status.Phase -} - -// Retrieve status phase of a Persistent Volume -func GetPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) { - pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{}) - Expect(getPvErr).To(BeNil()) - Expect(pv).ToNot(BeNil()) - return pv.Status.Phase -} - -// Retrieve the state of a Mayastor Volume -func GetMsvState(uuid string) string { - msv := GetMSV(uuid) - Expect(msv).ToNot(BeNil()) - return msv.State -} - -// Retrieve the nexus node hosting the Mayastor Volume, -// and the names of the replica nodes -func GetMsvNodes(uuid string) (string, []string) { - msv := GetMSV(uuid) - Expect(msv).ToNot(BeNil()) - return msv.Node, msv.Replicas -} - -// Create a PVC and verify that -// 1. The PVC status transitions to bound, -// 2. The associated PV is created and its status transitions bound -// 3. 
The associated MV is created and has a State "healthy" -func MkPVC(volName string, scName string) string { - fmt.Printf("creating %s, %s\n", volName, scName) - // PVC create options - createOpts := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: volName, - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: &scName, - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("64Mi"), - }, - }, - }, - } - - // Create the PVC. - PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims - _, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{}) - Expect(createErr).To(BeNil()) - - // Confirm the PVC has been created. - pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - - // Wait for the PVC to be bound. - Eventually(func() corev1.PersistentVolumeClaimPhase { - return GetPvcStatusPhase(volName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(corev1.ClaimBound)) - - // Refresh the PVC contents, so that we can get the PV name. - pvc, getPvcErr = PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - - // Wait for the PV to be provisioned - Eventually(func() *corev1.PersistentVolume { - pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - if getPvErr != nil { - return nil - } - return pv - - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Not(BeNil())) - - // Wait for the PV to be bound. - Eventually(func() corev1.PersistentVolumePhase { - return GetPvStatusPhase(pvc.Spec.VolumeName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(corev1.VolumeBound)) - - Eventually(func() *MayastorVolStatus { - return GetMSV(string(pvc.ObjectMeta.UID)) - }, - defTimeoutSecs, - "1s", - ).Should(Not(BeNil())) - - return string(pvc.ObjectMeta.UID) -} - -// Delete the PVC and verify that -// 1. The PVC is deleted -// 2. The associated PV is deleted -// 3. The associated MV is deleted -func RmPVC(volName string, scName string) { - fmt.Printf("removing %s, %s\n", volName, scName) - - PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims - - // Confirm the PVC has been created. - pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - - // Delete the PVC - deleteErr := PVCApi("default").Delete(context.TODO(), volName, metav1.DeleteOptions{}) - Expect(deleteErr).To(BeNil()) - - // Wait for the PVC to be deleted. - Eventually(func() bool { - return IsPVCDeleted(volName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - // Wait for the PV to be deleted. - Eventually(func() bool { - return IsPVDeleted(pvc.Spec.VolumeName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - // Wait for the MSV to be deleted. 
- Eventually(func() bool { - return IsMSVDeleted(string(pvc.ObjectMeta.UID)) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) -} - -func RunFio(podName string, duration int) { - argRuntime := fmt.Sprintf("--runtime=%d", duration) - cmd := exec.Command( - "kubectl", - "exec", - "-it", - podName, - "--", - "fio", - "--name=benchtest", - "--size=50m", - "--filename=/volume/test", - "--direct=1", - "--rw=randrw", - "--ioengine=libaio", - "--bs=4k", - "--iodepth=16", - "--numjobs=1", - "--time_based", - argRuntime, - ) - cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - -func FioReadyPod() bool { - return IsPodRunning("fio") -} - -func IsPodRunning(podName string) bool { - var pod corev1.Pod - if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: "default"}, &pod) != nil { - return false - } - return pod.Status.Phase == v1.PodRunning -} - -/// Create a PVC in default namespace, no options and no context -func CreatePVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), pvc, metav1.CreateOptions{}) -} - -/// Retrieve a PVC in default namespace, no options and no context -func GetPVC(volName string) (*v1.PersistentVolumeClaim, error) { - return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{}) -} - -/// Delete a PVC in default namespace, no options and no context -func DeletePVC(volName string) error { - return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Delete(context.TODO(), volName, metav1.DeleteOptions{}) -} - -/// Retrieve a PV in default namespace, no options and no context -func GetPV(volName string) (*v1.PersistentVolume, error) { - return gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{}) -} - -/// Create a Pod in default namespace, no options and no context -func CreatePod(podDef *corev1.Pod) (*corev1.Pod, error) { - return gTestEnv.KubeInt.CoreV1().Pods("default").Create(context.TODO(), podDef, metav1.CreateOptions{}) -} - -/// Delete a Pod in default namespace, no options and no context -func DeletePod(podName string) error { - return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) -} - -/// Delete all pods in the default namespace -// returns: -// 1) success i.e. true if all pods were deleted or there were no pods to delete. 
-// 2) the number of pods found -func DeleteAllPods() (bool, int) { - logf.Log.Info("DeleteAllPods") - success := true - numPods := 0 - pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - logf.Log.Info("DeleteAllPods: list pods failed.", "error", err) - success = false - } - if err == nil && pods != nil { - numPods = len(pods.Items) - for _, pod := range pods.Items { - logf.Log.Info("DeleteAllPods: Deleting", "pod", pod.Name) - if err := DeletePod(pod.Name); err != nil { - success = false - } - } - } - return success, numPods -} - -func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { - podDef := CreateFioPodDef(podName, volName) - return CreatePod(podDef) -} - -/// Create a test fio pod in default namespace, no options and no context -/// mayastor volume is mounted on /volume -func CreateFioPodDef(podName string, volName string) *corev1.Pod { - podDef := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: podName, - Image: "dmonakhov/alpine-fio", - Args: []string{"sleep", "1000000"}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "ms-volume", - MountPath: "/volume", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "ms-volume", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: volName, - }, - }, - }, - }, - }, - } - return &podDef -} - -type NodeLocation struct { - NodeName string - IPAddress string - MayastorNode bool -} - -// returns vector of populated NodeLocation structs -func GetNodeLocs() ([]NodeLocation, error) { - nodeList := corev1.NodeList{} - - if gTestEnv.K8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil { - return nil, errors.New("failed to list nodes") - } - NodeLocs := make([]NodeLocation, 0, len(nodeList.Items)) - for _, k8snode := range nodeList.Items { - addrstr := "" - namestr := "" - mayastorNode := false - for label, value := range k8snode.Labels { - if label == "openebs.io/engine" && value == "mayastor" { - mayastorNode = true - } - } - for _, addr := range k8snode.Status.Addresses { - if addr.Type == corev1.NodeInternalIP { - addrstr = addr.Address - } - if addr.Type == corev1.NodeHostName { - namestr = addr.Address - } - } - if namestr != "" && addrstr != "" { - NodeLocs = append(NodeLocs, NodeLocation{NodeName: namestr, IPAddress: addrstr, MayastorNode: mayastorNode}) - } else { - return nil, errors.New("node lacks expected fields") - } - } - return NodeLocs, nil -} - // create a storage class func MkStorageClass(scName string, scReplicas int, protocol string, provisioner string) { createOpts := &storagev1.StorageClass{ @@ -615,26 +114,6 @@ func SetDeploymentReplication(deploymentName string, namespace string, replicas Expect(err).ToNot(HaveOccurred()) } -// TODO remove dependency on kubectl -// label is a string in the form "key=value" -// function still succeeds if label already present -func LabelNode(nodename string, label string, value string) { - labelAssign := fmt.Sprintf("%s=%s", label, value) - cmd := exec.Command("kubectl", "label", "node", nodename, labelAssign, "--overwrite=true") - cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - -// TODO remove dependency on kubectl -// function still succeeds if label not present -func UnlabelNode(nodename string, label string) { - cmd := exec.Command("kubectl", "label", "node", 
nodename, label+"-")
-	cmd.Dir = ""
-	_, err := cmd.CombinedOutput()
-	Expect(err).ToNot(HaveOccurred())
-}
-
 // Wait until all instances of the specified pod are absent from the given node
 func WaitForPodAbsentFromNode(podNameRegexp string, namespace string, nodeName string, timeoutSeconds int) error {
 	var validID = regexp.MustCompile(podNameRegexp)
@@ -723,367 +202,6 @@ func PodPresentOnNode(podNameRegexp string, namespace string, nodeName string) b
 	return false
 }
 
-// Return a group version resource for a MSV
-func getMsvGvr() schema.GroupVersionResource {
-	return schema.GroupVersionResource{
-		Group:    "openebs.io",
-		Version:  "v1alpha1",
-		Resource: "mayastorvolumes",
-	}
-}
-
-// Get the k8s MSV CRD
-func getMsv(uuid string) (*unstructured.Unstructured, error) {
-	msvGVR := getMsvGvr()
-	return gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
-}
-
-// Get a field within the MSV.
-// The "fields" argument specifies the path within the MSV where the field should be found.
-// E.g. for the replicaCount field which is nested under the MSV spec the function should be called like:
-// getMsvFieldValue(<uuid>, "spec", "replicaCount")
-func getMsvFieldValue(uuid string, fields ...string) (interface{}, error) {
-	msv, err := getMsv(uuid)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to get MSV with error %v", err)
-	}
-	if msv == nil {
-		return nil, fmt.Errorf("MSV with uuid %s does not exist", uuid)
-	}
-
-	field, found, err := unstructured.NestedFieldCopy(msv.Object, fields...)
-	if err != nil {
-		// The last field is the one that we were looking for.
-		lastFieldIndex := len(fields) - 1
-		return nil, fmt.Errorf("Failed to get field %s with error %v", fields[lastFieldIndex], err)
-	}
-	if !found {
-		// The last field is the one that we were looking for.
-		lastFieldIndex := len(fields) - 1
-		return nil, fmt.Errorf("Failed to find field %s", fields[lastFieldIndex])
-	}
-	return field, nil
-}
-
-// GetNumReplicas returns the number of replicas in the MSV.
-// An error is returned if the number of replicas cannot be retrieved.
-func GetNumReplicas(uuid string) (int64, error) {
-	// Get the number of replicas from the MSV.
-	repl, err := getMsvFieldValue(uuid, "spec", "replicaCount")
-	if err != nil {
-		return 0, err
-	}
-	if repl == nil {
-		return 0, fmt.Errorf("Failed to get replicaCount")
-	}
-
-	return reflect.ValueOf(repl).Interface().(int64), nil
-}
-
-// UpdateNumReplicas sets the number of replicas in the MSV to the desired number.
-// An error is returned if the number of replicas cannot be updated.
-func UpdateNumReplicas(uuid string, numReplicas int64) error {
-	msv, err := getMsv(uuid)
-	if err != nil {
-		return fmt.Errorf("Failed to get MSV with error %v", err)
-	}
-	if msv == nil {
-		return fmt.Errorf("MSV not found")
-	}
-
-	// Set the number of replicas in the MSV.
-	err = unstructured.SetNestedField(msv.Object, numReplicas, "spec", "replicaCount")
-	if err != nil {
-		return err
-	}
-
-	// Update the k8s MSV object.
- msvGVR := getMsvGvr() - _, err = gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Update(context.TODO(), msv, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("Failed to update MSV: %v", err) - } - return nil -} - -// GetNumChildren returns the number of nexus children listed in the MSV -func GetNumChildren(uuid string) int { - children, err := getMsvFieldValue(uuid, "status", "nexus", "children") - if err != nil { - return 0 - } - if children == nil { - return 0 - } - - switch reflect.TypeOf(children).Kind() { - case reflect.Slice: - return reflect.ValueOf(children).Len() - } - return 0 -} - -// NexusChild represents the information stored in the MSV about the child -type NexusChild struct { - State string - URI string -} - -// GetChildren returns a slice containing information about the children. -// An error is returned if the child information cannot be retrieved. -func GetChildren(uuid string) ([]NexusChild, error) { - children, err := getMsvFieldValue(uuid, "status", "nexus", "children") - if err != nil { - return nil, fmt.Errorf("Failed to get children with error %v", err) - } - if children == nil { - return nil, fmt.Errorf("Failed to find children") - } - - nexusChildren := make([]NexusChild, 2) - - switch reflect.TypeOf(children).Kind() { - case reflect.Slice: - s := reflect.ValueOf(children) - for i := 0; i < s.Len(); i++ { - child := s.Index(i).Elem() - if child.Kind() == reflect.Map { - for _, key := range child.MapKeys() { - skey := key.Interface().(string) - switch skey { - case "state": - nexusChildren[i].State = child.MapIndex(key).Interface().(string) - case "uri": - nexusChildren[i].URI = child.MapIndex(key).Interface().(string) - } - } - } - } - } - - return nexusChildren, nil -} - -// GetNexusState returns the nexus state from the MSV. -// An error is returned if the nexus state cannot be retrieved. -func GetNexusState(uuid string) (string, error) { - // Get the state of the nexus from the MSV. - state, err := getMsvFieldValue(uuid, "status", "nexus", "state") - if err != nil { - return "", err - } - if state == nil { - return "", fmt.Errorf("Failed to get nexus state") - } - - return reflect.ValueOf(state).Interface().(string), nil -} - -// IsVolumePublished returns true if the volume is published. -// A volume is published if the "targetNodes" field exists in the MSV. 
-func IsVolumePublished(uuid string) bool { - _, err := getMsvFieldValue(uuid, "status", "targetNodes") - if err != nil { - return false - } - return true -} - -func CheckForPVCs() (bool, error) { - logf.Log.Info("CheckForPVCs") - foundResources := false - - pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvcs != nil && len(pvcs.Items) != 0 { - logf.Log.Info("CheckForVolumeResources: found PersistentVolumeClaims", - "PersistentVolumeClaims", pvcs.Items) - foundResources = true - } - return foundResources, err -} - -func CheckForPVs() (bool, error) { - logf.Log.Info("CheckForPVs") - foundResources := false - - pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvs != nil && len(pvs.Items) != 0 { - logf.Log.Info("CheckForVolumeResources: found PersistentVolumes", - "PersistentVolumes", pvs.Items) - foundResources = true - } - return foundResources, err -} - -func CheckForMSVs() (bool, error) { - logf.Log.Info("CheckForMSVs") - foundResources := false - - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", - } - - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err == nil && msvs != nil && len(msvs.Items) != 0 { - logf.Log.Info("CheckForVolumeResources: found MayastorVolumes", - "MayastorVolumes", msvs.Items) - foundResources = true - } - return foundResources, err -} - -func CheckForTestPods() (bool, error) { - logf.Log.Info("CheckForTestPods") - foundPods := false - - pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) - if err == nil && pods != nil && len(pods.Items) != 0 { - logf.Log.Info("CheckForTestPods", - "Pods", pods.Items) - foundPods = true - } - return foundPods, err -} - -// Make best attempt to delete PVCs, PVs and MSVs -func DeleteAllVolumeResources() (bool, bool) { - logf.Log.Info("DeleteAllVolumeResources") - foundResources := false - success := true - - // Delete all PVCs found - // Phase 1 to delete dangling resources - pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - logf.Log.Info("DeleteAllVolumeResources: list PVCs failed.", "error", err) - success = false - } - if err == nil && pvcs != nil && len(pvcs.Items) != 0 { - foundResources = true - logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumeClaims") - for _, pvc := range pvcs.Items { - if err := DeletePVC(pvc.Name); err != nil { - success = false - } - } - } - - // Delete all PVs found - pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - logf.Log.Info("DeleteAllVolumeResources: list PVs failed.", "error", err) - } - if err == nil && pvs != nil && len(pvs.Items) != 0 { - logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumes") - for _, pv := range pvs.Items { - if err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}); err != nil { - success = false - } - } - } - - // Wait 2 minutes for resources to be deleted - for attempts := 0; attempts < 120; attempts++ { - numPvcs := 0 - pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvcs != nil { - numPvcs = len(pvcs.Items) - 
} - - numPvs := 0 - pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvs != nil { - numPvs = len(pvs.Items) - } - - if numPvcs == 0 && numPvs == 0 { - break - } - time.Sleep(1 * time.Second) - } - - // If after deleting PVCs and PVs Mayastor volumes are leftover - // try cleaning them up explicitly - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", - } - - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly - logf.Log.Info("DeleteAllVolumeResources: list MSVs failed.", "Error", err) - } - if err == nil && msvs != nil && len(msvs.Items) != 0 { - logf.Log.Info("DeleteAllVolumeResources: deleting MayastorVolumes") - for _, msv := range msvs.Items { - if err := DeleteMSV(msv.GetName()); err != nil { - success = false - } - } - } - - // Wait 2 minutes for resources to be deleted - for attempts := 0; attempts < 120; attempts++ { - numMsvs := 0 - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err == nil && msvs != nil { - numMsvs = len(msvs.Items) - } - if numMsvs == 0 { - break - } - time.Sleep(1 * time.Second) - } - - return success, foundResources -} - -func DeletePools() { - poolGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorpools", - } - - pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly - logf.Log.Info("DeletePools: list MSPs failed.", "Error", err) - } - if err == nil && pools != nil && len(pools.Items) != 0 { - logf.Log.Info("DeletePools: deleting MayastorPools") - for _, pool := range pools.Items { - logf.Log.Info("DeletePools: deleting", "pool", pool.GetName()) - err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{}) - if err != nil { - logf.Log.Error(err, "Failed to delete pool", pool.GetName()) - } - } - } - - numPools := 0 - // Wait 2 minutes for resources to be deleted - for attempts := 0; attempts < 120; attempts++ { - pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err == nil && pools != nil { - numPools = len(pools.Items) - } - if numPools == 0 { - break - } - time.Sleep(1 * time.Second) - } - - logf.Log.Info("DeletePools: ", "Pool count", numPools) - if numPools != 0 { - logf.Log.Info("DeletePools: ", "Pools", pools.Items) - } -} - func AfterSuiteCleanup() { logf.Log.Info("AfterSuiteCleanup") _, _ = DeleteAllVolumeResources() @@ -1126,48 +244,3 @@ func AfterEachCheck() error { } return nil } - -func MayastorUndeletedPodCount() int { - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.") - return 0 - } - if pods != nil { - return len(pods.Items) - } - logf.Log.Info("MayastorUndeletedPodCount: nil list returned.") - return 0 -} - -// Force deletion of all existing mayastor pods -// Returns true if pods were deleted, false otherwise -func ForceDeleteMayastorPods() bool 
{ - logf.Log.Info("EnsureMayastorDeleted") - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - logf.Log.Error(err, "EnsureMayastorDeleted: list pods failed.") - return false - } - if pods == nil || len(pods.Items) == 0 { - return false - } - - logf.Log.Info("EnsureMayastorDeleted: MayastorPods found.", "Count", len(pods.Items)) - for _, pod := range pods.Items { - logf.Log.Info("EnsureMayastorDeleted: Force deleting", "pod", pod.Name) - cmd := exec.Command("kubectl", "-n", "mayastor", "delete", "pod", pod.Name, "--grace-period", "0", "--force") - _, err := cmd.CombinedOutput() - if err != nil { - logf.Log.Error(err, "EnsureMayastorDeleted", "podName", pod.Name) - } - } - - // We have made the best effort to cleanup, give things time to settle. - for attempts := 0; attempts < 30 && MayastorUndeletedPodCount() != 0; attempts++ { - time.Sleep(2 * time.Second) - } - - logf.Log.Info("EnsureMayastorDeleted: lingering Mayastor pods were found !!!!!!!!") - return true -} diff --git a/test/e2e/common/util_cleanup.go b/test/e2e/common/util_cleanup.go new file mode 100644 index 000000000..ffbdedf82 --- /dev/null +++ b/test/e2e/common/util_cleanup.go @@ -0,0 +1,221 @@ +package common + +// Utility functions for cleaning up a cluster +import ( + "context" + "os/exec" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +/// Delete all pods in the default namespace +// returns: +// 1) success i.e. true if all pods were deleted or there were no pods to delete. +// 2) the number of pods found +func DeleteAllPods() (bool, int) { + logf.Log.Info("DeleteAllPods") + success := true + numPods := 0 + pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Info("DeleteAllPods: list pods failed.", "error", err) + success = false + } + if err == nil && pods != nil { + numPods = len(pods.Items) + for _, pod := range pods.Items { + logf.Log.Info("DeleteAllPods: Deleting", "pod", pod.Name) + if err := DeletePod(pod.Name); err != nil { + success = false + } + } + } + return success, numPods +} + +// Make best attempt to delete PVCs, PVs and MSVs +func DeleteAllVolumeResources() (bool, bool) { + logf.Log.Info("DeleteAllVolumeResources") + foundResources := false + success := true + + // Delete all PVCs found + // Phase 1 to delete dangling resources + pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Info("DeleteAllVolumeResources: list PVCs failed.", "error", err) + success = false + } + if err == nil && pvcs != nil && len(pvcs.Items) != 0 { + foundResources = true + logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumeClaims") + for _, pvc := range pvcs.Items { + if err := DeletePVC(pvc.Name); err != nil { + success = false + } + } + } + + // Delete all PVs found + pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Info("DeleteAllVolumeResources: list PVs failed.", "error", err) + } + if err == nil && pvs != nil && len(pvs.Items) != 0 { + logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumes") + for _, pv := range pvs.Items { + if err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}); err != nil { + success = false + } + } + 
} + + // Wait 2 minutes for resources to be deleted + for attempts := 0; attempts < 120; attempts++ { + numPvcs := 0 + pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) + if err == nil && pvcs != nil { + numPvcs = len(pvcs.Items) + } + + numPvs := 0 + pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + if err == nil && pvs != nil { + numPvs = len(pvs.Items) + } + + if numPvcs == 0 && numPvs == 0 { + break + } + time.Sleep(1 * time.Second) + } + + // If after deleting PVCs and PVs Mayastor volumes are leftover + // try cleaning them up explicitly + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly + logf.Log.Info("DeleteAllVolumeResources: list MSVs failed.", "Error", err) + } + if err == nil && msvs != nil && len(msvs.Items) != 0 { + logf.Log.Info("DeleteAllVolumeResources: deleting MayastorVolumes") + for _, msv := range msvs.Items { + if err := DeleteMSV(msv.GetName()); err != nil { + success = false + } + } + } + + // Wait 2 minutes for resources to be deleted + for attempts := 0; attempts < 120; attempts++ { + numMsvs := 0 + msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err == nil && msvs != nil { + numMsvs = len(msvs.Items) + } + if numMsvs == 0 { + break + } + time.Sleep(1 * time.Second) + } + + return success, foundResources +} + +func DeleteAllPools() { + poolGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorpools", + } + + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly + logf.Log.Info("DeleteAllPools: list MSPs failed.", "Error", err) + } + if err == nil && pools != nil && len(pools.Items) != 0 { + logf.Log.Info("DeleteAllPools: deleting MayastorPools") + for _, pool := range pools.Items { + logf.Log.Info("DeleteAllPools: deleting", "pool", pool.GetName()) + err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{}) + if err != nil { + logf.Log.Error(err, "Failed to delete pool", pool.GetName()) + } + } + } + + numPools := 0 + // Wait 2 minutes for resources to be deleted + for attempts := 0; attempts < 120; attempts++ { + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err == nil && pools != nil { + numPools = len(pools.Items) + } + if numPools == 0 { + break + } + time.Sleep(1 * time.Second) + } + + logf.Log.Info("DeleteAllPools: ", "Pool count", numPools) + if numPools != 0 { + logf.Log.Info("DeleteAllPools: ", "Pools", pools.Items) + } +} + +func MayastorUndeletedPodCount() int { + pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.") + return 0 + } + if pods != nil { + return len(pods.Items) + } + logf.Log.Info("MayastorUndeletedPodCount: nil list returned.") + 
return 0 +} + +// Force deletion of all existing mayastor pods +// Returns true if pods were deleted, false otherwise +func ForceDeleteMayastorPods() bool { + logf.Log.Info("EnsureMayastorDeleted") + pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Error(err, "EnsureMayastorDeleted: list pods failed.") + return false + } + if pods == nil || len(pods.Items) == 0 { + return false + } + + logf.Log.Info("EnsureMayastorDeleted: MayastorPods found.", "Count", len(pods.Items)) + for _, pod := range pods.Items { + logf.Log.Info("EnsureMayastorDeleted: Force deleting", "pod", pod.Name) + cmd := exec.Command("kubectl", "-n", "mayastor", "delete", "pod", pod.Name, "--grace-period", "0", "--force") + _, err := cmd.CombinedOutput() + if err != nil { + logf.Log.Error(err, "EnsureMayastorDeleted", "podName", pod.Name) + } + } + + // We have made the best effort to cleanup, give things time to settle. + for attempts := 0; attempts < 30 && MayastorUndeletedPodCount() != 0; attempts++ { + time.Sleep(2 * time.Second) + } + + logf.Log.Info("EnsureMayastorDeleted: lingering Mayastor pods were found !!!!!!!!") + return true +} diff --git a/test/e2e/common/util_mayastor_crds.go b/test/e2e/common/util_mayastor_crds.go new file mode 100644 index 000000000..2af167467 --- /dev/null +++ b/test/e2e/common/util_mayastor_crds.go @@ -0,0 +1,323 @@ +package common + +// Utility functions for Mayastor CRDs +import ( + "context" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "strings" + + . "github.com/onsi/gomega" + + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// Status part of the mayastor volume CRD +type MayastorVolStatus struct { + State string + Node string + Replicas []string +} + +func GetMSV(uuid string) *MayastorVolStatus { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + if err != nil { + fmt.Println(err) + return nil + } + if msv == nil { + return nil + } + status, found, err := unstructured.NestedFieldCopy(msv.Object, "status") + if err != nil { + fmt.Println(err) + return nil + } + + if !found { + return nil + } + msVol := MayastorVolStatus{} + + msVol.Replicas = make([]string, 0, 4) + + v := reflect.ValueOf(status) + if v.Kind() == reflect.Map { + for _, key := range v.MapKeys() { + sKey := key.Interface().(string) + val := v.MapIndex(key) + switch sKey { + case "state": + msVol.State = val.Interface().(string) + case "nexus": + nexusInt := val.Interface().(map[string]interface{}) + if node, ok := nexusInt["node"].(string); ok { + msVol.Node = node + } + case "replicas": + replicas := val.Interface().([]interface{}) + for _, replica := range replicas { + replicaMap := reflect.ValueOf(replica) + if replicaMap.Kind() == reflect.Map { + for _, field := range replicaMap.MapKeys() { + switch field.Interface().(string) { + case "node": + value := replicaMap.MapIndex(field) + msVol.Replicas = append(msVol.Replicas, value.Interface().(string)) + } + } + } + } + } + } + // Note: msVol.Node can be unassigned here if the volume is not mounted + Expect(msVol.State).NotTo(Equal("")) + Expect(len(msVol.Replicas)).To(BeNumerically(">", 0)) + return &msVol + } + return nil +} + +// Check for a deleted Mayastor Volume, +// 
the object does not exist if deleted +func IsMSVDeleted(uuid string) bool { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "mayastorvolumes.openebs.io") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + + Expect(err).To(BeNil()) + Expect(msv).ToNot(BeNil()) + return false +} + +func DeleteMSV(uuid string) error { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Delete(context.TODO(), uuid, metav1.DeleteOptions{}) + return err +} + +// Retrieve the state of a Mayastor Volume +func GetMsvState(uuid string) string { + msv := GetMSV(uuid) + Expect(msv).ToNot(BeNil()) + return msv.State +} + +// Retrieve the nexus node hosting the Mayastor Volume, +// and the names of the replica nodes +func GetMsvNodes(uuid string) (string, []string) { + msv := GetMSV(uuid) + Expect(msv).ToNot(BeNil()) + return msv.Node, msv.Replicas +} + +// Return a group version resource for a MSV +func getMsvGvr() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } +} + +// Get the k8s MSV CRD +func getMsv(uuid string) (*unstructured.Unstructured, error) { + msvGVR := getMsvGvr() + return gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) +} + +// Get a field within the MSV. +// The "fields" argument specifies the path within the MSV where the field should be found. +// E.g. for the replicaCount field which is nested under the MSV spec the function should be called like: +// getMsvFieldValue(, "spec", "replicaCount") +func getMsvFieldValue(uuid string, fields ...string) (interface{}, error) { + msv, err := getMsv(uuid) + if err != nil { + return nil, fmt.Errorf("Failed to get MSV with error %v", err) + } + if msv == nil { + return nil, fmt.Errorf("MSV with uuid %s does not exist", uuid) + } + + field, found, err := unstructured.NestedFieldCopy(msv.Object, fields...) + if err != nil { + // The last field is the one that we were looking for. + lastFieldIndex := len(fields) - 1 + return nil, fmt.Errorf("Failed to get field %s with error %v", fields[lastFieldIndex], err) + } + if !found { + // The last field is the one that we were looking for. + lastFieldIndex := len(fields) - 1 + return nil, fmt.Errorf("Failed to find field %s", fields[lastFieldIndex]) + } + return field, nil +} + +// GetNumReplicas returns the number of replicas in the MSV. +// An error is returned if the number of replicas cannot be retrieved. +func GetNumReplicas(uuid string) (int64, error) { + // Get the number of replicas from the MSV. + repl, err := getMsvFieldValue(uuid, "spec", "replicaCount") + if err != nil { + return 0, err + } + if repl == nil { + return 0, fmt.Errorf("Failed to get replicaCount") + } + + return reflect.ValueOf(repl).Interface().(int64), nil +} + +// UpdateNumReplicas sets the number of replicas in the MSV to the desired number. +// An error is returned if the number of replicas cannot be updated. 
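+// A minimal usage sketch, not part of the original file: "uuid" is assumed to
+// be the PVC UID backing an existing MSV (as returned by MkPVC), e.g.
+//
+//    if err := UpdateNumReplicas(uuid, 3); err != nil {
+//        logf.Log.Error(err, "failed to update the replica count")
+//    }
+//    repl, _ := GetNumReplicas(uuid) // expected to report 3 once updated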
+func UpdateNumReplicas(uuid string, numReplicas int64) error {
+    msv, err := getMsv(uuid)
+    if err != nil {
+        return fmt.Errorf("Failed to get MSV with error %v", err)
+    }
+    if msv == nil {
+        return fmt.Errorf("MSV not found")
+    }
+
+    // Set the number of replicas in the MSV.
+    err = unstructured.SetNestedField(msv.Object, numReplicas, "spec", "replicaCount")
+    if err != nil {
+        return err
+    }
+
+    // Update the k8s MSV object.
+    msvGVR := getMsvGvr()
+    _, err = gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Update(context.TODO(), msv, metav1.UpdateOptions{})
+    if err != nil {
+        return fmt.Errorf("Failed to update MSV: %v", err)
+    }
+    return nil
+}
+
+// GetNumChildren returns the number of nexus children listed in the MSV
+func GetNumChildren(uuid string) int {
+    children, err := getMsvFieldValue(uuid, "status", "nexus", "children")
+    if err != nil {
+        return 0
+    }
+    if children == nil {
+        return 0
+    }
+
+    switch reflect.TypeOf(children).Kind() {
+    case reflect.Slice:
+        return reflect.ValueOf(children).Len()
+    }
+    return 0
+}
+
+// NexusChild represents the information stored in the MSV about a nexus child
+type NexusChild struct {
+    State string
+    URI   string
+}
+
+// GetChildren returns a slice containing information about the children.
+// An error is returned if the child information cannot be retrieved.
+func GetChildren(uuid string) ([]NexusChild, error) {
+    children, err := getMsvFieldValue(uuid, "status", "nexus", "children")
+    if err != nil {
+        return nil, fmt.Errorf("Failed to get children with error %v", err)
+    }
+    if children == nil {
+        return nil, fmt.Errorf("Failed to find children")
+    }
+
+    // Build the slice dynamically; a fixed-size slice would panic for a nexus
+    // with more than two children and return empty entries for fewer.
+    nexusChildren := make([]NexusChild, 0, 2)
+
+    switch reflect.TypeOf(children).Kind() {
+    case reflect.Slice:
+        s := reflect.ValueOf(children)
+        for i := 0; i < s.Len(); i++ {
+            child := s.Index(i).Elem()
+            if child.Kind() == reflect.Map {
+                nexusChild := NexusChild{}
+                for _, key := range child.MapKeys() {
+                    skey := key.Interface().(string)
+                    switch skey {
+                    case "state":
+                        nexusChild.State = child.MapIndex(key).Interface().(string)
+                    case "uri":
+                        nexusChild.URI = child.MapIndex(key).Interface().(string)
+                    }
+                }
+                nexusChildren = append(nexusChildren, nexusChild)
+            }
+        }
+    }
+
+    return nexusChildren, nil
+}
+
+// GetNexusState returns the nexus state from the MSV.
+// An error is returned if the nexus state cannot be retrieved.
+func GetNexusState(uuid string) (string, error) {
+    // Get the state of the nexus from the MSV.
+    state, err := getMsvFieldValue(uuid, "status", "nexus", "state")
+    if err != nil {
+        return "", err
+    }
+    if state == nil {
+        return "", fmt.Errorf("Failed to get nexus state")
+    }
+
+    return reflect.ValueOf(state).Interface().(string), nil
+}
+
+// IsVolumePublished returns true if the volume is published.
+// A volume is published if the "targetNodes" field exists in the MSV.
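+// A usage sketch (not in the original file): with "uuid" assumed to be the
+// PVC UID of a provisioned volume, a test can wait for publication like:
+//
+//    Eventually(func() bool { return IsVolumePublished(uuid) },
+//        defTimeoutSecs, "1s").Should(Equal(true))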
+func IsVolumePublished(uuid string) bool {
+    _, err := getMsvFieldValue(uuid, "status", "targetNodes")
+    if err != nil {
+        return false
+    }
+    return true
+}
+
+func CheckForMSVs() (bool, error) {
+    logf.Log.Info("CheckForMSVs")
+    foundResources := false
+
+    msvGVR := schema.GroupVersionResource{
+        Group:    "openebs.io",
+        Version:  "v1alpha1",
+        Resource: "mayastorvolumes",
+    }
+
+    msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{})
+    if err == nil && msvs != nil && len(msvs.Items) != 0 {
+        logf.Log.Info("CheckForMSVs: found MayastorVolumes",
+            "MayastorVolumes", msvs.Items)
+        foundResources = true
+    }
+    return foundResources, err
+}
diff --git a/test/e2e/common/util_node.go b/test/e2e/common/util_node.go
new file mode 100644
index 000000000..e2b57eaf5
--- /dev/null
+++ b/test/e2e/common/util_node.go
@@ -0,0 +1,73 @@
+package common
+
+// Utility functions for manipulation of nodes.
+import (
+    "context"
+    "errors"
+    "fmt"
+    "os/exec"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+
+    . "github.com/onsi/gomega"
+    corev1 "k8s.io/api/core/v1"
+)
+
+type NodeLocation struct {
+    NodeName     string
+    IPAddress    string
+    MayastorNode bool
+}
+
+// Returns a slice of populated NodeLocation structs, one per cluster node.
+func GetNodeLocs() ([]NodeLocation, error) {
+    nodeList := corev1.NodeList{}
+
+    if gTestEnv.K8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil {
+        return nil, errors.New("failed to list nodes")
+    }
+    NodeLocs := make([]NodeLocation, 0, len(nodeList.Items))
+    for _, k8snode := range nodeList.Items {
+        addrstr := ""
+        namestr := ""
+        mayastorNode := false
+        for label, value := range k8snode.Labels {
+            if label == "openebs.io/engine" && value == "mayastor" {
+                mayastorNode = true
+            }
+        }
+        for _, addr := range k8snode.Status.Addresses {
+            if addr.Type == corev1.NodeInternalIP {
+                addrstr = addr.Address
+            }
+            if addr.Type == corev1.NodeHostName {
+                namestr = addr.Address
+            }
+        }
+        if namestr != "" && addrstr != "" {
+            NodeLocs = append(NodeLocs, NodeLocation{NodeName: namestr, IPAddress: addrstr, MayastorNode: mayastorNode})
+        } else {
+            return nil, errors.New("node lacks expected fields")
+        }
+    }
+    return NodeLocs, nil
+}
+
+// TODO remove dependency on kubectl
+// The label is applied to the node as "<label>=<value>".
+// The call still succeeds if the label is already present.
+func LabelNode(nodename string, label string, value string) {
+    labelAssign := fmt.Sprintf("%s=%s", label, value)
+    cmd := exec.Command("kubectl", "label", "node", nodename, labelAssign, "--overwrite=true")
+    cmd.Dir = ""
+    _, err := cmd.CombinedOutput()
+    Expect(err).ToNot(HaveOccurred())
+}
+
+// TODO remove dependency on kubectl
+// The call still succeeds if the label is not present.
+func UnlabelNode(nodename string, label string) {
+    cmd := exec.Command("kubectl", "label", "node", nodename, label+"-")
+    cmd.Dir = ""
+    _, err := cmd.CombinedOutput()
+    Expect(err).ToNot(HaveOccurred())
+}
diff --git a/test/e2e/common/util_pvc.go b/test/e2e/common/util_pvc.go
new file mode 100644
index 000000000..9253a2423
--- /dev/null
+++ b/test/e2e/common/util_pvc.go
@@ -0,0 +1,259 @@
+package common
+
+// Utility functions for Persistent Volume Claims and Persistent Volumes
+import (
+    "context"
+    "fmt"
+    corev1 "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "strings"
+
+    . "github.com/onsi/gomega"
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var defTimeoutSecs = "90s" + +// Check for a deleted Persistent Volume Claim, +// either the object does not exist +// or the status phase is invalid. +func IsPVCDeleted(volName string) bool { + pvc, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{}) + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "persistentvolumeclaims") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + // After the PVC has been deleted it may still accessible, but status phase will be invalid + Expect(err).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + switch pvc.Status.Phase { + case + corev1.ClaimBound, + corev1.ClaimPending, + corev1.ClaimLost: + return false + default: + return true + } +} + +// Check for a deleted Persistent Volume, +// either the object does not exist +// or the status phase is invalid. +func IsPVDeleted(volName string) bool { + pv, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{}) + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "persistentvolumes") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + // After the PV has been deleted it may still accessible, but status phase will be invalid + Expect(err).To(BeNil()) + Expect(pv).ToNot(BeNil()) + switch pv.Status.Phase { + case + corev1.VolumeBound, + corev1.VolumeAvailable, + corev1.VolumeFailed, + corev1.VolumePending, + corev1.VolumeReleased: + return false + default: + return true + } +} + +// IsPvcBound returns true if a PVC with the given name is bound otherwise false is returned. +func IsPvcBound(pvcName string) bool { + return GetPvcStatusPhase(pvcName) == corev1.ClaimBound +} + +// Retrieve status phase of a Persistent Volume Claim +func GetPvcStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) { + pvc, getPvcErr := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + return pvc.Status.Phase +} + +// Retrieve status phase of a Persistent Volume +func GetPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) { + pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{}) + Expect(getPvErr).To(BeNil()) + Expect(pv).ToNot(BeNil()) + return pv.Status.Phase +} + +// Create a PVC and verify that +// 1. The PVC status transitions to bound, +// 2. The associated PV is created and its status transitions bound +// 3. The associated MV is created and has a State "healthy" +func MkPVC(volName string, scName string) string { + fmt.Printf("creating %s, %s\n", volName, scName) + // PVC create options + createOpts := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: volName, + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("64Mi"), + }, + }, + }, + } + + // Create the PVC. 
+ PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims + _, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{}) + Expect(createErr).To(BeNil()) + + // Confirm the PVC has been created. + pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PVC to be bound. + Eventually(func() corev1.PersistentVolumeClaimPhase { + return GetPvcStatusPhase(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(corev1.ClaimBound)) + + // Refresh the PVC contents, so that we can get the PV name. + pvc, getPvcErr = PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PV to be provisioned + Eventually(func() *corev1.PersistentVolume { + pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if getPvErr != nil { + return nil + } + return pv + + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Not(BeNil())) + + // Wait for the PV to be bound. + Eventually(func() corev1.PersistentVolumePhase { + return GetPvStatusPhase(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(corev1.VolumeBound)) + + Eventually(func() *MayastorVolStatus { + return GetMSV(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, + "1s", + ).Should(Not(BeNil())) + + return string(pvc.ObjectMeta.UID) +} + +// Delete the PVC and verify that +// 1. The PVC is deleted +// 2. The associated PV is deleted +// 3. The associated MV is deleted +func RmPVC(volName string, scName string) { + fmt.Printf("removing %s, %s\n", volName, scName) + + PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims + + // Confirm the PVC has been created. + pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{}) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Delete the PVC + deleteErr := PVCApi("default").Delete(context.TODO(), volName, metav1.DeleteOptions{}) + Expect(deleteErr).To(BeNil()) + + // Wait for the PVC to be deleted. + Eventually(func() bool { + return IsPVCDeleted(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the PV to be deleted. + Eventually(func() bool { + return IsPVDeleted(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the MSV to be deleted. 
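+    // The MSV is keyed by the PVC UID, and IsMSVDeleted treats a "not found"
+    // error from the API server as confirmation that the volume is gone.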
+ Eventually(func() bool { + return IsMSVDeleted(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) +} + +/// Create a PVC in default namespace, no options and no context +func CreatePVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), pvc, metav1.CreateOptions{}) +} + +/// Retrieve a PVC in default namespace, no options and no context +func GetPVC(volName string) (*v1.PersistentVolumeClaim, error) { + return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{}) +} + +/// Delete a PVC in default namespace, no options and no context +func DeletePVC(volName string) error { + return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Delete(context.TODO(), volName, metav1.DeleteOptions{}) +} + +/// Retrieve a PV in default namespace, no options and no context +func GetPV(volName string) (*v1.PersistentVolume, error) { + return gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{}) +} + +func CheckForPVCs() (bool, error) { + logf.Log.Info("CheckForPVCs") + foundResources := false + + pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) + if err == nil && pvcs != nil && len(pvcs.Items) != 0 { + logf.Log.Info("CheckForVolumeResources: found PersistentVolumeClaims", + "PersistentVolumeClaims", pvcs.Items) + foundResources = true + } + return foundResources, err +} + +func CheckForPVs() (bool, error) { + logf.Log.Info("CheckForPVs") + foundResources := false + + pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + if err == nil && pvs != nil && len(pvs.Items) != 0 { + logf.Log.Info("CheckForVolumeResources: found PersistentVolumes", + "PersistentVolumes", pvs.Items) + foundResources = true + } + return foundResources, err +} diff --git a/test/e2e/common/util_testpods.go b/test/e2e/common/util_testpods.go new file mode 100644 index 000000000..04194360d --- /dev/null +++ b/test/e2e/common/util_testpods.go @@ -0,0 +1,115 @@ +package common + +// Utility functions for test pods. +import ( + "context" + "fmt" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "os/exec" + + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +func RunFio(podName string, duration int) { + argRuntime := fmt.Sprintf("--runtime=%d", duration) + cmd := exec.Command( + "kubectl", + "exec", + "-it", + podName, + "--", + "fio", + "--name=benchtest", + "--size=50m", + "--filename=/volume/test", + "--direct=1", + "--rw=randrw", + "--ioengine=libaio", + "--bs=4k", + "--iodepth=16", + "--numjobs=1", + "--time_based", + argRuntime, + ) + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +func IsPodRunning(podName string) bool { + var pod corev1.Pod + if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: "default"}, &pod) != nil { + return false + } + return pod.Status.Phase == v1.PodRunning +} + +/// Create a Pod in default namespace, no options and no context +func CreatePod(podDef *corev1.Pod) (*corev1.Pod, error) { + return gTestEnv.KubeInt.CoreV1().Pods("default").Create(context.TODO(), podDef, metav1.CreateOptions{}) +} + +/// Delete a Pod in default namespace, no options and no context +func DeletePod(podName string) error { + return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) +} + +func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { + podDef := CreateFioPodDef(podName, volName) + return CreatePod(podDef) +} + +/// Create a test fio pod in default namespace, no options and no context +/// mayastor volume is mounted on /volume +func CreateFioPodDef(podName string, volName string) *corev1.Pod { + podDef := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: podName, + Image: "dmonakhov/alpine-fio", + Args: []string{"sleep", "1000000"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "ms-volume", + MountPath: "/volume", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "ms-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: volName, + }, + }, + }, + }, + }, + } + return &podDef +} + +func CheckForTestPods() (bool, error) { + logf.Log.Info("CheckForTestPods") + foundPods := false + + pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) + if err == nil && pods != nil && len(pods.Items) != 0 { + logf.Log.Info("CheckForTestPods", + "Pods", pods.Items) + foundPods = true + } + return foundPods, err +} diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go index 92b4061a9..36a97afa9 100644 --- a/test/e2e/uninstall/uninstall_test.go +++ b/test/e2e/uninstall/uninstall_test.go @@ -93,7 +93,7 @@ func teardownMayastor() { pvcsDeleted, pvcsFound = common.DeleteAllVolumeResources() } - common.DeletePools() + common.DeleteAllPools() logf.Log.Info("Cleanup done, Uninstalling mayastor") // Deletes can stall indefinitely, try to mitigate this From 54fbef4ef13ef725dbaae01d8ac4fb4aa835933b Mon Sep 17 00:00:00 2001 From: Arne Rusek Date: Fri, 19 Feb 2021 16:08:05 +0100 Subject: [PATCH 25/78] ci(e2e): run e2e on shared worker Because of new beefier worker nodes for Jenkins we can run e2e tests in parallel with other jobs thus keeping more ci capacity for other jobs --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0fe189cc0..4f7125b3a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -202,7 +202,7 @@ 
pipeline { } } stage('run e2e') { - agent { label 'nixos-mayastor' } + agent { label 'nixos' } environment { GIT_COMMIT_SHORT = sh( // using printf to get rid of trailing newline From 386e39af9a388225cfb7458b2abb075d44329d4d Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Wed, 10 Feb 2021 15:48:34 +0000 Subject: [PATCH 26/78] chore: do profile-based yaml generation The purpose of profiles is to provide reasonable defaults for yaml templates for each of the major use cases: * develop: used by developers to run mayastor * release: used for stable releases of mayastor * test: used by mayastor e2e tests In addition to that I have: * increased debug level of moac when running in develop env * fixed wrong usage of -l parameter (indexing starts at 0) * introduced rule of thumb for cpu count and hugepage mem (1:1) Resolves: CAS-699 --- chart/templates/_helpers.tpl | 6 +- chart/templates/mayastor-daemonset.yaml | 14 +- chart/templates/moac-deployment.yaml | 4 +- chart/values.yaml | 12 +- deploy/mayastor-daemonset.yaml | 20 +-- deploy/moac-deployment.yaml | 2 +- scripts/generate-deploy-yamls.sh | 166 +++++++++++++++++++----- test/e2e/install/install_test.go | 2 +- 8 files changed, 162 insertions(+), 64 deletions(-) diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl index 7b4753649..34c605d1e 100644 --- a/chart/templates/_helpers.tpl +++ b/chart/templates/_helpers.tpl @@ -1,7 +1,7 @@ {{/* Enforce trailing slash to mayastorImagesPrefix or leave empty */}} {{- define "mayastorImagesPrefix" -}} -{{- if .Values.mayastorImagesRepo }} -{{- printf "%s/" (.Values.mayastorImagesRepo | trimSuffix "/") }} +{{- if .Values.mayastorImagesRegistry }} +{{- printf "%s/" (.Values.mayastorImagesRegistry | trimSuffix "/") }} {{- else }} {{- "" }} {{- end }} @@ -13,6 +13,6 @@ {{- if gt $i 0 }} {{- printf "," }} {{- end }} -{{- printf "%d" (add $i 1) }} +{{- printf "%d" $i }} {{- end }} {{- end }} diff --git a/chart/templates/mayastor-daemonset.yaml b/chart/templates/mayastor-daemonset.yaml index c84957d64..0c7ba6a25 100644 --- a/chart/templates/mayastor-daemonset.yaml +++ b/chart/templates/mayastor-daemonset.yaml @@ -45,16 +45,10 @@ spec: - name: IMPORT_NEXUSES value: "false" args: - # In order to select what cores mayastor should be running on, a mask or a list can be specified. - # For example: -m 0x1 will tell mayastor to only use one core which is equivalent to -l 1 - # Using a mask of 0x3 will use the first 2 cores, which is equivalent to -l 1-2 - # - # The -l argument supports ranges to be able to do the same as passing a mask for example: - # -l 1,2,10-20 means use core 1, 2, 10 to 20 - # - # Note: - # 1. When both -m and -l are specified the -l argument is takes precedence. - # 2. Ensure that the CPU resources are updated accordingly. If you use 2 CPUs, the CPU: field should also read 2. + # The -l argument accepts cpu-list. Indexing starts at zero. + # For example -l 1,2,10-20 means use core 1, 2, 10 to 20. + # Note: Ensure that the CPU resources are updated accordingly. + # If you use 2 CPUs, the CPU: field should also read 2. 
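+          # For example, rendering the chart with mayastorCpuCount: "2"
+          # produces "-l0,1" below and, by the one-core-per-GiB rule of
+          # thumb, CPU: "2" with 2Gi of hugepages in the resource limits.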
- "-N$(MY_NODE_NAME)" - "-g$(MY_POD_IP)" - "-nnats" diff --git a/chart/templates/moac-deployment.yaml b/chart/templates/moac-deployment.yaml index 83b429566..d77556c0b 100644 --- a/chart/templates/moac-deployment.yaml +++ b/chart/templates/moac-deployment.yaml @@ -49,8 +49,8 @@ spec: - "--csi-address=$(CSI_ENDPOINT)" - "--namespace=$(MY_POD_NAMESPACE)" - "--port=4000" - - "--message-bus=nats" - - "-v" + - "--message-bus=nats"{{ if .Values.moacDebug }} + - "-vv"{{ end }} env: - name: CSI_ENDPOINT value: /var/lib/csi/sockets/pluginproxy/csi.sock diff --git a/chart/values.yaml b/chart/values.yaml index dcd79b1a9..318f8abfd 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -1,8 +1,12 @@ -mayastorImagesTag: develop +mayastorImagesTag: latest mayastorImagePullPolicy: Always -mayastorCpuCount: "2" +mayastorCpuCount: "1" mayastorHugePagesGiB: "1" -mayastorImagesRepo: "" +mayastorImagesRegistry: "" mayastorPools: - node: "NODE_NAME" - device: "DEVICE" \ No newline at end of file + device: "DEVICE" +# This option is intended for development yamls and motivated by the problem of +# moac that does not update status of msp resource in some cases. Feel free to +# remove when no longer needed. +moacDebug: false \ No newline at end of file diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index bc2b85e41..e7f4e2aeb 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -47,21 +47,15 @@ spec: - name: IMPORT_NEXUSES value: "false" args: - # In order to select what cores mayastor should be running on, a mask or a list can be specified. - # For example: -m 0x1 will tell mayastor to only use one core which is equivalent to -l 1 - # Using a mask of 0x3 will use the first 2 cores, which is equivalent to -l 1-2 - # - # The -l argument supports ranges to be able to do the same as passing a mask for example: - # -l 1,2,10-20 means use core 1, 2, 10 to 20 - # - # Note: - # 1. When both -m and -l are specified the -l argument is takes precedence. - # 2. Ensure that the CPU resources are updated accordingly. If you use 2 CPUs, the CPU: field should also read 2. + # The -l argument accepts cpu-list. Indexing starts at zero. + # For example -l 1,2,10-20 means use core 1, 2, 10 to 20. + # Note: Ensure that the CPU resources are updated accordingly. + # If you use 2 CPUs, the CPU: field should also read 2. - "-N$(MY_NODE_NAME)" - "-g$(MY_POD_IP)" - "-nnats" - "-y/var/local/mayastor/config.yaml" - - "-l1,2" + - "-l0" securityContext: privileged: true volumeMounts: @@ -80,11 +74,11 @@ spec: # belong to Guaranteed QoS class, hence can never get evicted in case of # pressure unless they exceed those limits. limits and requests must be the same. limits: - cpu: "2" + cpu: "1" memory: "512Mi" hugepages-2Mi: "1Gi" requests: - cpu: "2" + cpu: "1" memory: "512Mi" hugepages-2Mi: "1Gi" ports: diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml index e4c096b1f..f53a5167a 100644 --- a/deploy/moac-deployment.yaml +++ b/deploy/moac-deployment.yaml @@ -52,7 +52,7 @@ spec: - "--namespace=$(MY_POD_NAMESPACE)" - "--port=4000" - "--message-bus=nats" - - "-v" + - "-vv" env: - name: CSI_ENDPOINT value: /var/lib/csi/sockets/pluginproxy/csi.sock diff --git a/scripts/generate-deploy-yamls.sh b/scripts/generate-deploy-yamls.sh index ea2e47b58..9e0bf30da 100755 --- a/scripts/generate-deploy-yamls.sh +++ b/scripts/generate-deploy-yamls.sh @@ -1,52 +1,158 @@ -#! /bin/sh +#!/bin/sh + +# This is a wrapper script for helm to generate yaml files for deploying +# mayastor. 
It provides reasonable defaults for helm values based on +# selected profile. Easy to use and minimizing risk of error. +# Keep the script as simple as possible - ad-hoc use cases can be addressed +# by running helm directly. set -e -if [ "x$1" = x ]; then -cat <] [] +SCRIPTDIR="$(realpath "$(dirname "$0")")" + +# Internal variables tunable by options +cores= +moac_debug=false +output_dir="$SCRIPTDIR/../deploy" +pools= +profile= +pull_policy= +registry= +tag= + +help() { + cat < + +Common options: + -c # of cpu cores for mayastor overriding the profile's default. + -h/--help Display help message and exit. + -o Directory to store the generated yaml files (default $output_dir) + -p Node name and associated pool device (the option may repeat). + -r Docker image registry of mayastor images (default none). + -t Tag of mayastor images overriding the profile's default. + +Profiles: + develop: Used by developers of mayastor. + release: Recommended for stable releases deployed by users. + test: Used by mayastor e2e tests. EOF - exit 1 -fi +} -SCRIPTDIR="$(realpath "$(dirname "$0")")" +# Parse common options +while [ "$#" -gt 0 ]; do + case "$1" in + -c) + shift + cores=$1 + ;; + -h|--help) + help + exit 0 + ;; + -o) + shift + output_dir=$1 + ;; + -p) + shift + pools="$pools $1" + ;; + -r) + shift + registry=$1 + ;; + -t) + shift + tag=$1 + ;; + -*) + echo "Unknown option: $1" + help + exit 1 + ;; + *) + profile=$1 + shift + break + ;; + esac + shift +done -if [ "$1" = "-t" ]; then - TARGET_DIR="$2" - shift 2 -else - TARGET_DIR="$SCRIPTDIR/../deploy" -fi -if [ ! -d "$TARGET_DIR" ]; then - mkdir -p "$TARGET_DIR" +# The space after profile name is reserved for profile specific options which +# we don't have yet. +if [ "$#" -gt 0 ]; then + help + exit 1 fi -if [ "x$2" = x ]; then - mayastor_images_repo="NONE" +# In most of the cases the tag will be a specific version that does not change +# so save dockerhub bandwidth and don't always pull the image. +if [ -n "$tag" ]; then + pull_policy=IfNotPresent else - mayastor_images_repo="$2" + pull_policy=Always fi +# Set profile defaults +case "$profile" in + "develop") + [ -z "$cores" ] && cores=1 + [ -z "$tag" ] && tag=develop + moac_debug=true + ;; + "release") + [ -z "$cores" ] && cores=1 + [ -z "$tag" ] && tag=latest + ;; + "test") + [ -z "$cores" ] && cores=2 + [ -z "$tag" ] && tag=ci + moac_debug=true + ;; + *) + echo "Missing or invalid profile name. Type \"$0 --help\"" + exit 1 + ;; +esac + set -u -if ! which helm > /dev/null 2>&1; then - echo "Install helm to path >v3.4.1" - echo "https://github.com/helm/helm/releases/tag/v3.4.1" - exit 1 +if ! which helm >/dev/null 2>&1; then + echo "Install helm (>v3.4.1) to PATH" + exit 1 fi -tmpd=$(mktemp -d /tmp/generate-deploy.sh.XXXXXXXX) +if [ ! 
-d "$output_dir" ]; then + mkdir -p "$output_dir" +fi + +tmpd=$(mktemp -d /tmp/generate-deploy-yamls.sh.XXXXXXXX) # shellcheck disable=SC2064 trap "rm -fr '$tmpd'" HUP QUIT EXIT TERM INT -template_params="mayastorImagesTag=$1" -if [ "$mayastor_images_repo" != "NONE" ]; then - template_params="$template_params,mayastorImagesRepo=$mayastor_images_repo" +# A rule of thumb: # of cpu cores equals to Gigs of hugepage memory +template_params="mayastorImagesTag=$tag" +template_params="$template_params,mayastorImagePullPolicy=$pull_policy" +template_params="$template_params,mayastorCpuCount=$cores" +template_params="$template_params,mayastorHugePagesGiB=$cores" +template_params="$template_params,moacDebug=$moac_debug" +if [ -n "$registry" ]; then + template_params="$template_params,mayastorImagesRegistry=$registry" +fi +if [ -n "$pools" ]; then + i=0 + for pool in $pools; do + node=$(echo "$pool" | sed 's/,.*//') + device=$(echo "$pool" | sed 's/.*,//') + template_params="$template_params,mayastorPools[$i].node=$node" + template_params="$template_params,mayastorPools[$i].device=$device" + i=$((i + 1)) + done fi + helm template --set "$template_params" mayastor "$SCRIPTDIR/../chart" --output-dir="$tmpd" --namespace mayastor -mv "$tmpd"/mayastor/templates/*.yaml "$TARGET_DIR" +mv "$tmpd"/mayastor/templates/*.yaml "$output_dir/" diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go index 7fd6cdb1f..7ae8f5bef 100644 --- a/test/e2e/install/install_test.go +++ b/test/e2e/install/install_test.go @@ -137,7 +137,7 @@ func getTemplateYamlDir() string { } func generateYamls(imageTag string, registryAddress string) { - bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -t ../../../test-yamls %s %s", imageTag, registryAddress) + bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -o ../../../test-yamls -t %s -r %s test", imageTag, registryAddress) cmd := exec.Command("bash", "-c", bashcmd) out, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred(), "%s", out) From 48f846fe1b7459950065df4233829f6822100084 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 19 Feb 2021 12:19:02 +0000 Subject: [PATCH 27/78] chore(operators): purge rust operator from repo Remove the rust node operator from the tree as it will be rewritten in go and might not even live in this repo. --- Cargo.lock | 485 +---------------------- Cargo.toml | 1 - control-plane/deployer/src/infra/mod.rs | 118 +----- control-plane/deployer/src/lib.rs | 9 +- control-plane/operators/Cargo.toml | 37 -- control-plane/operators/node/src/main.rs | 281 ------------- control-plane/rest/Cargo.toml | 2 +- nix/pkgs/control-plane/cargo-project.nix | 4 +- nix/pkgs/control-plane/default.nix | 9 - nix/pkgs/images/default.nix | 19 - nix/pkgs/mayastor/default.nix | 2 +- 11 files changed, 17 insertions(+), 950 deletions(-) delete mode 100644 control-plane/operators/Cargo.toml delete mode 100644 control-plane/operators/node/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 7b727328b..d821aed64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,16 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "actix-codec" version = "0.3.0" @@ -348,15 +338,6 @@ dependencies = [ "url", ] -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" -dependencies = [ - "const-random", -] - [[package]] name = "aho-corasick" version = "0.7.15" @@ -390,12 +371,6 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" -[[package]] -name = "array_tool" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f8cb5d814eb646a863c4f24978cff2880c4be96ad8cde2c0f0678732902e271" - [[package]] name = "arrayref" version = "0.3.6" @@ -425,19 +400,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "async-compression" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72c1f1154e234325b50864a349b9c8e56939e266a4c307c0f159812df2f9537" -dependencies = [ - "bytes 0.5.6", - "flate2", - "futures-core", - "memchr", - "pin-project-lite 0.2.4", -] - [[package]] name = "async-executor" version = "1.4.0" @@ -1012,28 +974,6 @@ dependencies = [ "cache-padded", ] -[[package]] -name = "const-random" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f590d95d011aa80b063ffe3253422ed5aa462af4e9867d43ce8337562bac77c4" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40" -dependencies = [ - "getrandom 0.2.2", - "lazy_static", - "proc-macro-hack", - "tiny-keccak", -] - [[package]] name = "const_fn" version = "0.4.5" @@ -1069,17 +1009,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys 0.7.0", - "libc", -] - -[[package]] -name = "core-foundation" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" -dependencies = [ - "core-foundation-sys 0.8.2", + "core-foundation-sys", "libc", ] @@ -1089,12 +1019,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" -[[package]] -name = "core-foundation-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" - [[package]] name = "cpuid-bool" version = "0.1.2" @@ -1208,12 +1132,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "csi" version = "0.2.0" @@ -1369,17 +1287,6 @@ dependencies = [ "syn 1.0.60", ] -[[package]] -name = "dashmap" -version = 
"3.11.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f260e2fc850179ef410018660006951c1b55b79e8087e87111a2c388994b9b5" -dependencies = [ - "ahash", - "cfg-if 0.1.10", - "num_cpus", -] - [[package]] name = "dashmap" version = "4.0.2" @@ -1412,17 +1319,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2 1.0.24", - "quote 1.0.9", - "syn 1.0.60", -] - [[package]] name = "derive_builder" version = "0.7.2" @@ -1738,21 +1634,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.0.1" @@ -2163,19 +2044,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "hyper-tls" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" -dependencies = [ - "bytes 0.5.6", - "hyper", - "native-tls", - "tokio", - "tokio-tls", -] - [[package]] name = "hyper-unix-connector" version = "0.1.5" @@ -2266,15 +2134,9 @@ dependencies = [ "socket2", "widestring", "winapi 0.3.9", - "winreg 0.6.2", + "winreg", ] -[[package]] -name = "ipnet" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" - [[package]] name = "ipnetwork" version = "0.17.0" @@ -2323,19 +2185,6 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" -[[package]] -name = "jsonpath_lib" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61352ec23883402b7d30b3313c16cbabefb8907361c4eb669d990cbb87ceee5a" -dependencies = [ - "array_tool", - "env_logger 0.7.1", - "log", - "serde", - "serde_json", -] - [[package]] name = "jsonrpc" version = "0.1.0" @@ -2351,23 +2200,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "k8s-openapi" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57f95fd36c08ce592e67400a0f1a66f432196997d5a7e9a97e8743c33d8a9312" -dependencies = [ - "base64 0.12.3", - "bytes 0.5.6", - "chrono", - "http 0.2.3", - "percent-encoding 2.1.0", - "serde", - "serde-value", - "serde_json", - "url", -] - [[package]] name = "kernel32-sys" version = "0.2.2" @@ -2378,68 +2210,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "kube" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3787d41d01ff816f93f1a73d20252f8a65887682206cfbf2d0f7d2d2b1b73fa" -dependencies = [ - "Inflector", - "base64 0.12.3", - "bytes 0.5.6", - "chrono", - "dirs", - "either", - "futures", - "futures-util", - "http 0.2.3", - "jsonpath_lib", - 
"k8s-openapi", - "log", - "openssl", - "pem", - "reqwest", - "serde", - "serde_json", - "serde_yaml", - "static_assertions", - "thiserror", - "time 0.2.25", - "tokio", - "url", -] - -[[package]] -name = "kube-derive" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd71bf282e5551ac0852afcf25352b7fb8dd9a66eed7b6e66a6ebbf6b5b2f475" -dependencies = [ - "Inflector", - "proc-macro2 1.0.24", - "quote 1.0.9", - "serde_json", - "syn 1.0.60", -] - -[[package]] -name = "kube-runtime" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9abc7b19889353e501e6bc7b2b9d7062b2e008ec256f11e9428ed8e56d046d2f" -dependencies = [ - "dashmap 3.11.10", - "derivative", - "futures", - "k8s-openapi", - "kube", - "pin-project 0.4.27", - "serde", - "smallvec", - "snafu", - "tokio", -] - [[package]] name = "language-tags" version = "0.2.2" @@ -2677,16 +2447,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.4.3" @@ -2767,24 +2527,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" -[[package]] -name = "native-tls" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 2.0.0", - "security-framework-sys 2.0.0", - "tempfile", -] - [[package]] name = "nats" version = "0.8.6" @@ -2966,39 +2708,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" -dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "foreign-types", - "lazy_static", - "libc", - "openssl-sys", -] - [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "openssl-sys" -version = "0.9.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" -dependencies = [ - "autocfg 1.0.1", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.11.2" @@ -3006,7 +2721,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3434e2a9d2aec539d91f4251bf9047cd53b4d3f386f9d336f4c8076c72a5256" dependencies = [ "async-trait", - "dashmap 4.0.2", + "dashmap", "fnv", "futures", "js-sys", @@ -3042,33 +2757,6 @@ dependencies = [ "opentelemetry", ] -[[package]] -name = "operators" -version = "0.1.0" -dependencies = [ - "actix-web", - "anyhow", - "either", - "humantime 2.1.0", - "k8s-openapi", - "kube", - "kube-derive", - 
"kube-runtime", - "mbus_api", - "opentelemetry", - "opentelemetry-jaeger", - "rest", - "rustls", - "serde", - "serde_json", - "structopt", - "strum", - "tokio", - "tracing", - "tracing-futures", - "tracing-subscriber", -] - [[package]] name = "ordered-float" version = "1.1.1" @@ -3078,15 +2766,6 @@ dependencies = [ "num-traits 0.2.14", ] -[[package]] -name = "ordered-float" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "766f840da25490628d8e63e529cd21c014f6600c6b8517add12a6fa6167a6218" -dependencies = [ - "num-traits 0.2.14", -] - [[package]] name = "paperclip" version = "0.5.0" @@ -3213,17 +2892,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -[[package]] -name = "pem" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" -dependencies = [ - "base64 0.13.0", - "once_cell", - "regex", -] - [[package]] name = "percent-encoding" version = "1.0.1" @@ -3752,43 +3420,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "reqwest" -version = "0.10.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" -dependencies = [ - "async-compression", - "base64 0.13.0", - "bytes 0.5.6", - "encoding_rs", - "futures-core", - "futures-util", - "http 0.2.3", - "http-body 0.3.1", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "lazy_static", - "log", - "mime", - "mime_guess", - "native-tls", - "percent-encoding 2.1.0", - "pin-project-lite 0.2.4", - "serde", - "serde_json", - "serde_urlencoded 0.7.0", - "tokio", - "tokio-tls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.7.0", -] - [[package]] name = "resolv-conf" version = "0.7.0" @@ -3928,7 +3559,7 @@ dependencies = [ "openssl-probe", "rustls", "schannel", - "security-framework 1.0.0", + "security-framework", ] [[package]] @@ -3976,23 +3607,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" dependencies = [ "bitflags", - "core-foundation 0.7.0", - "core-foundation-sys 0.7.0", - "libc", - "security-framework-sys 1.0.0", -] - -[[package]] -name = "security-framework" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" -dependencies = [ - "bitflags", - "core-foundation 0.9.1", - "core-foundation-sys 0.8.2", + "core-foundation", + "core-foundation-sys", "libc", - "security-framework-sys 2.0.0", + "security-framework-sys", ] [[package]] @@ -4001,17 +3619,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" dependencies = [ - "core-foundation-sys 0.7.0", - "libc", -] - -[[package]] -name = "security-framework-sys" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" -dependencies = [ - "core-foundation-sys 0.8.2", + "core-foundation-sys", "libc", ] @@ -4057,16 +3665,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-value" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" -dependencies = [ - "ordered-float 2.1.1", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.123" @@ -4279,8 +3877,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" dependencies = [ "doc-comment", - "futures-core", - "pin-project 0.4.27", "snafu-derive", ] @@ -4335,12 +3931,6 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "stdweb" version = "0.4.20" @@ -4613,7 +4203,7 @@ dependencies = [ "byteorder", "integer-encoding", "log", - "ordered-float 1.1.1", + "ordered-float", "threadpool", ] @@ -4666,15 +4256,6 @@ dependencies = [ "syn 1.0.60", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinytemplate" version = "1.2.0" @@ -4747,16 +4328,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-util" version = "0.2.0" @@ -5172,15 +4743,6 @@ dependencies = [ "libudev-sys", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -5266,12 +4828,6 @@ dependencies = [ "rand 0.6.5", ] -[[package]] -name = "vcpkg" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - [[package]] name = "vec-arena" version = "1.0.0" @@ -5331,8 +4887,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] @@ -5351,18 +4905,6 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "wasm-bindgen-macro" version = "0.2.70" @@ -5498,15 +5040,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "ws2_32-sys" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 06bdc6388..4a46addfe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,6 @@ members = [ "control-plane/mbus-api", "composer", 
"control-plane/rest", - "control-plane/operators", "control-plane/macros", "control-plane/deployer", "control-plane/tests" diff --git a/control-plane/deployer/src/infra/mod.rs b/control-plane/deployer/src/infra/mod.rs index 1d7a3f85b..385bfb10f 100644 --- a/control-plane/deployer/src/infra/mod.rs +++ b/control-plane/deployer/src/infra/mod.rs @@ -115,110 +115,6 @@ macro_rules! impl_ctrlp_agents { }; } -#[macro_export] -macro_rules! impl_ctrlp_operators { - ($($name:ident,)+) => { - /// List of Control Plane Operators to deploy - #[derive(Debug, Clone)] - pub struct ControlPlaneOperators(Vec); - - /// All the Control Plane Operators - #[derive(Debug, Clone, StructOpt, ToString, EnumVariantNames)] - #[structopt(about = "Control Plane Operators")] - pub enum ControlPlaneOperator { - Empty(Empty), - $( - $name(paste!{[<$name Op>]}), - )+ - } - - paste! { - impl From<&ControlPlaneOperator> for Component { - fn from(ctrlp_svc: &ControlPlaneOperator) -> Self { - match ctrlp_svc { - ControlPlaneOperator::Empty(obj) => Component::Empty(obj.clone()), - $(ControlPlaneOperator::$name(obj) => Component::[<$name Op>](obj.clone()),)+ - } - } - } - } - - paste! { - impl FromStr for ControlPlaneOperator { - type Err = String; - - fn from_str(source: &str) -> Result { - Ok(match source.trim().to_ascii_lowercase().as_str() { - "" => Self::Empty(Default::default()), - $(stringify!([<$name:lower>]) => Self::$name(]}>::default()),)+ - _ => return Err(format!( - "\"{}\" is an invalid type of operator! Available types: {:?}", - source, - Self::VARIANTS - )), - }) - } - } - } - - $(#[async_trait] - impl ComponentAction for paste!{[<$name Op>]} { - fn configure(&self, options: &StartOptions, cfg: Builder) -> Result { - let name = format!("{}-op", stringify!($name).to_ascii_lowercase()); - if options.build { - let status = std::process::Command::new("cargo") - .args(&["build", "-p", "operators", "--bin", &name]) - .status()?; - build_error(&format!("the {} operator", name), status.code())?; - } - let rest = format!("http://rest.{}:8081", cfg.get_name()); - let host_kube_config = match &options.kube_config { - Some(config) => config.clone(), - None => { - match std::env::var("USER") { - Ok(user) => format!("/home/{}/.kube/config", user), - Err(_) => "/root/.kube/config".to_string(), - } - } - }; - let kube_config = match options.base_image { - Some(_) => "/root/.kube/config", - None => "/.kube/config", - }; - Ok(if options.jaeger { - let jaeger_config = format!("jaeger.{}:6831", cfg.get_name()); - cfg.add_container_spec( - ContainerSpec::from_binary( - &name, - Binary::from_dbg(&name) - .with_args(vec!["-r", &rest]) - .with_args(vec!["-j", &jaeger_config]), - ) - .with_bind(&host_kube_config, kube_config), - ) - } else { - cfg.add_container_spec( - ContainerSpec::from_binary( - &name, - Binary::from_dbg(&name).with_args(vec!["-r", &rest]) - ) - .with_bind(&host_kube_config, kube_config), - ) - }) - } - async fn start(&self, _options: &StartOptions, cfg: &ComposeTest) -> Result<(), Error> { - // todo: wait for the rest server to be up - let name = format!("{}-op", stringify!($name).to_ascii_lowercase()); - cfg.start(&name).await?; - Ok(()) - } - })+ - }; - ($($name:ident), +) => { - impl_ctrlp_operators!($($name,)+); - }; -} - pub fn build_error(name: &str, status: Option) -> Result<(), Error> { let make_error = |extra: &str| { let error = format!("Failed to build {}: {}", name, extra); @@ -321,22 +217,16 @@ macro_rules! 
impl_component { impl Components { pub fn push_generic_components(&mut self, name: &str, component: Component) { - if !ControlPlaneAgent::VARIANTS.iter().any(|&s| s == name) && - !ControlPlaneOperator::VARIANTS.iter().any(|&s| &format!("{}Op", s) == name) { + if !ControlPlaneAgent::VARIANTS.iter().any(|&s| s == name) { self.0.push(component); } } pub fn new(options: StartOptions) -> Components { let agents = options.agents.clone(); - let operators = options.operators.clone().unwrap_or_default(); - let mut components = agents + let components = agents .iter() .map(Component::from) .collect::>(); - components.extend(operators - .iter() - .map(Component::from) - .collect::>()); let mut components = Components(components, options.clone()); $(components.push_generic_components(stringify!($name), $name::default().into());)+ @@ -442,11 +332,7 @@ impl_component! { Volume, 4, JsonGrpc, 4, Mayastor, 5, - NodeOp, 6, } // Message Bus Control Plane Agents impl_ctrlp_agents!(Node, Pool, Volume, JsonGrpc); - -// Kubernetes Mayastor Low-level Operators -impl_ctrlp_operators!(Node); diff --git a/control-plane/deployer/src/lib.rs b/control-plane/deployer/src/lib.rs index dd524f392..50586aa45 100644 --- a/control-plane/deployer/src/lib.rs +++ b/control-plane/deployer/src/lib.rs @@ -65,11 +65,6 @@ pub struct StartOptions { )] pub agents: Vec, - /// Use the following Control Plane Operators - /// Specify one operator at a time or as a list - #[structopt(short, long, value_delimiter = ",")] - pub operators: Option>, - /// Kubernetes Config file if using operators /// [default: "~/.kube/config"] #[structopt(short, long)] @@ -127,8 +122,8 @@ impl StartOptions { self.build = build; self } - pub fn with_mayastors(mut self, mayastors: i32) -> Self { - self.mayastors = mayastors as u32; + pub fn with_mayastors(mut self, mayastors: u32) -> Self { + self.mayastors = mayastors; self } pub fn with_show_info(mut self, show_info: bool) -> Self { diff --git a/control-plane/operators/Cargo.toml b/control-plane/operators/Cargo.toml deleted file mode 100644 index 297e9dc71..000000000 --- a/control-plane/operators/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "operators" -version = "0.1.0" -authors = ["Tiago Castro "] -edition = "2018" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[[bin]] -name = "node-op" -path = "node/src/main.rs" - -[dependencies] -rustls = "0.18" -actix-web = { version = "3.2.0", features = ["rustls"] } -serde_json = "1.0" -structopt = "0.3.15" -tokio = { version = "0.2", features = ["full"] } -anyhow = "1.0.32" -mbus_api = { path = "../mbus-api" } -strum = "0.19" -humantime = "2.0.1" -kube = "0.43.0" -kube-runtime = "0.43.0" -kube-derive = "0.43.0" -k8s-openapi = { version = "0.9.0", default-features = false, features = ["v1_18"] } -either = "1.6.0" -tracing = "0.1" -tracing-subscriber = "0.2" -tracing-futures = "0.2.4" -opentelemetry-jaeger = { version = "0.10", features = ["tokio"] } -opentelemetry = "0.11.2" -rest = { path = "../rest" } - -[dependencies.serde] -features = ["derive"] -version = "1.0" \ No newline at end of file diff --git a/control-plane/operators/node/src/main.rs b/control-plane/operators/node/src/main.rs deleted file mode 100644 index 535635268..000000000 --- a/control-plane/operators/node/src/main.rs +++ /dev/null @@ -1,281 +0,0 @@ -use kube::api::{Api, DeleteParams, ListParams, Meta, PostParams}; -use kube_derive::CustomResource; -use rest_client::versions::v0::*; -use serde::{Deserialize, Serialize}; -use 
std::convert::TryFrom; -use structopt::StructOpt; -use tracing::{debug, error, info, instrument}; - -#[derive(Debug, StructOpt)] -struct CliArgs { - /// The Rest Server URL to connect to - #[structopt(long, short, default_value = "https://localhost:8080")] - rest: String, - - /// Polling period - #[structopt(long, short, default_value = "30s")] - period: humantime::Duration, - - /// Trace rest requests to the Jaeger endpoint agent - #[structopt(long, short)] - jaeger: Option, -} - -#[derive(CustomResource, Deserialize, Serialize, Clone, Debug)] -#[kube( - group = "openebs.io", - version = "v1alpha1", - kind = "MayastorNode", - namespaced -)] -#[kube(apiextensions = "v1beta1")] -#[kube(status = "String")] -#[serde(rename_all = "camelCase")] -pub struct MayastorNodeSpec { - pub grpc_endpoint: String, -} - -impl TryFrom<&MayastorNode> for Node { - type Error = strum::ParseError; - fn try_from(kube_node: &MayastorNode) -> Result { - Ok(Node { - id: NodeId::from(kube_node.name()), - grpc_endpoint: kube_node.spec.grpc_endpoint.clone(), - state: kube_node - .status - .as_ref() - .unwrap_or(&"".to_string()) - .parse()?, - }) - } -} - -use opentelemetry::{ - global, - sdk::{propagation::TraceContextPropagator, trace::Tracer}, -}; -use opentelemetry_jaeger::Uninstall; - -fn init_tracing() -> Option<(Tracer, Uninstall)> { - if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { - tracing_subscriber::fmt().with_env_filter(filter).init(); - } else { - tracing_subscriber::fmt().with_env_filter("info").init(); - } - if let Some(agent) = CliArgs::from_args().jaeger { - tracing::info!("Starting jaeger trace pipeline at {}...", agent); - // Start a new jaeger trace pipeline - global::set_text_map_propagator(TraceContextPropagator::new()); - let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() - .with_agent_endpoint(agent) - .with_service_name("node-operator") - .install() - .expect("Jaeger pipeline install error"); - Some((_tracer, _uninstall)) - } else { - None - } -} - -#[actix_web::main] -async fn main() -> anyhow::Result<()> { - // need to keep the jaeger pipeline tracer alive, if enabled - let _tracer = init_tracing(); - - let polling_period = CliArgs::from_args().period.into(); - - let rest_cli = rest_client::ActixRestClient::new( - &CliArgs::from_args().rest, - CliArgs::from_args().jaeger.is_some(), - )?; - - let kube_client = kube::Client::try_default().await?; - let namespace = "mayastor"; - - // Validate that our "CRD" is up to date? - - // Manage the MayastorNode CR - let nodes_api: Api = - Api::namespaced(kube_client.clone(), namespace); - - loop { - // Poll for kubernetes nodes and rest nodes - // Reconcile from rest into kubernetes - if let Err(error) = polling_work(&nodes_api, rest_cli.v0()).await { - error!("Error while polling: {}", error); - } - - // Sleep till the next poll - tokio::time::delay_for(polling_period).await; - } -} - -/// This isn't quite a reconciler as no action is taken from k8s MayastorNodes -/// (msn), in fact, they should not be updated by the user. -/// We simply forward/translate control plane nodes into k8s nodes. 
-#[instrument(skip(nodes_api, rest_cli))] -async fn polling_work( - nodes_api: &Api, - rest_cli: impl RestClient, -) -> anyhow::Result<()> { - // Fetch all nodes as seen by the control plane via REST - let rest_nodes = rest_cli.get_nodes().await?; - debug!("Retrieved rest nodes: {:?}", rest_nodes); - - // Fetch all node CRD's from k8s - let kube_nodes = nodes_get_all(&nodes_api).await?; - debug!("Retrieved kube nodes: {:?}", kube_nodes); - - // control plane nodes which do not exist in k8s - let new_nodes = rest_nodes - .iter() - .filter(|node| { - !kube_nodes - .iter() - .any(|kube_node| kube_node.name() == node.id.to_string()) - }) - .collect::>(); - - // k8s nodes which no longer exist in the control plane - let delete_nodes = kube_nodes - .iter() - .filter(|kube_node| { - !rest_nodes - .iter() - .any(|node| kube_node.name() == node.id.to_string()) - }) - .collect::>(); - - // k8s nodes are out of date so need an update - let update_nodes = rest_nodes - .iter() - .filter(|&node| { - kube_nodes.iter().any(|kube_node| { - let node_from_kube = Node::try_from(kube_node); - if let Ok(kube_node) = node_from_kube.as_ref() { - kube_node != node - } else { - error!( - "Node {:#?} is not formatted properly.", - node_from_kube - ); - true - } - }) - }) - .collect::>(); - - if !new_nodes.is_empty() { - info!("Creating nodes: {:?}", new_nodes); - - for node in new_nodes { - if let Err(error) = node_create(&nodes_api, &node).await { - error!( - "Failed to create kube_node: {}, error={}", - node.id, error - ); - } - } - } - - if !update_nodes.is_empty() { - info!("Updating nodes: {:?}", update_nodes); - - for node in update_nodes { - if let Err(error) = node_update(&nodes_api, &node).await { - error!( - "Failed to update kube_node: {}, error={:?}", - node.id, error - ); - } - } - } - - if !delete_nodes.is_empty() { - info!("Deleting nodes: {:?}", delete_nodes); - - for node in delete_nodes { - if let Err(error) = node_delete(&nodes_api, &Meta::name(node)).await - { - error!( - "Failed to delete kube_node: {}, error={:?}", - Meta::name(node), - error - ); - } - } - } - - Ok(()) -} - -#[instrument(skip(nodes_api))] -async fn nodes_get_all( - nodes_api: &Api, -) -> anyhow::Result> { - let list_params = ListParams::default(); - let kube_nodes = nodes_api.list(&list_params).await?.items; - Ok(kube_nodes) -} - -#[instrument(skip(nodes_api))] -async fn node_create( - nodes_api: &Api, - node: &Node, -) -> anyhow::Result<()> { - let kube_node = MayastorNode::new( - node.id.as_str(), - MayastorNodeSpec { - grpc_endpoint: node.grpc_endpoint.clone(), - }, - ); - - let post_params = PostParams::default(); - let mut kube_node = nodes_api.create(&post_params, &kube_node).await?; - - let status = Some(node.state.to_string()); - kube_node.status = status.clone(); - let kube_node = nodes_api - .replace_status( - &Meta::name(&kube_node), - &post_params, - serde_json::to_vec(&kube_node)?, - ) - .await?; - assert_eq!(kube_node.status, status); - - Ok(()) -} - -#[instrument(skip(nodes_api))] -async fn node_update( - nodes_api: &Api, - node: &Node, -) -> anyhow::Result<()> { - let post_params = PostParams::default(); - let status = Some(node.state.to_string()); - - let mut kube_node = nodes_api.get(node.id.as_str()).await?; - kube_node.status = status.clone(); - - let kube_node = nodes_api - .replace_status( - &Meta::name(&kube_node), - &post_params, - serde_json::to_vec(&kube_node)?, - ) - .await?; - assert_eq!(kube_node.status, status); - - Ok(()) -} - -#[instrument(skip(nodes_api))] -async fn node_delete( - nodes_api: 
&Api, - name: &str, -) -> anyhow::Result<()> { - let delete_params = DeleteParams::default(); - let _ = nodes_api.delete(name, &delete_params).await?; - Ok(()) -} diff --git a/control-plane/rest/Cargo.toml b/control-plane/rest/Cargo.toml index fe132c33b..147eb6c84 100644 --- a/control-plane/rest/Cargo.toml +++ b/control-plane/rest/Cargo.toml @@ -20,7 +20,7 @@ actix-web = { version = "3.2.0", features = ["rustls"] } actix-service = "1.0.6" mbus_api = { path = "../mbus-api" } async-trait = "0.1.41" -serde_json = "1.0" +serde_json = { version = "1.0", features = ["preserve_order"] } structopt = "0.3.15" futures = "0.3.6" tracing = "0.1" diff --git a/nix/pkgs/control-plane/cargo-project.nix b/nix/pkgs/control-plane/cargo-project.nix index 2cef59bc7..ad862767b 100644 --- a/nix/pkgs/control-plane/cargo-project.nix +++ b/nix/pkgs/control-plane/cargo-project.nix @@ -29,10 +29,10 @@ let buildProps = rec { name = "control-plane"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1iqmrl8qm8nw1hg219kdyxd1zk9c58p1avymjis3snxnlagafx37"; + cargoSha256 = "1r2g0ni8cxkphazbbkvzwdlcvkgk076llp18wqnkirj5d3xhbx4x"; inherit version; src = whitelistSource ../../../. (pkgs.callPackage ../mayastor { }).src_list; - cargoBuildFlags = [ "-p mbus_api" "-p agents" "-p rest" "-p operators" ]; + cargoBuildFlags = [ "-p mbus_api" "-p agents" "-p rest" ]; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; PROTOC = "${protobuf}/bin/protoc"; diff --git a/nix/pkgs/control-plane/default.nix b/nix/pkgs/control-plane/default.nix index 412ec78c7..6550b8633 100644 --- a/nix/pkgs/control-plane/default.nix +++ b/nix/pkgs/control-plane/default.nix @@ -15,21 +15,12 @@ let cp $src/bin/${name} $out/bin/${name}-agent ''; }; - operator = { name, src }: stdenv.mkDerivation { - inherit src; - name = "${name}-${version}"; - installPhase = '' - mkdir -p $out/bin - cp $src/bin/${name}-op $out/bin/${name}-operator - ''; - }; components = { src }: { kiiss = agent { inherit src; name = "kiiss"; }; node = agent { inherit src; name = "node"; }; pool = agent { inherit src; name = "pool"; }; volume = agent { inherit src; name = "volume"; }; rest = agent { inherit src; name = "rest"; }; - node-op = operator { inherit src; name = "node"; }; }; in { diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index 98db721eb..198ca7453 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -64,13 +64,6 @@ let mkdir -p var/tmp ''; }; - operatorImageProps = { - tag = version; - created = "now"; - config = { - Env = [ "PATH=${env}" ]; - }; - }; agentImageProps = { tag = version; created = "now"; @@ -89,15 +82,6 @@ let inherit build name; binary = "${name}-agent"; }; - build-operator-image = { build, name, config ? 
{ } }: build-control-plane-image { - inherit build; - name = "${name}-op"; - binary = "${name}-operator"; - }; - - operator-images = { build }: { - node = build-operator-image { inherit build; name = "node"; }; - }; agent-images = { build }: { kiiss = build-agent-image { inherit build; name = "kiiss"; }; node = build-agent-image { inherit build; name = "node"; }; @@ -175,7 +159,4 @@ in agents = agent-images { build = "release"; }; agents-dev = agent-images { build = "debug"; }; - - operators = operator-images { build = "release"; }; - operators-dev = operator-images { build = "debug"; }; } diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 0f91c8ebb..7f60e8507 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -56,7 +56,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1s64shmbrihxqaz26iv69j9izjnhn2mprmzivpdn4s7262i60c3y"; + cargoSha256 = "1m0n418hp4h3j32j632l7pf2kl4pwzbzssx7h0wh5m90wsh41cy4"; inherit version; src = whitelistSource ../../../. src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; From c20e45d5fe1c8ae899fc22125aacce2240c6182d Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 19 Feb 2021 15:49:02 +0000 Subject: [PATCH 28/78] refactor(rest_cli): improve rest client errors Anyhow requires errors to be Send, which is not the case for the HTTP types and errors returned by the REST client, so use snafu instead. The errors are enhanced to include send failures, the HTTP header, etc. --- control-plane/rest/src/lib.rs | 212 ++++++++++++++++++++------ control-plane/rest/src/versions/v0.rs | 132 +++++++--------- 2 files changed, 222 insertions(+), 122 deletions(-) diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs index 697241ed9..a7b868274 100644 --- a/control-plane/rest/src/lib.rs +++ b/control-plane/rest/src/lib.rs @@ -17,13 +17,21 @@ pub mod versions; use actix_web::{ body::Body, - client::{Client, ClientResponse, PayloadError}, + client::{ + Client, + ClientBuilder, + ClientResponse, + PayloadError, + SendRequestError, + }, + dev::ResponseHead, web::Bytes, }; use actix_web_opentelemetry::ClientExt; use futures::Stream; use paperclip::actix::Apiv2Schema; use serde::{Deserialize, Serialize}; +use snafu::{ResultExt, Snafu}; use std::{io::BufReader, string::ToString}; /// Actix Rest Client @@ -38,11 +46,21 @@ impl ActixRestClient { /// creates a new client which uses the specified `url` /// uses the rustls connector if the url has the https scheme pub fn new(url: &str, trace: bool) -> anyhow::Result<Self> { + Self::new_timeout(url, trace, std::time::Duration::from_secs(5)) + } + /// creates a new client which uses the specified `url` /// uses the rustls connector if the url has the https scheme + pub fn new_timeout( + url: &str, + trace: bool, + timeout: std::time::Duration, + ) -> anyhow::Result<Self> { let url: url::Url = url.parse()?; + let builder = Client::builder().timeout(timeout); match url.scheme() { - "https" => Self::new_https(&url, trace), - "http" => Ok(Self::new_http(&url, trace)), + "https" => Self::new_https(builder, &url, trace), + "http" => Ok(Self::new_http(builder, &url, trace)), invalid => { let msg = format!("Invalid url scheme: {}", invalid); Err(anyhow::Error::msg(msg)) @@ -50,7 +68,11 @@ impl ActixRestClient { } } /// creates a new secure client - fn new_https(url: &url::Url, trace: bool) -> anyhow::Result<Self> { + fn new_https( + client: ClientBuilder, + url: &url::Url, + trace: bool, + ) -> anyhow::Result<Self> { let
cert_file = &mut BufReader::new( &std::include_bytes!("../certs/rsa/ca.cert")[..], ); @@ -62,8 +84,7 @@ impl ActixRestClient { .map_err(|_| anyhow::anyhow!("Add pem file to the root store!"))?; let connector = actix_web::client::Connector::new() .rustls(std::sync::Arc::new(config)); - let rest_client = - Client::builder().connector(connector.finish()).finish(); + let rest_client = client.connector(connector.finish()).finish(); Ok(Self { client: rest_client, @@ -72,14 +93,14 @@ impl ActixRestClient { }) } /// creates a new client - fn new_http(url: &url::Url, trace: bool) -> Self { + fn new_http(client: ClientBuilder, url: &url::Url, trace: bool) -> Self { Self { - client: Client::new(), + client: client.finish(), url: url.to_string().trim_end_matches('/').into(), trace, } } - async fn get_vec(&self, urn: String) -> anyhow::Result> + async fn get_vec(&self, urn: String) -> ClientResult> where for<'de> R: Deserialize<'de>, { @@ -91,12 +112,8 @@ impl ActixRestClient { self.client.get(uri.clone()).send().await }; - let rest_response = result.map_err(|error| { - anyhow::anyhow!( - "Failed to get uri '{}' from rest, err={:?}", - uri, - error - ) + let rest_response = result.context(Send { + details: format!("Failed to get_vec uri {}", uri), })?; Self::rest_vec_result(rest_response).await @@ -105,7 +122,7 @@ impl ActixRestClient { &self, urn: String, body: B, - ) -> anyhow::Result + ) -> Result where for<'de> R: Deserialize<'de>, { @@ -126,17 +143,13 @@ impl ActixRestClient { .await }; - let rest_response = result.map_err(|error| { - anyhow::anyhow!( - "Failed to put uri '{}' from rest, err={:?}", - uri, - error - ) + let rest_response = result.context(Send { + details: format!("Failed to put uri {}", uri), })?; Self::rest_result(rest_response).await } - async fn del(&self, urn: String) -> anyhow::Result + async fn del(&self, urn: String) -> ClientResult where for<'de> R: Deserialize<'de>, { @@ -148,12 +161,8 @@ impl ActixRestClient { self.client.delete(uri.clone()).send().await }; - let rest_response = result.map_err(|error| { - anyhow::anyhow!( - "Failed to delete uri '{}' from rest, err={:?}", - uri, - error - ) + let rest_response = result.context(Send { + details: format!("Failed to delete uri {}", uri), })?; Self::rest_result(rest_response).await @@ -161,38 +170,151 @@ impl ActixRestClient { async fn rest_vec_result( mut rest_response: ClientResponse, - ) -> anyhow::Result> + ) -> ClientResult> where S: Stream> + Unpin, for<'de> R: Deserialize<'de>, { - let rest_body = rest_response.body().await?; - if rest_response.status().is_success() { - match serde_json::from_slice(&rest_body) { - Ok(result) => Ok(result), - Err(_) => Ok(vec![serde_json::from_slice::(&rest_body)?]), + let status = rest_response.status(); + let headers = rest_response.headers().clone(); + let head = || { + let mut head = ResponseHead::new(status); + head.headers = headers.clone(); + head + }; + let body = rest_response.body().await.map_err(|_| { + ClientError::InvalidPayload { + head: head(), } + })?; + if status.is_success() { + match serde_json::from_slice(&body) { + Ok(r) => Ok(r), + Err(_error) => match serde_json::from_slice(&body) { + Ok(r) => Ok(vec![r]), + Err(_error) => Err(ClientError::InvalidBody { + head: head(), + body, + }), + }, + } + } else if body.is_empty() { + Err(ClientError::Header { + head: head(), + }) } else { - let error: serde_json::value::Value = - serde_json::from_slice(&rest_body)?; - Err(anyhow::anyhow!(error.to_string())) + let error = serde_json::from_slice::(&body) + .map_err(|_| 
ClientError::InvalidBody { + head: head(), + body, + })?; + Err(ClientError::Valid { + head: head(), + error, + }) } } async fn rest_result( mut rest_response: ClientResponse, - ) -> anyhow::Result + ) -> Result where S: Stream> + Unpin, for<'de> R: Deserialize<'de>, { - let rest_body = rest_response.body().await?; - if rest_response.status().is_success() { - Ok(serde_json::from_slice::(&rest_body)?) + let status = rest_response.status(); + let headers = rest_response.headers().clone(); + let head = || { + let mut head = ResponseHead::new(status); + head.headers = headers.clone(); + head + }; + let body = rest_response.body().await.map_err(|_| { + ClientError::InvalidPayload { + head: head(), + } + })?; + if status.is_success() { + let result = serde_json::from_slice(&body).map_err(|_| { + ClientError::InvalidBody { + head: head(), + body, + } + })?; + Ok(result) + } else if body.is_empty() { + Err(ClientError::Header { + head: head(), + }) } else { - let error: serde_json::value::Value = - serde_json::from_slice(&rest_body)?; - Err(anyhow::anyhow!(error.to_string())) + let error = serde_json::from_slice::(&body) + .map_err(|_| ClientError::InvalidBody { + head: head(), + body, + })?; + Err(ClientError::Valid { + head: head(), + error, + }) + } + } +} + +/// Result of a Rest Client Operation +/// T is the Object parsed from the Json body +pub type ClientResult = Result; + +/// Rest Client Error +#[derive(Debug, Snafu)] +pub enum ClientError { + /// Failed to send message to the server + #[snafu(display("{}, reason: {}", details, source))] + Send { + /// Message + details: String, + /// Source Request Error + source: SendRequestError, + }, + /// Invalid Resource Filter + #[snafu(display("Invalid Resource Filter: {}", details))] + InvalidFilter { + /// Message + details: String, + }, + /// Invalid Payload + #[snafu(display("Invalid payload, header: {:?}", head))] + InvalidPayload { + /// http Header + head: ResponseHead, + }, + /// Invalid Body + #[snafu(display("Invalid body, header: {:?}", head))] + InvalidBody { + /// http Header + head: ResponseHead, + /// http Body + body: Bytes, + }, + /// No Body + #[snafu(display("No body, header: {:?}", head))] + Header { + /// http Header + head: ResponseHead, + }, + /// Body in JSON format + #[snafu(display("Http status: {}, error: {}", head.status, error.to_string()))] + Valid { + /// http Header + head: ResponseHead, + /// JSON error + error: serde_json::Value, + }, +} + +impl ClientError { + fn filter(message: &str) -> ClientError { + ClientError::InvalidFilter { + details: message.to_string(), } } } diff --git a/control-plane/rest/src/versions/v0.rs b/control-plane/rest/src/versions/v0.rs index c24df0ce6..c569aac3c 100644 --- a/control-plane/rest/src/versions/v0.rs +++ b/control-plane/rest/src/versions/v0.rs @@ -1,6 +1,6 @@ #![allow(clippy::field_reassign_with_default)] use super::super::ActixRestClient; -use crate::JsonGeneric; +use crate::{ClientError, ClientResult, JsonGeneric}; use actix_web::{ body::Body, http::StatusCode, @@ -180,74 +180,65 @@ pub struct GetBlockDeviceQueryParams { #[async_trait(?Send)] pub trait RestClient { /// Get all the known nodes - async fn get_nodes(&self) -> anyhow::Result>; + async fn get_nodes(&self) -> ClientResult>; /// Get all the known pools - async fn get_pools(&self, filter: Filter) -> anyhow::Result>; + async fn get_pools(&self, filter: Filter) -> ClientResult>; /// Create new pool with arguments - async fn create_pool(&self, args: CreatePool) -> anyhow::Result; + async fn create_pool(&self, args: 
CreatePool) -> ClientResult; /// Destroy pool with arguments - async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()>; + async fn destroy_pool(&self, args: DestroyPool) -> ClientResult<()>; /// Get all the known replicas - async fn get_replicas( - &self, - filter: Filter, - ) -> anyhow::Result>; + async fn get_replicas(&self, filter: Filter) -> ClientResult>; /// Create new replica with arguments async fn create_replica( &self, args: CreateReplica, - ) -> anyhow::Result; + ) -> ClientResult; /// Destroy replica with arguments - async fn destroy_replica(&self, args: DestroyReplica) - -> anyhow::Result<()>; + async fn destroy_replica(&self, args: DestroyReplica) -> ClientResult<()>; /// Share replica with arguments - async fn share_replica(&self, args: ShareReplica) - -> anyhow::Result; + async fn share_replica(&self, args: ShareReplica) -> ClientResult; /// Unshare replica with arguments - async fn unshare_replica(&self, args: UnshareReplica) - -> anyhow::Result<()>; + async fn unshare_replica(&self, args: UnshareReplica) -> ClientResult<()>; /// Get all the known nexuses - async fn get_nexuses(&self, filter: Filter) -> anyhow::Result>; + async fn get_nexuses(&self, filter: Filter) -> ClientResult>; /// Create new nexus with arguments - async fn create_nexus(&self, args: CreateNexus) -> anyhow::Result; + async fn create_nexus(&self, args: CreateNexus) -> ClientResult; /// Destroy nexus with arguments - async fn destroy_nexus(&self, args: DestroyNexus) -> anyhow::Result<()>; + async fn destroy_nexus(&self, args: DestroyNexus) -> ClientResult<()>; /// Share nexus - async fn share_nexus(&self, args: ShareNexus) -> anyhow::Result; + async fn share_nexus(&self, args: ShareNexus) -> ClientResult; /// Unshare nexus - async fn unshare_nexus(&self, args: UnshareNexus) -> anyhow::Result<()>; + async fn unshare_nexus(&self, args: UnshareNexus) -> ClientResult<()>; /// Remove nexus child async fn remove_nexus_child( &self, args: RemoveNexusChild, - ) -> anyhow::Result<()>; + ) -> ClientResult<()>; /// Add nexus child - async fn add_nexus_child( - &self, - args: AddNexusChild, - ) -> anyhow::Result; + async fn add_nexus_child(&self, args: AddNexusChild) + -> ClientResult; /// Get all children by filter async fn get_nexus_children( &self, filter: Filter, - ) -> anyhow::Result>; + ) -> ClientResult>; /// Get all volumes by filter - async fn get_volumes(&self, filter: Filter) -> anyhow::Result>; + async fn get_volumes(&self, filter: Filter) -> ClientResult>; /// Create volume - async fn create_volume(&self, args: CreateVolume) - -> anyhow::Result; + async fn create_volume(&self, args: CreateVolume) -> ClientResult; /// Destroy volume - async fn destroy_volume(&self, args: DestroyVolume) -> anyhow::Result<()>; + async fn destroy_volume(&self, args: DestroyVolume) -> ClientResult<()>; /// Generic JSON gRPC call async fn json_grpc( &self, args: JsonGrpcRequest, - ) -> anyhow::Result; + ) -> ClientResult; /// Get block devices async fn get_block_devices( &self, args: GetBlockDevices, - ) -> anyhow::Result>; + ) -> ClientResult>; } #[derive(Display, Debug)] @@ -287,19 +278,19 @@ macro_rules! 
get_filter { }; } -fn get_filtered_urn(filter: Filter, r: &RestUrns) -> anyhow::Result { +fn get_filtered_urn(filter: Filter, r: &RestUrns) -> ClientResult { let urn = match r { RestUrns::GetNodes(_) => match filter { Filter::None => "nodes".to_string(), Filter::Node(id) => format!("nodes/{}", id), - _ => return Err(anyhow::Error::msg("Invalid filter for Nodes")), + _ => return Err(ClientError::filter("Invalid filter for Nodes")), }, RestUrns::GetPools(_) => match filter { Filter::None => "pools".to_string(), Filter::Node(id) => format!("nodes/{}/pools", id), Filter::Pool(id) => format!("pools/{}", id), Filter::NodePool(n, p) => format!("nodes/{}/pools/{}", n, p), - _ => return Err(anyhow::Error::msg("Invalid filter for pools")), + _ => return Err(ClientError::filter("Invalid filter for pools")), }, RestUrns::GetReplicas(_) => match filter { Filter::None => "replicas".to_string(), @@ -314,27 +305,29 @@ fn get_filtered_urn(filter: Filter, r: &RestUrns) -> anyhow::Result { format!("nodes/{}/pools/{}/replicas/{}", n, p, r) } Filter::PoolReplica(p, r) => format!("pools/{}/replicas/{}", p, r), - _ => return Err(anyhow::Error::msg("Invalid filter for replicas")), + _ => { + return Err(ClientError::filter("Invalid filter for replicas")) + } }, RestUrns::GetNexuses(_) => match filter { Filter::None => "nexuses".to_string(), Filter::Node(n) => format!("nodes/{}/nexuses", n), Filter::NodeNexus(n, x) => format!("nodes/{}/nexuses/{}", n, x), Filter::Nexus(x) => format!("nexuses/{}", x), - _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), + _ => return Err(ClientError::filter("Invalid filter for nexuses")), }, RestUrns::GetChildren(_) => match filter { Filter::NodeNexus(n, x) => { format!("nodes/{}/nexuses/{}/children", n, x) } Filter::Nexus(x) => format!("nexuses/{}/children", x), - _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), + _ => return Err(ClientError::filter("Invalid filter for nexuses")), }, RestUrns::GetVolumes(_) => match filter { Filter::None => "volumes".to_string(), Filter::Node(n) => format!("nodes/{}/volumes", n), Filter::Volume(x) => format!("volumes/{}", x), - _ => return Err(anyhow::Error::msg("Invalid filter for volumes")), + _ => return Err(ClientError::filter("Invalid filter for volumes")), }, }; @@ -343,32 +336,29 @@ fn get_filtered_urn(filter: Filter, r: &RestUrns) -> anyhow::Result { #[async_trait(?Send)] impl RestClient for ActixRestClient { - async fn get_nodes(&self) -> anyhow::Result> { + async fn get_nodes(&self) -> ClientResult> { let nodes = get_all!(self, GetNodes).await?; Ok(nodes) } - async fn get_pools(&self, filter: Filter) -> anyhow::Result> { + async fn get_pools(&self, filter: Filter) -> ClientResult> { let pools = get_filter!(self, filter, GetPools).await?; Ok(pools) } - async fn create_pool(&self, args: CreatePool) -> anyhow::Result { + async fn create_pool(&self, args: CreatePool) -> ClientResult { let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.id); let pool = self.put(urn, CreatePoolBody::from(args)).await?; Ok(pool) } - async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()> { + async fn destroy_pool(&self, args: DestroyPool) -> ClientResult<()> { let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.id); self.del(urn).await?; Ok(()) } - async fn get_replicas( - &self, - filter: Filter, - ) -> anyhow::Result> { + async fn get_replicas(&self, filter: Filter) -> ClientResult> { let replicas = get_filter!(self, filter, GetReplicas).await?; Ok(replicas) } @@ -376,7 +366,7 @@ impl RestClient 
for ActixRestClient { async fn create_replica( &self, args: CreateReplica, - ) -> anyhow::Result { + ) -> ClientResult { let urn = format!( "/v0/nodes/{}/pools/{}/replicas/{}", &args.node, &args.pool, &args.uuid @@ -385,10 +375,7 @@ impl RestClient for ActixRestClient { Ok(replica) } - async fn destroy_replica( - &self, - args: DestroyReplica, - ) -> anyhow::Result<()> { + async fn destroy_replica(&self, args: DestroyReplica) -> ClientResult<()> { let urn = format!( "/v0/nodes/{}/pools/{}/replicas/{}", &args.node, &args.pool, &args.uuid @@ -398,10 +385,7 @@ impl RestClient for ActixRestClient { } /// Share replica with arguments - async fn share_replica( - &self, - args: ShareReplica, - ) -> anyhow::Result { + async fn share_replica(&self, args: ShareReplica) -> ClientResult { let urn = format!( "/v0/nodes/{}/pools/{}/replicas/{}/share/{}", &args.node, @@ -413,10 +397,7 @@ impl RestClient for ActixRestClient { Ok(share) } /// Unshare replica with arguments - async fn unshare_replica( - &self, - args: UnshareReplica, - ) -> anyhow::Result<()> { + async fn unshare_replica(&self, args: UnshareReplica) -> ClientResult<()> { let urn = format!( "/v0/nodes/{}/pools/{}/replicas/{}/share", &args.node, &args.pool, &args.uuid @@ -425,25 +406,25 @@ impl RestClient for ActixRestClient { Ok(()) } - async fn get_nexuses(&self, filter: Filter) -> anyhow::Result> { + async fn get_nexuses(&self, filter: Filter) -> ClientResult> { let nexuses = get_filter!(self, filter, GetNexuses).await?; Ok(nexuses) } - async fn create_nexus(&self, args: CreateNexus) -> anyhow::Result { + async fn create_nexus(&self, args: CreateNexus) -> ClientResult { let urn = format!("/v0/nodes/{}/nexuses/{}", &args.node, &args.uuid); let replica = self.put(urn, CreateNexusBody::from(args)).await?; Ok(replica) } - async fn destroy_nexus(&self, args: DestroyNexus) -> anyhow::Result<()> { + async fn destroy_nexus(&self, args: DestroyNexus) -> ClientResult<()> { let urn = format!("/v0/nodes/{}/nexuses/{}", &args.node, &args.uuid); self.del(urn).await?; Ok(()) } /// Share nexus - async fn share_nexus(&self, args: ShareNexus) -> anyhow::Result { + async fn share_nexus(&self, args: ShareNexus) -> ClientResult { let urn = format!( "/v0/nodes/{}/nexuses/{}/share/{}", &args.node, @@ -455,7 +436,7 @@ impl RestClient for ActixRestClient { } /// Unshare nexus - async fn unshare_nexus(&self, args: UnshareNexus) -> anyhow::Result<()> { + async fn unshare_nexus(&self, args: UnshareNexus) -> ClientResult<()> { let urn = format!("/v0/nodes/{}/nexuses/{}/share", &args.node, &args.uuid); self.del(urn).await?; @@ -465,7 +446,7 @@ impl RestClient for ActixRestClient { async fn remove_nexus_child( &self, args: RemoveNexusChild, - ) -> anyhow::Result<()> { + ) -> ClientResult<()> { let urn = match url::Url::parse(args.uri.as_str()) { Ok(uri) => { // remove initial '/' @@ -480,7 +461,7 @@ impl RestClient for ActixRestClient { async fn add_nexus_child( &self, args: AddNexusChild, - ) -> anyhow::Result { + ) -> ClientResult { let urn = format!( "/v0/nodes/{}/nexuses/{}/children/{}", &args.node, &args.nexus, &args.uri @@ -491,26 +472,23 @@ impl RestClient for ActixRestClient { async fn get_nexus_children( &self, filter: Filter, - ) -> anyhow::Result> { + ) -> ClientResult> { let children = get_filter!(self, filter, GetChildren).await?; Ok(children) } - async fn get_volumes(&self, filter: Filter) -> anyhow::Result> { + async fn get_volumes(&self, filter: Filter) -> ClientResult> { let volumes = get_filter!(self, filter, GetVolumes).await?; Ok(volumes) } - async 
fn create_volume( - &self, - args: CreateVolume, - ) -> anyhow::Result<Volume> { + async fn create_volume(&self, args: CreateVolume) -> ClientResult<Volume> { let urn = format!("/v0/volumes/{}", &args.uuid); let volume = self.put(urn, CreateVolumeBody::from(args)).await?; Ok(volume) } - async fn destroy_volume(&self, args: DestroyVolume) -> anyhow::Result<()> { + async fn destroy_volume(&self, args: DestroyVolume) -> ClientResult<()> { let urn = format!("/v0/volumes/{}", &args.uuid); self.del(urn).await?; Ok(()) @@ -519,7 +497,7 @@ impl RestClient for ActixRestClient { async fn json_grpc( &self, args: JsonGrpcRequest, - ) -> anyhow::Result<JsonGeneric> { + ) -> ClientResult<JsonGeneric> { let urn = format!("/v0/nodes/{}/jsongrpc/{}", args.node, args.method); self.put(urn, Body::from(args.params.to_string())).await } @@ -527,7 +505,7 @@ impl RestClient for ActixRestClient { async fn get_block_devices( &self, args: GetBlockDevices, - ) -> anyhow::Result<Vec<BlockDevice>> { + ) -> ClientResult<Vec<BlockDevice>> { let urn = format!("/v0/nodes/{}/block_devices?all={}", args.node, args.all); self.get_vec(urn).await From 1aeb34ae8efa17a65399eb346ee90a8c86508c18 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 19 Feb 2021 16:16:02 +0000 Subject: [PATCH 29/78] test(rest): add more tests using rest Add more REST tests around replicas and the nexus. Get the correct error from the REST client to avoid false negatives. Deferring the "replica outage" tests, as those kinds of tests were often failing on mayastor already. --- control-plane/rest/src/lib.rs | 73 +++++----- control-plane/tests/tests/common/mod.rs | 122 +++++++++++----- control-plane/tests/tests/nexus.rs | 184 +++++++++++++++++++++++- control-plane/tests/tests/replicas.rs | 102 ++++++++++++- 4 files changed, 407 insertions(+), 74 deletions(-) diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs index a7b868274..1166f9c82 100644 --- a/control-plane/rest/src/lib.rs +++ b/control-plane/rest/src/lib.rs @@ -182,21 +182,20 @@ impl ActixRestClient { head.headers = headers.clone(); head }; - let body = rest_response.body().await.map_err(|_| { - ClientError::InvalidPayload { - head: head(), - } + let body = rest_response.body().await.context(InvalidPayload { + head: head(), })?; if status.is_success() { match serde_json::from_slice(&body) { Ok(r) => Ok(r), - Err(_error) => match serde_json::from_slice(&body) { - Ok(r) => Ok(vec![r]), - Err(_error) => Err(ClientError::InvalidBody { - head: head(), - body, - }), - }, + Err(_) => { + let result = + serde_json::from_slice(&body).context(InvalidBody { + head: head(), + body, + })?; + Ok(vec![result]) + } } } else if body.is_empty() { Err(ClientError::Header { @@ -204,11 +203,11 @@ }) } else { let error = serde_json::from_slice::<serde_json::Value>(&body) - .map_err(|_| ClientError::InvalidBody { + .context(InvalidBody { head: head(), body, })?; - Err(ClientError::Valid { + Err(ClientError::RestServer { head: head(), error, }) @@ -229,18 +228,15 @@ head.headers = headers.clone(); head }; - let body = rest_response.body().await.map_err(|_| { - ClientError::InvalidPayload { - head: head(), - } + let body = rest_response.body().await.context(InvalidPayload { + head: head(), })?; if status.is_success() { - let result = serde_json::from_slice(&body).map_err(|_| { - ClientError::InvalidBody { + let result = + serde_json::from_slice(&body).context(InvalidBody { head: head(), body, - } - })?; + })?; Ok(result) } else if body.is_empty() { Err(ClientError::Header { @@ -248,11 +244,11 @@ }) } else { let error =
serde_json::from_slice::(&body) - .map_err(|_| ClientError::InvalidBody { + .context(InvalidBody { head: head(), body, })?; - Err(ClientError::Valid { + Err(ClientError::RestServer { head: head(), error, }) @@ -267,7 +263,7 @@ pub type ClientResult = Result; /// Rest Client Error #[derive(Debug, Snafu)] pub enum ClientError { - /// Failed to send message to the server + /// Failed to send message to the server (details in source) #[snafu(display("{}, reason: {}", details, source))] Send { /// Message @@ -275,35 +271,48 @@ pub enum ClientError { /// Source Request Error source: SendRequestError, }, - /// Invalid Resource Filter + /// Invalid Resource Filter so couldn't send the request #[snafu(display("Invalid Resource Filter: {}", details))] InvalidFilter { /// Message details: String, }, - /// Invalid Payload - #[snafu(display("Invalid payload, header: {:?}", head))] + /// Response an error code and with an invalid payload + #[snafu(display( + "Invalid payload, header: {:?}, reason: {}", + head, + source + ))] InvalidPayload { /// http Header head: ResponseHead, + /// source payload error + source: PayloadError, }, - /// Invalid Body - #[snafu(display("Invalid body, header: {:?}", head))] + /// Response an error code and also with an invalid body + #[snafu(display( + "Invalid body, header: {:?}, body: {:?}, reason: {}", + head, + body, + source + ))] InvalidBody { /// http Header head: ResponseHead, /// http Body body: Bytes, + /// source json deserialize error + source: serde_json::Error, }, - /// No Body + /// Response an error code and only the header (and so no additional info) #[snafu(display("No body, header: {:?}", head))] Header { /// http Header head: ResponseHead, }, - /// Body in JSON format + /// Error within the Body in valid JSON format, returned by the Rest Server #[snafu(display("Http status: {}, error: {}", head.status, error.to_string()))] - Valid { + RestServer { /// http Header head: ResponseHead, /// JSON error diff --git a/control-plane/tests/tests/common/mod.rs b/control-plane/tests/tests/common/mod.rs index f8ecc399f..0bdd79a06 100644 --- a/control-plane/tests/tests/common/mod.rs +++ b/control-plane/tests/tests/common/mod.rs @@ -9,6 +9,7 @@ use opentelemetry::{ }; use opentelemetry_jaeger::Uninstall; +use rest_client::ClientError; pub use rest_client::{ versions::v0::{self, RestClient}, ActixRestClient, @@ -55,7 +56,7 @@ impl Cluster { } /// replica id with index for `pool` index and `replica` index - pub fn replica(pool: u32, replica: u32) -> v0::ReplicaId { + pub fn replica(pool: usize, replica: u32) -> v0::ReplicaId { let mut uuid = v0::ReplicaId::default().to_string(); let _ = uuid.drain(27 .. uuid.len()); format!("{}{:01x}{:08x}", uuid, pool as u8, replica).into() @@ -69,12 +70,17 @@ impl Cluster { /// New cluster async fn new( trace_rest: bool, + timeout_rest: std::time::Duration, components: Components, composer: ComposeTest, jaeger: (Tracer, Uninstall), ) -> Result { - let rest_client = - ActixRestClient::new("https://localhost:8080", trace_rest).unwrap(); + let rest_client = ActixRestClient::new_timeout( + "https://localhost:8080", + trace_rest, + timeout_rest, + ) + .unwrap(); components .start_wait(&composer, std::time::Duration::from_secs(10)) @@ -104,19 +110,26 @@ fn option_str(input: Option) -> String { /// string Eg, testing the replica share protocol: /// test_result(Ok(Nvmf), async move { ... }) /// test_result(Err(NBD), async move { ... 
}) -pub async fn test_result( +pub async fn test_result( expected: &Result, future: F, ) -> Result<(), anyhow::Error> where - F: std::future::Future>, - R: std::fmt::Display, + F: std::future::Future>, E: std::fmt::Debug, O: std::fmt::Debug, { match future.await { Ok(_) if expected.is_ok() => Ok(()), - Err(_) if expected.is_err() => Ok(()), + Err(error) if expected.is_err() => match error { + ClientError::RestServer { + .. + } => Ok(()), + _ => { + // not the error we were waiting for + Err(anyhow::anyhow!("Invalid rest response: {}", error)) + } + }, Err(error) => Err(anyhow::anyhow!( "Expected '{:#?}' but failed with '{}'!", expected, @@ -138,12 +151,26 @@ macro_rules! result_either { }; } +#[derive(Clone)] +enum PoolDisk { + Malloc(u64), + Uri(String), +} + /// Builder for the Cluster pub struct ClusterBuilder { opts: StartOptions, - pools: u32, - replicas: (u32, u64, v0::Protocol), + pools: Vec, + replicas: Replica, trace: bool, + timeout: std::time::Duration, +} + +#[derive(Default)] +pub struct Replica { + count: u32, + size: u64, + share: v0::Protocol, } impl ClusterBuilder { @@ -151,9 +178,10 @@ impl ClusterBuilder { pub fn builder() -> Self { ClusterBuilder { opts: default_options(), - pools: 0, - replicas: (0, 0, v0::Protocol::Off), + pools: vec![], + replicas: Default::default(), trace: true, + timeout: std::time::Duration::from_secs(3), } } /// Update the start options @@ -169,19 +197,40 @@ impl ClusterBuilder { self.trace = enabled; self } + /// Rest request timeout + pub fn with_rest_timeout(mut self, timeout: std::time::Duration) -> Self { + self.timeout = timeout; + self + } /// Add `count` malloc pools (100MiB size) to each node pub fn with_pools(mut self, count: u32) -> Self { - self.pools = count; + for _ in 0 .. count { + self.pools.push(PoolDisk::Malloc(100 * 1024 * 1024)); + } + self + } + /// Add pool with `disk` to each node + pub fn with_pool(mut self, disk: &str) -> Self { + self.pools.push(PoolDisk::Uri(disk.to_string())); self } - /// Add `count` replicas to each node per pool + /// Specify `count` replicas to add to each node per pool pub fn with_replicas( mut self, count: u32, size: u64, share: v0::Protocol, ) -> Self { - self.replicas = (count, size, share); + self.replicas = Replica { + count, + size, + share, + }; + self + } + /// Specify `count` mayastors for the cluster + pub fn with_mayastors(mut self, count: u32) -> Self { + self.opts = self.opts.with_mayastors(count); self } /// Build into the resulting Cluster using a composer closure, eg: @@ -229,8 +278,14 @@ impl ClusterBuilder { .unwrap(); let composer = compose_builder.build().await?; - let cluster = - Cluster::new(self.trace, components, composer, jaeger).await?; + let cluster = Cluster::new( + self.trace, + self.timeout, + components, + composer, + jaeger, + ) + .await?; if self.opts.show_info { for container in cluster.composer.list_cluster_containers().await? { @@ -272,23 +327,24 @@ impl ClusterBuilder { } fn pools(&self) -> Vec { let mut pools = vec![]; - for pool_index in 0 .. self.pools { - for node in 0 .. self.opts.mayastors { + + for node in 0 .. self.opts.mayastors { + for pool_index in 0 .. self.pools.len() { + let pool = &self.pools[pool_index]; let mut pool = Pool { node: Mayastor::name(node, &self.opts), - kind: PoolKind::Malloc, - size_mb: 100, + disk: pool.clone(), index: (pool_index + 1) as u32, replicas: vec![], }; - for replica_index in 0 .. self.replicas.0 { + for replica_index in 0 .. 
self.replicas.count { pool.replicas.push(v0::CreateReplica { node: pool.node.clone().into(), uuid: Cluster::replica(pool_index, replica_index), pool: pool.id(), - size: self.replicas.1, + size: self.replicas.size, thin: false, - share: self.replicas.2.clone(), + share: self.replicas.share.clone(), }); } pools.push(pool); @@ -298,18 +354,9 @@ impl ClusterBuilder { } } -#[allow(dead_code)] -enum PoolKind { - Malloc, - Aio, - Uring, - Nvmf, -} - struct Pool { node: String, - kind: PoolKind, - size_mb: u32, + disk: PoolDisk, index: u32, replicas: Vec, } @@ -319,11 +366,12 @@ impl Pool { format!("{}-pool-{}", self.node, self.index).into() } fn disk(&self) -> String { - match self.kind { - PoolKind::Malloc => { - format!("malloc:///disk{}?size_mb={}", self.index, self.size_mb) + match &self.disk { + PoolDisk::Malloc(size) => { + let size = size / (1024 * 1024); + format!("malloc:///disk{}?size_mb={}", self.index, size) } - _ => panic!("kind not supported!"), + PoolDisk::Uri(uri) => uri.clone(), } } } diff --git a/control-plane/tests/tests/nexus.rs b/control-plane/tests/tests/nexus.rs index d1497c57a..537b34fd8 100644 --- a/control-plane/tests/tests/nexus.rs +++ b/control-plane/tests/tests/nexus.rs @@ -4,22 +4,198 @@ pub mod common; use common::*; #[actix_rt::test] -async fn create_nexus() { +async fn create_nexus_malloc() { + let cluster = ClusterBuilder::builder().build().await.unwrap(); + + cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size: 10 * 1024 * 1024, + children: vec!["malloc:///disk?size_mb=100".into()], + }) + .await + .unwrap(); +} + +// FIXME: CAS-737 +#[actix_rt::test] +#[allow_fail] +async fn create_nexus_sizes() { + let cluster = ClusterBuilder::builder() + .with_rest_timeout(std::time::Duration::from_secs(1)) + // don't log whilst we have the allow_fail + .compose_build(|c| c.with_logs(false)) + .await + .unwrap(); + + for size_mb in &vec![6, 10, 100] { + let size = size_mb * 1024 * 1024; + let disk = || format!("malloc:///disk?size_mb={}", size_mb); + let sizes = vec![Ok(size / 2), Ok(size), Err(size + 512)]; + for test in sizes { + let size = result_either!(test); + test_result(&test, async { + let nexus = cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size, + children: vec![disk().into()], + }) + .await; + if let Ok(nexus) = &nexus { + cluster + .rest_v0() + .destroy_nexus(v0::DestroyNexus { + node: nexus.node.clone(), + uuid: nexus.uuid.clone(), + }) + .await + .unwrap(); + } + nexus + }) + .await + .unwrap(); + } + } + + for size_mb in &vec![1, 2, 4] { + let size = size_mb * 1024 * 1024; + let disk = || format!("malloc:///disk?size_mb={}", size_mb); + let sizes = vec![Err(size / 2), Err(size), Err(size + 512)]; + for test in sizes { + let size = result_either!(test); + test_result(&test, async { + let nexus = cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size, + children: vec![disk().into()], + }) + .await; + if let Ok(nexus) = &nexus { + cluster + .rest_v0() + .destroy_nexus(v0::DestroyNexus { + node: nexus.node.clone(), + uuid: nexus.uuid.clone(), + }) + .await + .unwrap(); + } + nexus + }) + .await + .unwrap(); + } + } +} + +#[actix_rt::test] +async fn create_nexus_local_replica() { + let size = 10 * 1024 * 1024; let cluster = ClusterBuilder::builder() .with_pools(1) - .with_replicas(2, 5 * 1024 * 1024, v0::Protocol::Off) + .with_replicas(1, size, v0::Protocol::Off) .build() .await 
.unwrap(); + let replica = format!("loopback:///{}", Cluster::replica(0, 0)); cluster .rest_v0() .create_nexus(v0::CreateNexus { node: cluster.node(0), uuid: v0::NexusId::new(), - size: 10 * 1024 * 1024, - children: vec!["malloc:///disk?size_mb=100".into()], + size, + children: vec![replica.into()], + }) + .await + .unwrap(); +} + +#[actix_rt::test] +async fn create_nexus_replicas() { + let size = 10 * 1024 * 1024; + let cluster = ClusterBuilder::builder() + .with_pools(1) + .with_replicas(1, size, v0::Protocol::Off) + .with_mayastors(2) + .build() + .await + .unwrap(); + + let local = format!("loopback:///{}", Cluster::replica(0, 0)); + let remote = cluster + .rest_v0() + .share_replica(v0::ShareReplica { + node: cluster.node(1), + pool: cluster.pool(1, 0), + uuid: Cluster::replica(0, 0), + protocol: v0::Protocol::Nvmf, + }) + .await + .unwrap(); + + cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size, + children: vec![local.into(), remote.into()], + }) + .await + .unwrap(); +} + +#[actix_rt::test] +async fn create_nexus_replica_not_available() { + let size = 10 * 1024 * 1024; + let cluster = ClusterBuilder::builder() + .with_pools(1) + .with_replicas(1, size, v0::Protocol::Off) + .with_mayastors(2) + .build() + .await + .unwrap(); + + let local = format!("loopback:///{}", Cluster::replica(0, 0)); + let remote = cluster + .rest_v0() + .share_replica(v0::ShareReplica { + node: cluster.node(1), + pool: cluster.pool(1, 0), + uuid: Cluster::replica(0, 0), + protocol: v0::Protocol::Nvmf, + }) + .await + .unwrap(); + cluster + .rest_v0() + .share_replica(v0::ShareReplica { + node: cluster.node(1), + pool: cluster.pool(1, 0), + uuid: Cluster::replica(0, 0), + protocol: v0::Protocol::Off, }) .await .unwrap(); + + cluster + .rest_v0() + .create_nexus(v0::CreateNexus { + node: cluster.node(0), + uuid: v0::NexusId::new(), + size, + children: vec![local.into(), remote.into()], + }) + .await + .expect_err("One replica is not present so nexus shouldn't be created"); } diff --git a/control-plane/tests/tests/replicas.rs b/control-plane/tests/tests/replicas.rs index ebe121116..a4ba7460b 100644 --- a/control-plane/tests/tests/replicas.rs +++ b/control-plane/tests/tests/replicas.rs @@ -71,6 +71,56 @@ async fn create_replica_protocols() { } } +#[actix_rt::test] +async fn create_replica_sizes() { + let size = 100 * 1024 * 1024; + let disk = format!("malloc:///disk?size_mb={}", size / (1024 * 1024)); + let cluster = ClusterBuilder::builder() + .with_pool(&disk) + .build() + .await + .unwrap(); + + let pool = cluster + .rest_v0() + .get_pools(v0::Filter::Pool(cluster.pool(0, 0))) + .await + .unwrap(); + let capacity = pool.first().unwrap().capacity; + assert!(size > capacity && capacity > 0); + let sizes = vec![Ok(capacity / 2), Ok(capacity), Err(capacity + 512)]; + for test in sizes { + let size = result_either!(test); + test_result(&test, async { + let result = cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: v0::ReplicaId::new(), + pool: cluster.pool(0, 0), + size, + thin: false, + share: Default::default(), + }) + .await; + if let Ok(replica) = &result { + cluster + .rest_v0() + .destroy_replica(v0::DestroyReplica { + node: replica.node.clone(), + pool: replica.pool.clone(), + uuid: replica.uuid.clone(), + }) + .await + .unwrap(); + } + result + }) + .await + .unwrap(); + } +} + // FIXME: CAS-731 #[actix_rt::test] #[allow_fail] @@ -118,7 +168,7 @@ async fn create_replica_idempotent_different_sizes() { 
&test, cluster.rest_v0().create_replica(v0::CreateReplica { node: cluster.node(0), - uuid: v0::ReplicaId::new(), + uuid: replica.uuid.clone(), pool: cluster.pool(0, 0), size, thin: replica.thin, @@ -129,3 +179,53 @@ .unwrap(); } } + +// FIXME: CAS-731 +#[actix_rt::test] +#[allow_fail] +async fn create_replica_idempotent_different_protocols() { + let cluster = ClusterBuilder::builder() + .with_pools(1) + // don't log whilst we have the allow_fail + .compose_build(|c| c.with_logs(false)) + .await + .unwrap(); + + let uuid = v0::ReplicaId::new(); + let size = 5 * 1024 * 1024; + let replica = cluster + .rest_v0() + .create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: uuid.clone(), + pool: cluster.pool(0, 0), + size, + thin: false, + share: v0::Protocol::Off, + }) + .await + .unwrap(); + assert_eq!(&replica.uuid, &uuid); + + let protocols = vec![ + Ok(v0::Protocol::Off), + Err(v0::Protocol::Iscsi), + Err(v0::Protocol::Nvmf), + ]; + for test in protocols { + let protocol = result_either!(&test); + test_result( + &test, + cluster.rest_v0().create_replica(v0::CreateReplica { + node: cluster.node(0), + uuid: replica.uuid.clone(), + pool: replica.pool.clone(), + size: replica.size, + thin: replica.thin, + share: protocol.clone(), + }), + ) + .await + .unwrap(); + } +} From 4b9a9cea0ab478300dfc8cddea1c4816e9f9bd29 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Mon, 22 Feb 2021 17:49:01 +0000 Subject: [PATCH 30/78] test(local): add compose override flags Add environment variables which override the following compose flags: clean, allow_clean_on_panic, logs_on_panic. The format used for the variables is COMPOSE_$FLAG_NAME_CAPS, e.g. COMPOSE_LOGS_ON_PANIC. This should facilitate debugging the tests locally, without having to modify the test code. --- composer/src/lib.rs | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/composer/src/lib.rs b/composer/src/lib.rs index f8f4bc6f8..e05906c73 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -512,10 +512,41 @@ impl Builder { Ok(compose) } + fn override_flags(flag: &mut bool, flag_name: &str) { + let key = format!("COMPOSE_{}", flag_name.to_ascii_uppercase()); + if let Some(val) = std::env::var_os(&key) { + let clean = match val.to_str().unwrap_or_default() { + "true" => true, + "false" => false, + _ => return, + }; + if clean != *flag { + tracing::warn!( + "env::{} => Overriding the {} flag to {}", + key, + flag_name, + clean + ); + *flag = clean; + } + } + } + /// override clean flags with environment variable + /// useful for testing without having to change the code + fn override_clean(&mut self) { + Self::override_flags(&mut self.clean, "clean"); + Self::override_flags( + &mut self.allow_clean_on_panic, + "allow_clean_on_panic", + ); + Self::override_flags(&mut self.logs_on_panic, "logs_on_panic"); + } + /// build the config but don't start the containers async fn build_only( - self, + mut self, ) -> Result<ComposeTest, Box<dyn std::error::Error>> { + self.override_clean(); let net: Ipv4Network = self.network.parse()?; let path = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); From f6328c2a250b235d94cc9fc342c670e586f5804e Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Wed, 10 Feb 2021 11:07:00 +0000 Subject: [PATCH 31/78] test(e2e): perform back-to-back CI runs Add a parameter to the build to indicate a continuous test. A successfully completed run will trigger another run if the parameter e2e_continuous is set to true.
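In outline, a successfully completed run queues the next one from a post-success step of the shape below (a sketch of the pattern; env.E2E_CONTINUOUS_ENABLE is the switch that lets the loop be stopped without a code change, and wait: false means the current build does not block on the follow-up run):

    post {
        success {
            script {
                if (params.e2e_continuous == true && env.E2E_CONTINUOUS_ENABLE == "true") {
                    build job: env.BRANCH_NAME, wait: false, parameters: [[$class: 'BooleanParameterValue', name: 'e2e_continuous', value: true]]
                }
            }
        }
    }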
--- Jenkinsfile | 132 ++++++++++++++++++++++++++----- scripts/e2e-test.sh | 6 +- test/e2e/install/install_test.go | 17 +--- 3 files changed, 115 insertions(+), 40 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4f7125b3a..7855f2e95 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -13,8 +13,12 @@ def k8s_job="" xray_projectkey='MQ' xray_on_demand_testplan='MQ-1' xray_nightly_testplan='MQ-17' +xray_continuous_testplan='MQ-33' xray_test_execution_type='10059' +// if e2e run does not build its own images, which tag to use when pulling +e2e_continuous_image_tag='v0.7.1' + // Searches previous builds to find first non aborted one def getLastNonAbortedBuild(build) { if (build == null) { @@ -28,6 +32,19 @@ def getLastNonAbortedBuild(build) { } } +def getTestPlan() { + if (params.e2e_continuous == true) { + return xray_continuous_testplan + } + def causes = currentBuild.getBuildCauses() + for(cause in causes) { + if ("${cause}".contains("hudson.triggers.TimerTrigger\$TimerTriggerCause")) { + return xray_nightly_testplan + } + } + return xray_on_demand_testplan +} + // Send out a slack message if branch got broken or has recovered def notifySlackUponStateChange(build) { def cur = build.getResult() @@ -48,15 +65,14 @@ def notifySlackUponStateChange(build) { } } } - -def getTestPlan() { - def causes = currentBuild.getBuildCauses() - for(cause in causes) { - if ("${cause}".contains("hudson.triggers.TimerTrigger\$TimerTriggerCause")) { - return xray_nightly_testplan - } +def notifySlackUponE2EFailure(build) { + if (build.getResult() != 'SUCCESS') { + slackSend( + channel: '#mayastor-backend', + color: 'danger', + message: "E2E continuous testing has failed (<${env.BUILD_URL}|Open>)" + ) } - return xray_on_demand_testplan } // Will ABORT current job for cases when we don't want to build @@ -72,11 +88,37 @@ String cron_schedule = BRANCH_NAME == "develop" ? "0 2 * * *" : "" // Some long e2e tests are not suitable to be run for each PR boolean run_extended_e2e_tests = (env.BRANCH_NAME != 'staging' && env.BRANCH_NAME != 'trying') ? 
true : false +// Determine which stages to run +if (params.e2e_continuous == true) { + run_linter = false + rust_test = false + grpc_test = false + moac_test = false + e2e_test = true + e2e_test_set = 'install basic_volume_io csi resource_check uninstall' + // use images from dockerhub tagged with e2e_continuous_image_tag instead of building from current source + e2e_build_images = false + // do not push images even when running on master/develop/release branches + do_not_push_images = true +} else { + run_linter = true + rust_test = true + grpc_test = true + moac_test = true + e2e_test = true + e2e_test_set = '' + e2e_build_images = true + do_not_push_images = false +} + pipeline { agent none options { timeout(time: 2, unit: 'HOURS') } + parameters { + booleanParam(defaultValue: false, name: 'e2e_continuous') + } triggers { cron(cron_schedule) } @@ -103,6 +145,7 @@ pipeline { anyOf { branch 'master' branch 'release/*' + expression { run_linter == false } } } } @@ -125,6 +168,9 @@ pipeline { } parallel { stage('rust unit tests') { + when{ + expression { rust_test == true } + } agent { label 'nixos-mayastor' } steps { sh 'printenv' @@ -138,6 +184,9 @@ pipeline { } } stage('grpc tests') { + when{ + expression { grpc_test == true } + } agent { label 'nixos-mayastor' } steps { sh 'printenv' @@ -150,6 +199,9 @@ pipeline { } } stage('moac unit tests') { + when{ + expression { moac_test == true } + } agent { label 'nixos-mayastor' } steps { sh 'printenv' @@ -162,23 +214,30 @@ pipeline { } } stage('e2e tests') { + when{ + expression { e2e_test == true } + } stages { stage('e2e docker images') { + when{ + expression { e2e_build_images == true } + } agent { label 'nixos-mayastor' } steps { // e2e tests are the most demanding step for space on the disk so we // test the free space here rather than repeating the same code in all // stages. sh "./scripts/reclaim-space.sh 10" + // Build images (REGISTRY is set in jenkin's global configuration). // Note: We might want to build and test dev images that have more // assertions instead but that complicates e2e tests a bit. sh "./scripts/release.sh --alias-tag ci --registry \"${env.REGISTRY}\"" + } + post { // Always remove all docker images because they are usually used just once // and underlaying pkgs are already cached by nix so they can be easily // recreated. 
- } - post { always { sh 'docker image prune --all --force' } @@ -224,11 +283,26 @@ pipeline { fingerprintArtifacts: true ) sh 'kubectl get nodes -o wide' + script { - def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\" --logs --logsdir \"./logs/mayastor\" " + def tag = '' + if (e2e_build_images == true) { + tag = env.GIT_COMMIT_SHORT + } else { + tag = e2e_continuous_image_tag + } + def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --logsdir \"./logs/mayastor\" " + + // building images also means using the CI registry + if (e2e_build_images == true) { + cmd = cmd + " --registry \"" + env.REGISTRY + "\"" + } if (run_extended_e2e_tests) { cmd = cmd + " --extended" } + if (e2e_test_set != '') { + cmd = cmd + " --tests \"" + e2e_test_set + "\"" + } sh "nix-shell --run '${cmd}'" } } @@ -263,8 +337,9 @@ pipeline { ) } } - always { // always send the junit results back to Xray and Jenkins + always { archiveArtifacts 'logs/**/*.*' + // always send the junit results back to Xray and Jenkins junit 'e2e.*.xml' script { def xray_testplan = getTestPlan() @@ -279,14 +354,14 @@ pipeline { inputInfoSwitcher: 'fileContent', importInfo: """{ "fields": { - "summary": "Build ${env.BUILD_NUMBER}", + "summary": "Build #${env.BUILD_NUMBER}, branch: ${env.BRANCH_name}", "project": { "key": "${xray_projectkey}" }, "issuetype": { "id": "${xray_test_execution_type}" }, - "description": "Results for build ${env.BUILD_NUMBER} at ${env.BUILD_URL}" + "description": "Results for build #${env.BUILD_NUMBER} at ${env.BUILD_URL}" } }""" ]) @@ -319,17 +394,29 @@ pipeline { } } } - } - } - } + post { + success { + script { + if (params.e2e_continuous == true && env.E2E_CONTINUOUS_ENABLE == "true") { + build job: env.BRANCH_NAME, wait: false, parameters: [[$class: 'BooleanParameterValue', name: 'e2e_continuous', value: true]] + } + } + } + } + }// end of "e2e tests" stage + }// parallel stages block + }// end of test stage stage('push images') { agent { label 'nixos-mayastor' } when { beforeAgent true - anyOf { - branch 'master' - branch 'release/*' - branch 'develop' + allOf { + expression { do_not_push_images == false } + anyOf { + branch 'master' + branch 'release/*' + branch 'develop' + } } } steps { @@ -375,6 +462,9 @@ pipeline { if (env.BRANCH_NAME == 'develop') { notifySlackUponStateChange(currentBuild) } + if (params.e2e_continuous == true) { + notifySlackUponE2EFailure(currentBuild) + } } } } diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index dd783f7d1..3bcb57069 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -22,7 +22,7 @@ EXTENDED_TESTS="" device= registry= tag="ci" -# sript state variables +# script state variables tests="" run_extended_tests= on_fail="stop" @@ -140,9 +140,7 @@ if [ -n "$tag" ]; then export e2e_image_tag="$tag" fi -if [ -n "$registry" ]; then - export e2e_docker_registry="$registry" -fi +export e2e_docker_registry="$registry" # can be empty string if [ -z "$tests" ]; then tests="$TESTS" diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go index 7ae8f5bef..b78106503 100644 --- a/test/e2e/install/install_test.go +++ b/test/e2e/install/install_test.go @@ -42,7 +42,6 @@ var testEnv *envtest.Environment /// or any node in the cluster if the master noe does not exist /// TODO Refine how we workout the address of the test-registry func getTestClusterDetails() (string, string, int, []string, error) { - var master = "" var nme = 0 nodeList := coreV1.NodeList{} if 
(k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { @@ -54,9 +53,6 @@ func getTestClusterDetails() (string, string, int, []string, error) { if k8Addr.Type == coreV1.NodeInternalIP { nodeIPs[ix] = k8Addr.Address for label, value := range k8node.Labels { - if label == "node-role.kubernetes.io/master" { - master = k8Addr.Address - } if label == "openebs.io/engine" && value == "mayastor" { nme++ } @@ -96,16 +92,7 @@ func getTestClusterDetails() (string, string, int, []string, error) { tag = "ci" } registry := os.Getenv("e2e_docker_registry") - if len(registry) == 0 { - // a registry was not specified - // If there is master node, use its IP address as the registry IP address - if len(master) != 0 { - registry = master + ":30291" - } else { - /// Otherwise choose the IP address of first node in the list as the registry IP address - registry = nodeIPs[0] + ":30291" - } - } + return tag, registry, nme, mayastorNodes, nil } @@ -137,7 +124,7 @@ func getTemplateYamlDir() string { } func generateYamls(imageTag string, registryAddress string) { - bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -o ../../../test-yamls -t %s -r %s test", imageTag, registryAddress) + bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -o ../../../test-yamls -t '%s' -r '%s' test", imageTag, registryAddress) cmd := exec.Command("bash", "-c", bashcmd) out, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred(), "%s", out) From 6fc8d3c38ffee82834f92f1029cfe531b1fefb6c Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Tue, 26 Jan 2021 10:57:42 -0800 Subject: [PATCH 32/78] feat: enable nix-shell on aarch64 Signed-off-by: Ana Hobden --- .cargo/config | 2 +- nix/pkgs/libiscsi/default.nix | 2 -- nix/pkgs/libspdk/default.nix | 5 ++--- spdk-sys/build.rs | 5 ++--- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.cargo/config b/.cargo/config index 60e428cef..bf32ea81e 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,5 +1,5 @@ [build] -rustflags = "-C target-cpu=nehalem" +rustflags = "-C target-cpu=native" [profile.release] lto = "fat" diff --git a/nix/pkgs/libiscsi/default.nix b/nix/pkgs/libiscsi/default.nix index 7466ed733..3bee398ac 100644 --- a/nix/pkgs/libiscsi/default.nix +++ b/nix/pkgs/libiscsi/default.nix @@ -21,7 +21,5 @@ stdenv.mkDerivation rec { homepage = "https://github.com/sahlberg/libiscsi"; licenses = stdenv.lib.licenses.gpl2; maintainers = "gila@openebs.io"; - platforms = stdenv.lib.platforms.x86_64; }; - } diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 0bc15b549..e1641ae7b 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -50,10 +50,9 @@ let ]; configureFlags = [ - "--target-arch=nehalem" + "--target-arch=native" "--without-isal" "--with-iscsi-initiator" - "--with-crypto" "--with-uring" ]; @@ -61,7 +60,7 @@ let enableParallelBuilding = true; preConfigure = '' - substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default + substituteInPlace dpdk/config/defconfig_aarch64-native-linux-gcc --replace native default ''; configurePhase = '' diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 0a187bab7..1d07becd9 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -40,8 +40,8 @@ fn build_wrapper() { fn main() { #![allow(unreachable_code)] - #[cfg(not(target_arch = "x86_64"))] - panic!("spdk-sys crate is only for x86_64 cpu architecture"); + // #[cfg(not(target_arch = "x86_64"))] + // panic!("spdk-sys crate is only for x86_64 cpu architecture"); 
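+// NB: the x86_64 gate is only commented out (rather than deleted) while
+// aarch64 support is being brought up; a later commit in this series
+// reinstates it as a cfg covering both x86_64 and aarch64.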
#[cfg(not(target_os = "linux"))] panic!("spdk-sys crate works only on linux"); @@ -98,7 +98,6 @@ fn main() { .derive_default(true) .derive_debug(true) .derive_copy(true) - .clang_arg("-march=nehalem") .prepend_enum_name(false) .generate_inline_functions(true) .parse_callbacks(Box::new(MacroCallback { From 5b7c949b2bdc2e441443bc630a55c178c2700605 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Fri, 29 Jan 2021 07:33:14 -0800 Subject: [PATCH 33/78] feat: enable cargo build on aarch64 Signed-off-by: Ana Hobden --- mayastor/src/bdev/nexus/nexus_fn_table.rs | 2 +- mayastor/src/bin/spdk.rs | 2 +- mayastor/src/core/channel.rs | 2 +- mayastor/src/core/env.rs | 6 +++--- mayastor/src/replica.rs | 4 ++-- mayastor/src/subsys/nvmf/target.rs | 2 +- nix/pkgs/libspdk/default.nix | 22 +++++++++++----------- spdk-sys/logwrapper.c | 4 ++-- spdk-sys/src/lib.rs | 2 +- 9 files changed, 23 insertions(+), 23 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index b53de5014..cf7b27749 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -189,7 +189,7 @@ impl NexusFnTable { unsafe { spdk_json_write_named_array_begin( w, - "children\0".as_ptr() as *mut i8, + "children\0".as_ptr() as *const u8, ); }; diff --git a/mayastor/src/bin/spdk.rs b/mayastor/src/bin/spdk.rs index 23f738986..d710ad7bc 100644 --- a/mayastor/src/bin/spdk.rs +++ b/mayastor/src/bin/spdk.rs @@ -46,7 +46,7 @@ fn main() -> Result<(), std::io::Error> { if spdk_app_parse_args( (c_args.len() as c_int) - 1, - c_args.as_ptr() as *mut *mut i8, + c_args.as_ptr() as *mut *mut u8, &mut opts, null_mut(), // extra short options i.e. "f:S:" null_mut(), // extra long options diff --git a/mayastor/src/core/channel.rs b/mayastor/src/core/channel.rs index 7b31ff9d7..ae924738f 100644 --- a/mayastor/src/core/channel.rs +++ b/mayastor/src/core/channel.rs @@ -28,7 +28,7 @@ impl IoChannel { (*self.0) .dev .add(std::mem::size_of::<*mut spdk_io_channel>()) - as *mut i8, + as *const u8, ) .to_str() .unwrap() diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 7a914fa64..cdb829473 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -486,7 +486,7 @@ impl MayastorEnvironment { let mut cargs = args .iter() .map(|arg| arg.as_ptr()) - .collect::>(); + .collect::>(); cargs.push(std::ptr::null()); debug!("EAL arguments {:?}", args); @@ -494,7 +494,7 @@ impl MayastorEnvironment { if unsafe { rte_eal_init( (cargs.len() as libc::c_int) - 1, - cargs.as_ptr() as *mut *mut i8, + cargs.as_ptr() as *mut *mut u8, ) } < 0 { @@ -576,7 +576,7 @@ impl MayastorEnvironment { } else { info!("RPC server listening at: {}", ctx.rpc.to_str().unwrap()); unsafe { - spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut i8); + spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut u8); spdk_rpc_set_state(SPDK_RPC_RUNTIME); }; diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index 2f7e6a1a5..bb4fb8b27 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -180,14 +180,14 @@ impl Replica { pub fn get_pool_name(&self) -> &str { unsafe { let lvs = &*(*self.lvol_ptr).lvol_store; - CStr::from_ptr(&lvs.name as *const i8).to_str().unwrap() + CStr::from_ptr(&lvs.name as *const u8).to_str().unwrap() } } /// Get uuid (= name) of the replica. 
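+    /// For an lvol the uuid doubles as its name, so this simply reads the
+    /// `name` field out of the underlying lvol struct.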
pub fn get_uuid(&self) -> &str { unsafe { - CStr::from_ptr(&(*self.lvol_ptr).name as *const i8) + CStr::from_ptr(&(*self.lvol_ptr).name as *const u8) .to_str() .unwrap() } diff --git a/mayastor/src/subsys/nvmf/target.rs b/mayastor/src/subsys/nvmf/target.rs index d83dcfaec..8446807b4 100644 --- a/mayastor/src/subsys/nvmf/target.rs +++ b/mayastor/src/subsys/nvmf/target.rs @@ -293,7 +293,7 @@ impl Target { let discovery = unsafe { NvmfSubsystem::from(spdk_nvmf_subsystem_create( self.tgt.as_ptr(), - SPDK_NVMF_DISCOVERY_NQN.as_ptr() as *const i8, + SPDK_NVMF_DISCOVERY_NQN.as_ptr() as *const u8, SPDK_NVMF_SUBTYPE_DISCOVERY, 0, )) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index e1641ae7b..643784f71 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -54,15 +54,15 @@ let "--without-isal" "--with-iscsi-initiator" "--with-uring" + # "--with-crypto" + "--disable-examples" + "--disable-unit-tests" + "--disable-tests" ]; enableParallelBuilding = true; - preConfigure = '' - substituteInPlace dpdk/config/defconfig_aarch64-native-linux-gcc --replace native default - ''; - configurePhase = '' patchShebangs ./. ./configure $configureFlags @@ -77,13 +77,13 @@ let find . -type f -name 'libspdk_ut_mock.a' -delete $CC -shared -o libspdk.so \ - -lc -laio -liscsi -lnuma -ldl -lrt -luuid -lpthread -lcrypto \ - -luring \ - -Wl,--whole-archive \ - $(find build/lib -type f -name 'libspdk_*.a*' -o -name 'librte_*.a*') \ - $(find dpdk/build/lib -type f -name 'librte_*.a*') \ - $(find intel-ipsec-mb -type f -name 'libIPSec_*.a*') \ - -Wl,--no-whole-archive + -lc -laio -liscsi -lnuma -ldl -lrt -luuid -lpthread -lcrypto \ + -luring \ + -Wl,--whole-archive \ + $(find build/lib -type f -name 'libspdk_*.a*' -o -name 'librte_*.a*') \ + $(find dpdk/build/lib -type f -name 'librte_*.a*') \ + $(find intel-ipsec-mb -type f -name 'libIPSec_*.a*') \ + -Wl,--no-whole-archive ''; installPhase = '' diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index 6e6426e16..e4a400b34 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -2,10 +2,10 @@ void maya_log(int level, const char *file, const int line, const char *func, - const char *format, va_list args) + const char *format, va_list *args) { char buf[1024] = {0}; - int n_written = vsnprintf(buf, sizeof(buf), format, args); + int n_written = vsnprintf(buf, sizeof(buf), format, *args); logfn(level, file, line, func, &buf[0], n_written); } diff --git a/spdk-sys/src/lib.rs b/spdk-sys/src/lib.rs index 46688ac1a..1a0a5656d 100644 --- a/spdk-sys/src/lib.rs +++ b/spdk-sys/src/lib.rs @@ -35,7 +35,7 @@ extern "C" { line: i32, func: *const c_char, format: *const c_char, - args: *mut __va_list_tag, + args: __va_list, ); pub static mut logfn: LogProto; From 1a06bf2ca493b2f8dd789c3b9ffdbf3513be8444 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Fri, 29 Jan 2021 13:48:47 -0800 Subject: [PATCH 34/78] feat: aarch64 passing tests Signed-off-by: Ana Hobden --- shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/shell.nix b/shell.nix index a16644e31..6d17c3122 100644 --- a/shell.nix +++ b/shell.nix @@ -24,6 +24,7 @@ mkShell { docker-compose kubectl kind + docker clang cowsay e2fsprogs From 22e7b17ed9ad170816faed96d83bf56c863146de Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Tue, 2 Feb 2021 15:46:28 -0800 Subject: [PATCH 35/78] chore: finangle multi ISA builds Signed-off-by: Ana Hobden --- .cargo/config | 4 ++-- nix/pkgs/libspdk/default.nix | 8 ++++++-- rust-toolchain | 1 + spdk-sys/build.rs | 4 ++-- 4 files 
changed, 11 insertions(+), 6 deletions(-) create mode 100644 rust-toolchain diff --git a/.cargo/config b/.cargo/config index bf32ea81e..52c28d445 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,5 +1,5 @@ -[build] -rustflags = "-C target-cpu=native" +[target.x86_64-unknown-linux-gnu] +rustflags = ["-Ctarget-cpu=neleham"] [profile.release] lto = "fat" diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 643784f71..71dec3ca1 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -15,6 +15,7 @@ , openssl , python3 , stdenv +, system ? builtins.currentSystem }: let # Derivation attributes for production version of libspdk @@ -49,8 +50,11 @@ let openssl ]; - configureFlags = [ - "--target-arch=native" + configureFlags = (if (system == "x86_64-linux") then + [ "--target-arch=neleham" ] + else + [] + ) ++ [ "--without-isal" "--with-iscsi-initiator" "--with-uring" diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 000000000..2bf5ad044 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +stable diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 1d07becd9..3bad291b9 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -40,8 +40,8 @@ fn build_wrapper() { fn main() { #![allow(unreachable_code)] - // #[cfg(not(target_arch = "x86_64"))] - // panic!("spdk-sys crate is only for x86_64 cpu architecture"); + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + panic!("spdk-sys crate is only for x86_64 (Nehelam or later) and aarch64 ISAs."); #[cfg(not(target_os = "linux"))] panic!("spdk-sys crate works only on linux"); From a97175e3028d58050eada11f3e017a52614c1cf2 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Wed, 3 Feb 2021 12:54:26 -0800 Subject: [PATCH 36/78] chore: re-enable on x86_64 Signed-off-by: Ana Hobden --- .cargo/config | 2 +- mayastor/src/bdev/nexus/nexus_fn_table.rs | 7 +++++-- mayastor/src/bin/spdk.rs | 8 +++++--- mayastor/src/core/channel.rs | 7 +++++-- mayastor/src/core/env.rs | 6 +++--- mayastor/src/replica.rs | 5 +++-- mayastor/src/subsys/nvmf/target.rs | 2 +- nix/pkgs/libspdk/default.nix | 11 ++++++++--- spdk-sys/src/lib.rs | 16 ++++++++++++++++ 9 files changed, 47 insertions(+), 17 deletions(-) diff --git a/.cargo/config b/.cargo/config index 52c28d445..841d01c22 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -rustflags = ["-Ctarget-cpu=neleham"] +rustflags = ["-C", "target-cpu=nehalem"] [profile.release] lto = "fat" diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs index cf7b27749..0a28098d4 100644 --- a/mayastor/src/bdev/nexus/nexus_fn_table.rs +++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs @@ -1,4 +1,7 @@ -use std::ffi::{c_void, CString}; +use std::{ + ffi::{c_void, CString}, + os::raw::c_char, +}; use once_cell::sync::Lazy; @@ -189,7 +192,7 @@ impl NexusFnTable { unsafe { spdk_json_write_named_array_begin( w, - "children\0".as_ptr() as *const u8, + "children\0".as_ptr() as *const c_char, ); }; diff --git a/mayastor/src/bin/spdk.rs b/mayastor/src/bin/spdk.rs index d710ad7bc..49edcbe5a 100644 --- a/mayastor/src/bin/spdk.rs +++ b/mayastor/src/bin/spdk.rs @@ -12,10 +12,12 @@ use std::{ iter::Iterator, ptr::null_mut, vec::Vec, + os::raw::{ + c_char, + c_int, + }, }; -use libc::{c_char, c_int}; - use mayastor::delay; use spdk_sys::{ spdk_app_fini, @@ -46,7 +48,7 @@ fn main() -> Result<(), std::io::Error> { if spdk_app_parse_args( (c_args.len() as c_int) - 1, - c_args.as_ptr() as *mut *mut u8, 
+ c_args.as_ptr() as *mut *mut c_char, &mut opts, null_mut(), // extra short options i.e. "f:S:" null_mut(), // extra long options diff --git a/mayastor/src/core/channel.rs b/mayastor/src/core/channel.rs index ae924738f..1b2081fe6 100644 --- a/mayastor/src/core/channel.rs +++ b/mayastor/src/core/channel.rs @@ -1,4 +1,7 @@ -use std::fmt::{Debug, Error, Formatter}; +use std::{ + fmt::{Debug, Error, Formatter}, + os::raw::c_char, +}; use spdk_sys::{spdk_io_channel, spdk_put_io_channel}; @@ -28,7 +31,7 @@ impl IoChannel { (*self.0) .dev .add(std::mem::size_of::<*mut spdk_io_channel>()) - as *const u8, + as *const c_char, ) .to_str() .unwrap() diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index cdb829473..d9ed6ef48 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -486,7 +486,7 @@ impl MayastorEnvironment { let mut cargs = args .iter() .map(|arg| arg.as_ptr()) - .collect::>(); + .collect::>(); cargs.push(std::ptr::null()); debug!("EAL arguments {:?}", args); @@ -494,7 +494,7 @@ impl MayastorEnvironment { if unsafe { rte_eal_init( (cargs.len() as libc::c_int) - 1, - cargs.as_ptr() as *mut *mut u8, + cargs.as_ptr() as *mut *mut c_char, ) } < 0 { @@ -576,7 +576,7 @@ impl MayastorEnvironment { } else { info!("RPC server listening at: {}", ctx.rpc.to_str().unwrap()); unsafe { - spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut u8); + spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut c_char); spdk_rpc_set_state(SPDK_RPC_RUNTIME); }; diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index bb4fb8b27..129348dc2 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -4,6 +4,7 @@ //! an lvol). Here we define methods for easy management of replicas. #![allow(dead_code)] use std::ffi::CStr; +use std::os::raw::c_char; use ::rpc::mayastor as rpc; use snafu::{ResultExt, Snafu}; @@ -180,14 +181,14 @@ impl Replica { pub fn get_pool_name(&self) -> &str { unsafe { let lvs = &*(*self.lvol_ptr).lvol_store; - CStr::from_ptr(&lvs.name as *const u8).to_str().unwrap() + CStr::from_ptr(&lvs.name as *const c_char).to_str().unwrap() } } /// Get uuid (= name) of the replica. pub fn get_uuid(&self) -> &str { unsafe { - CStr::from_ptr(&(*self.lvol_ptr).name as *const u8) + CStr::from_ptr(&(*self.lvol_ptr).name as *const c_char) .to_str() .unwrap() } diff --git a/mayastor/src/subsys/nvmf/target.rs b/mayastor/src/subsys/nvmf/target.rs index 8446807b4..ee32179ce 100644 --- a/mayastor/src/subsys/nvmf/target.rs +++ b/mayastor/src/subsys/nvmf/target.rs @@ -293,7 +293,7 @@ impl Target { let discovery = unsafe { NvmfSubsystem::from(spdk_nvmf_subsystem_create( self.tgt.as_ptr(), - SPDK_NVMF_DISCOVERY_NQN.as_ptr() as *const u8, + SPDK_NVMF_DISCOVERY_NQN.as_ptr() as *const std::os::raw::c_char, SPDK_NVMF_SUBTYPE_DISCOVERY, 0, )) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 71dec3ca1..1245a33fb 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -51,22 +51,27 @@ let ]; configureFlags = (if (system == "x86_64-linux") then - [ "--target-arch=neleham" ] + [ + "--target-arch=nehalem" + "--with-crypto" + ] else [] ) ++ [ "--without-isal" "--with-iscsi-initiator" "--with-uring" - # "--with-crypto" "--disable-examples" "--disable-unit-tests" "--disable-tests" ]; - enableParallelBuilding = true; + preConfigure = '' + substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default + ''; + configurePhase = '' patchShebangs ./. 
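+      # patchShebangs (a nixpkgs helper) rewrites the #!/usr/bin/env shebangs
+      # of the bundled scripts to absolute nix store paths before configure
+      # runs any of them.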
./configure $configureFlags diff --git a/spdk-sys/src/lib.rs b/spdk-sys/src/lib.rs index 1a0a5656d..fe8b35e01 100644 --- a/spdk-sys/src/lib.rs +++ b/spdk-sys/src/lib.rs @@ -27,6 +27,22 @@ pub type LogProto = Option< ), >; +#[cfg(target_arch = "x86_64")] +#[link(name = "logwrapper", kind = "static")] +extern "C" { + pub fn maya_log( + level: i32, + file: *const c_char, + line: i32, + func: *const c_char, + format: *const c_char, + args: *mut __va_list_tag, + ); + + pub static mut logfn: LogProto; +} + +#[cfg(target_arch = "aarch64")] #[link(name = "logwrapper", kind = "static")] extern "C" { pub fn maya_log( From 643764352e237ff21a861949a7c1204c21a9e0ff Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Thu, 4 Feb 2021 07:43:16 -0800 Subject: [PATCH 37/78] chore: compat finangling Signed-off-by: Ana Hobden --- nix/pkgs/libspdk/default.nix | 13 +++++++------ spdk-sys/logwrapper.c | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 1245a33fb..801f370c0 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -14,7 +14,7 @@ , numactl , openssl , python3 -, stdenv +, stdenv , system ? builtins.currentSystem }: let @@ -53,17 +53,18 @@ let configureFlags = (if (system == "x86_64-linux") then [ "--target-arch=nehalem" - "--with-crypto" + ] + else if (system == "aarch64-linux") then + [ + "--target-arch=armv8-a+crypto" ] else [] ) ++ [ + #"--with-crypto" "--without-isal" "--with-iscsi-initiator" "--with-uring" - "--disable-examples" - "--disable-unit-tests" - "--disable-tests" ]; enableParallelBuilding = true; @@ -73,7 +74,7 @@ let ''; configurePhase = '' - patchShebangs ./. + patchShebangs ./. > /dev/null ./configure $configureFlags ''; diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index e4a400b34..6e6426e16 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -2,10 +2,10 @@ void maya_log(int level, const char *file, const int line, const char *func, - const char *format, va_list *args) + const char *format, va_list args) { char buf[1024] = {0}; - int n_written = vsnprintf(buf, sizeof(buf), format, *args); + int n_written = vsnprintf(buf, sizeof(buf), format, args); logfn(level, file, line, func, &buf[0], n_written); } From 05e1af4ea7bc164855cbaa11bb2233e55bb0c96d Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Thu, 4 Feb 2021 14:05:44 -0800 Subject: [PATCH 38/78] fix: correct safety of maya_log Signed-off-by: Ana Hobden --- mayastor/.cargo/config | 3 +++ spdk-sys/logwrapper.c | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mayastor/.cargo/config b/mayastor/.cargo/config index 3cce93847..d56081d83 100644 --- a/mayastor/.cargo/config +++ b/mayastor/.cargo/config @@ -2,3 +2,6 @@ # cargo will ask you nicely to type your password [target.x86_64-unknown-linux-gnu] runner = ".cargo/runner.sh" + +[target.aarch64-unknown-linux-gnu] +runner = ".cargo/runner.sh" diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index 6e6426e16..db45ddc55 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -5,7 +5,7 @@ maya_log(int level, const char *file, const int line, const char *func, const char *format, va_list args) { char buf[1024] = {0}; - int n_written = vsnprintf(buf, sizeof(buf), format, args); - logfn(level, file, line, func, &buf[0], n_written); + unsigned int would_have_written = vsnprintf(buf, sizeof(buf), format, args); + logfn(level, file, line, func, &buf[0], ((would_have_written > sizeof(buf)) ? 
sizeof(buf) : would_have_written)); } From 3eaf6ce96ee09d998b6970fb3b43d0a5cec0b4bc Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Mon, 8 Feb 2021 13:16:31 -0800 Subject: [PATCH 39/78] fix: try to log the full spdk msg Signed-off-by: Ana Hobden --- spdk-sys/logwrapper.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index db45ddc55..c7d713777 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -4,8 +4,26 @@ void maya_log(int level, const char *file, const int line, const char *func, const char *format, va_list args) { - char buf[1024] = {0}; - unsigned int would_have_written = vsnprintf(buf, sizeof(buf), format, args); - logfn(level, file, line, func, &buf[0], ((would_have_written > sizeof(buf)) ? sizeof(buf) : would_have_written)); + // There is a delicate balance here! This `buf` ideally should not be resized, since a realloc is expensive. + char buf[4096] = {0}; + unsigned int should_have_written = vsnprintf(buf, sizeof(buf), format, args); + + if (should_have_written > sizeof(buf)) { + logfn(level, file, line, func, &buf[0], sizeof(buf)); + } else { + // If `should_have_written` is bigger than `buf`, then the message is too long. + // Instead, we'll try to malloc onto the heap and log with that instead. + char *dynamic_buf = malloc(should_have_written); + if (!dynamic_buf) { + // We are out of memory. Trying to allocate more is not going to work out ok. + // Since C strings need `\0` on the end, we'll do that. + buf[sizeof(buf) - 1] = '\0'; + logfn(level, file, line, func, &buf[0], sizeof(buf)); + } else { + vsnprintf(dynamic_buf, should_have_written, format, args); + logfn(level, file, line, func, &dynamic_buf[0], sizeof(dynamic_buf)); + free(dynamic_buf); + } + } } From 42ba4b5a4f228a1f3fa3824ff72bd3095ce84918 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Mon, 8 Feb 2021 15:17:24 -0800 Subject: [PATCH 40/78] chore: try to enable cross builds Signed-off-by: Ana Hobden --- default.nix | 5 +++ nix/pkgs/libspdk/default.nix | 76 +++++++++++++++++++++++++++-------- nix/pkgs/mayastor/default.nix | 20 ++++----- 3 files changed, 73 insertions(+), 28 deletions(-) diff --git a/default.nix b/default.nix index f2aa9dc4b..6d6c97c4c 100644 --- a/default.nix +++ b/default.nix @@ -1,3 +1,7 @@ +{ + crossSystem ? null +}: + let sources = import ./nix/sources.nix; pkgs = import sources.nixpkgs { @@ -5,6 +9,7 @@ let (_: _: { inherit sources; }) (import ./nix/mayastor-overlay.nix) ]; + inherit crossSystem; }; in pkgs diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 801f370c0..e4bcc5052 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -1,21 +1,35 @@ { binutils , cunit , fetchFromGitHub -, pkgconfig +, pkg-config , lcov , libaio , libiscsi +, libbpf +, libelf , liburing , libuuid +, libpcap +, libbsd +, libexecinfo , nasm +, cmake , ninja +, jansson , meson , ncurses , numactl , openssl , python3 -, stdenv -, system ? 
builtins.currentSystem +, stdenv +, libtool +, yasm +, targetPlatform +, buildPlatform +, buildPackages +, llvmPackages_11 +, gcc +, zlib }: let # Derivation attributes for production version of libspdk @@ -34,12 +48,16 @@ let nativeBuildInputs = [ meson ninja - pkgconfig + pkg-config python3 + llvmPackages_11.clang + gcc + cmake ]; buildInputs = [ binutils + libtool libaio libiscsi.dev liburing @@ -48,39 +66,65 @@ let ncurses numactl openssl + libpcap + libbsd + jansson + libbpf + libelf + libexecinfo + zlib ]; - configureFlags = (if (system == "x86_64-linux") then + configureFlags = (if (targetPlatform.config == "x86_64-unknown-linux-gnu") then [ "--target-arch=nehalem" + "--with-crypto" ] - else if (system == "aarch64-linux") then + else if (targetPlatform.config == "aarch64-unknown-linux-gnu") then [ "--target-arch=armv8-a+crypto" ] else [] - ) ++ [ - #"--with-crypto" + ) ++ + (if (targetPlatform.config != buildPlatform.config) then [ "--cross-prefix=${targetPlatform.config}" ] else []) ++ + [ "--without-isal" "--with-iscsi-initiator" "--with-uring" + "--disable-examples" + "--disable-unit-tests" + "--disable-tests" ]; enableParallelBuilding = true; - preConfigure = '' - substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default - ''; - configurePhase = '' patchShebangs ./. > /dev/null - ./configure $configureFlags ''; hardeningDisable = [ "all" ]; - buildPhase = '' + buildPhase = (if (targetPlatform.config == "x86_64-unknown-linux-gnu") then + '' + substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default + meson build dpdk + '' + else if (targetPlatform.config == "aarch64-unknown-linux-gnu") then + '' + substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default + substituteInPlace dpdk/config/arm/arm64_armv8_linux_gcc --replace aarch64-linux-gnu- aarch64-unknown-linux-gnu- + meson build dpdk --cross-file dpdk/config/arm/arm64_armv8_linux_gcc + '' + else + "" + ) + '' + ./configure ${builtins.concatStringsSep + " " + (builtins.filter + (opt: (builtins.match "--build=.*" opt) == null) + configureFlags) + } make -j`nproc` find . -type f -name 'libspdk_event_nvmf.a' -delete find . 
-type f -name 'libspdk_sock_uring.a' -delete @@ -122,7 +166,7 @@ let }; in { - release = stdenv.mkDerivation (drvAttrs // { + release = llvmPackages_11.stdenv.mkDerivation (drvAttrs // { pname = "libspdk"; separateDebugInfo = true; dontStrip = false; @@ -131,7 +175,7 @@ in "--disable-unit-tests" ]; }); - debug = stdenv.mkDerivation (drvAttrs // { + debug = llvmPackages_11.stdenv.mkDerivation (drvAttrs // { pname = "libspdk-dev"; separateDebugInfo = false; dontStrip = true; diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 7f60e8507..5c393d140 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -2,7 +2,6 @@ , clang , dockerTools , e2fsprogs -, git , lib , libaio , libiscsi @@ -19,8 +18,10 @@ , sources , xfsprogs , utillinux -, rustup -, docker-compose +, llvmPackages_11 +, targetPackages +, buildPackages +, targetPlatform }: let channel = import ../../lib/rust.nix { inherit sources; }; @@ -36,8 +37,7 @@ let lib.hasPrefix (toString (src + "/${allowedPrefix}")) path) allowedPrefixes) src; - version_drv = import ../../lib/version.nix { inherit lib stdenv git; }; - version = builtins.readFile "${version_drv}"; + version = (builtins.fromTOML (builtins.readFile ../../../mayastor/Cargo.toml)).package.version; src_list = [ "Cargo.lock" "Cargo.toml" @@ -63,17 +63,13 @@ let PROTOC = "${protobuf}/bin/protoc"; PROTOC_INCLUDE = "${protobuf}/include"; - # Before editing dependencies, consider: - # https://nixos.org/manual/nixpkgs/stable/#ssec-cross-dependency-implementation - # https://nixos.org/manual/nixpkgs/stable/#ssec-stdenv-dependencies - basePackages = [ - ]; nativeBuildInputs = [ - clang pkg-config + protobuf + llvmPackages_11.clang ]; buildInputs = [ - llvmPackages.libclang + llvmPackages_11.libclang protobuf libaio libiscsi.lib From 2ec6a0236e778115638d0cc641b3a068452327bd Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Tue, 23 Feb 2021 13:13:49 -0800 Subject: [PATCH 41/78] chore: delay cross builds Signed-off-by: Ana Hobden --- nix/pkgs/libspdk/default.nix | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index e4bcc5052..7ec68c4ab 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -101,30 +101,17 @@ let configurePhase = '' patchShebangs ./. > /dev/null + ./configure ${builtins.concatStringsSep + " " + (builtins.filter + (opt: (builtins.match "--build=.*" opt) == null) + configureFlags) + } ''; hardeningDisable = [ "all" ]; - buildPhase = (if (targetPlatform.config == "x86_64-unknown-linux-gnu") then - '' - substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default - meson build dpdk - '' - else if (targetPlatform.config == "aarch64-unknown-linux-gnu") then - '' - substituteInPlace dpdk/config/defconfig_x86_64-native-linux-gcc --replace native default - substituteInPlace dpdk/config/arm/arm64_armv8_linux_gcc --replace aarch64-linux-gnu- aarch64-unknown-linux-gnu- - meson build dpdk --cross-file dpdk/config/arm/arm64_armv8_linux_gcc - '' - else - "" - ) + '' - ./configure ${builtins.concatStringsSep - " " - (builtins.filter - (opt: (builtins.match "--build=.*" opt) == null) - configureFlags) - } + buildPhase = '' make -j`nproc` find . -type f -name 'libspdk_event_nvmf.a' -delete find . 
-type f -name 'libspdk_sock_uring.a' -delete From 91a0f6babf7753824f7b6cba84f5991b8bfc6f14 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Tue, 23 Feb 2021 13:42:38 -0800 Subject: [PATCH 42/78] chore: update docs Signed-off-by: Ana Hobden --- doc/run.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/run.md b/doc/run.md index 59d375f9b..6f2dd9094 100644 --- a/doc/run.md +++ b/doc/run.md @@ -5,7 +5,7 @@ Mayastor supports the following [Instruction Set Architectures (ISA)][isa]: + x86_64 (Nehalem or later) - + aarch64 support (**Early access on [`aarch64`][aarch64-branch] -- Use caution**) + + aarch64 Your system will need several [control groups][control-groups] configured. From 4d3f0ff175a52142b2cf0b7e0e69b13322c914c1 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Wed, 10 Feb 2021 14:33:06 -0800 Subject: [PATCH 43/78] fix: mayastor-client nexus create accept many children Signed-off-by: Ana Hobden --- mayastor/src/bin/mayastor-client/nexus_cli.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mayastor/src/bin/mayastor-client/nexus_cli.rs b/mayastor/src/bin/mayastor-client/nexus_cli.rs index 15ffa6207..57a167733 100644 --- a/mayastor/src/bin/mayastor-client/nexus_cli.rs +++ b/mayastor/src/bin/mayastor-client/nexus_cli.rs @@ -155,9 +155,8 @@ async fn nexus_create( let size = parse_size(matches.value_of("size").unwrap()) .map_err(|s| Status::invalid_argument(format!("Bad size '{}'", s)))?; let children = matches - .value_of("children") - .unwrap() - .split_whitespace() + .values_of("children") + .unwrap() // It's required, it'll be here. .map(|c| c.to_string()) .collect::>(); From 7f0a5801fe6395b11f83824ae677110a6c47f726 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Thu, 11 Feb 2021 15:06:59 -0800 Subject: [PATCH 44/78] feat: unify mayastor-client address/port flags Unifies the `-a` and `-p` flags to a `-h` flag which takes a full url (optionally) without a scheme. Further, this makes the flag global, so it can be used in any position: ```bash RUST_BACKTRACE=1 RUST_LOG=mayastor=trace cargo run -- nexus list cargo run -- --bind 127.0.0.1 nexus list cargo run -- --bind 127.0.0.1:10124 nexus list cargo run -- -b http://127.0.0.1:10124 nexus list cargo run -- --bind http://127.0.0.1 nexus list cargo run -- nexus list --bind 127.0.0.1 cargo run -- nexus list --bind 127.0.0.1:10124 cargo run -- nexus list -b http://127.0.0.1:10124 cargo run -- nexus list --bind http://127.0.0.1 ``` Signed-off-by: Ana Hobden --- Cargo.lock | 1 + mayastor/Cargo.toml | 3 +- mayastor/src/bin/mayastor-client/context.rs | 75 ++++++++++++++++---- mayastor/src/bin/mayastor-client/main.rs | 76 +++++++++++---------- 4 files changed, 106 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d821aed64..30cac0dd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2351,6 +2351,7 @@ dependencies = [ "futures", "futures-timer", "git-version", + "http 0.1.21", "io-uring", "ioctl-gen", "ipnetwork", diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 11f6f7a50..4979062cb 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -40,7 +40,7 @@ async-trait = "0.1.36" atty = "0.2" bincode = "1.2" byte-unit = "3.0.1" -bytes = "0.4.12" +bytes = "0.4" # We are blocked on updating http until we update tonic. chrono = "0.4" clap = "2.33.0" colored_json = "*" @@ -51,6 +51,7 @@ env_logger = "0.7" futures = "0.3" futures-timer = "2.0" git-version = "0.3" +http = "0.1" # We are blocked on updating http until we update tonic. 
io-uring = "0.4.0" ioctl-gen = "0.1.1" jsonrpc = { path = "../jsonrpc"} diff --git a/mayastor/src/bin/mayastor-client/context.rs b/mayastor/src/bin/mayastor-client/context.rs index 8cd1b2a9b..691d25e9d 100644 --- a/mayastor/src/bin/mayastor-client/context.rs +++ b/mayastor/src/bin/mayastor-client/context.rs @@ -1,7 +1,35 @@ use crate::{BdevClient, JsonClient, MayaClient}; use byte_unit::Byte; +use bytes::Bytes; use clap::ArgMatches; -use std::cmp::max; +use http::uri::{Authority, PathAndQuery, Scheme, Uri}; +use snafu::{Backtrace, ResultExt, Snafu}; +use std::{cmp::max, str::FromStr}; +use tonic::transport::Endpoint; + +#[derive(Debug, Snafu)] +pub enum Error { + #[snafu(display("Invalid URI bytes"))] + InvalidUriBytes { + source: http::uri::InvalidUriBytes, + backtrace: Backtrace, + }, + #[snafu(display("Invalid URI parts"))] + InvalidUriParts { + source: http::uri::InvalidUriParts, + backtrace: Backtrace, + }, + #[snafu(display("Invalid URI"))] + TonicInvalidUri { + source: tonic::codegen::http::uri::InvalidUri, + backtrace: Backtrace, + }, + #[snafu(display("Invalid URI"))] + InvalidUri { + source: http::uri::InvalidUri, + backtrace: Backtrace, + }, +} pub struct Context { pub(crate) client: MayaClient, @@ -12,7 +40,7 @@ pub struct Context { } impl Context { - pub(crate) async fn new(matches: &ArgMatches<'_>) -> Self { + pub(crate) async fn new(matches: &ArgMatches<'_>) -> Result { let verbosity = if matches.is_present("quiet") { 0 } else { @@ -22,27 +50,48 @@ impl Context { .value_of("units") .and_then(|u| u.chars().next()) .unwrap_or('b'); - let endpoint = { - let addr = matches.value_of("address").unwrap_or("127.0.0.1"); - let port = value_t!(matches.value_of("port"), u16).unwrap_or(10124); - format!("{}:{}", addr, port) + // Ensure the provided host is defaulted & normalized to what we expect. + // TODO: This can be significantly cleaned up when we update tonic 0.1 + // and its deps. + let host = if let Some(host) = matches.value_of("bind") { + let uri = + Uri::from_shared(Bytes::from(host)).context(InvalidUriBytes)?; + let mut parts = uri.into_parts(); + if parts.scheme.is_none() { + parts.scheme = Scheme::from_str("http").ok(); + } + if let Some(ref mut authority) = parts.authority { + if authority.port_part().is_none() { + parts.authority = Authority::from_shared(Bytes::from( + format!("{}:{}", authority.host(), 10124), + )) + .ok() + } + } + if parts.path_and_query.is_none() { + parts.path_and_query = PathAndQuery::from_str("/").ok(); + } + let uri = Uri::from_parts(parts).context(InvalidUriParts)?; + Endpoint::from_shared(uri.to_string()).context(TonicInvalidUri)? 
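+            // Net effect of the normalization above, with the defaults it
+            // applies (scheme "http", port 10124, path "/"):
+            //   "127.0.0.1"      -> http://127.0.0.1:10124/
+            //   "127.0.0.1:1234" -> http://127.0.0.1:1234/
+            //   "http://host"    -> http://host:10124/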
+ } else { + Endpoint::from_static("http://127.0.0.1:10124") }; - let uri = format!("http://{}", endpoint); + if verbosity > 1 { - println!("Connecting to {}", uri); + println!("Connecting to {:?}", host); } - let client = MayaClient::connect(uri.clone()).await.unwrap(); - let bdev = BdevClient::connect(uri.clone()).await.unwrap(); - let json = JsonClient::connect(uri).await.unwrap(); + let client = MayaClient::connect(host.clone()).await.unwrap(); + let bdev = BdevClient::connect(host.clone()).await.unwrap(); + let json = JsonClient::connect(host).await.unwrap(); - Context { + Ok(Context { client, bdev, json, verbosity, units, - } + }) } pub(crate) fn v1(&self, s: &str) { if self.verbosity > 0 { diff --git a/mayastor/src/bin/mayastor-client/main.rs b/mayastor/src/bin/mayastor-client/main.rs index a4846a946..1b92bcd14 100644 --- a/mayastor/src/bin/mayastor-client/main.rs +++ b/mayastor/src/bin/mayastor-client/main.rs @@ -1,18 +1,15 @@ -#[macro_use] -extern crate clap; - use byte_unit::Byte; use clap::{App, AppSettings, Arg}; -use tonic::{transport::Channel, Status}; +use snafu::{Backtrace, ResultExt, Snafu}; +use tonic::transport::Channel; +use crate::context::Context; use ::rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, json_rpc_client::JsonRpcClient, mayastor_client::MayastorClient, }; -use crate::context::Context; - mod bdev_cli; mod context; mod device_cli; @@ -29,12 +26,28 @@ type MayaClient = MayastorClient; type BdevClient = BdevRpcClient; type JsonClient = JsonRpcClient; +#[derive(Debug, Snafu)] +pub enum Error { + #[snafu(display("gRPC status: {}", source))] + GrpcStatus { + source: tonic::Status, + backtrace: Backtrace, + }, + #[snafu(display("Context building error: {}", source))] + ContextError { + source: context::Error, + backtrace: Backtrace, + }, +} + +pub type Result = std::result::Result; + pub(crate) fn parse_size(src: &str) -> Result { Byte::from_str(src).map_err(|_| src.to_string()) } #[tokio::main(max_threads = 2)] -async fn main() -> Result<(), Status> { +async fn main() -> crate::Result<()> { env_logger::init(); let matches = App::new("Mayastor CLI") @@ -45,18 +58,13 @@ async fn main() -> Result<(), Status> { AppSettings::ColorAlways]) .about("CLI utility for Mayastor") .arg( - Arg::with_name("address") - .short("a") - .long("address") - .default_value("127.0.0.1") + Arg::with_name("bind") + .short("b") + .long("bind") + .default_value("http://127.0.0.1:10124") .value_name("HOST") - .help("IP address of mayastor instance")) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .default_value("10124").value_name("NUMBER") - .help("Port number of mayastor server")) + .help("The URI of mayastor instance") + .global(true)) .arg( Arg::with_name("quiet") .short("q") @@ -68,7 +76,8 @@ async fn main() -> Result<(), Status> { .long("verbose") .multiple(true) .help("Verbose output") - .conflicts_with("quiet")) + .conflicts_with("quiet") + .global(true)) .arg( Arg::with_name("units") .short("u") @@ -89,22 +98,19 @@ async fn main() -> Result<(), Status> { .subcommand(jsonrpc_cli::subcommands()) .get_matches(); - let ctx = Context::new(&matches).await; - - match matches.subcommand() { - ("bdev", Some(args)) => bdev_cli::handler(ctx, args).await?, - ("device", Some(args)) => device_cli::handler(ctx, args).await?, - ("nexus", Some(args)) => nexus_cli::handler(ctx, args).await?, - ("perf", Some(args)) => perf_cli::handler(ctx, args).await?, - ("pool", Some(args)) => pool_cli::handler(ctx, args).await?, - ("replica", Some(args)) => replica_cli::handler(ctx, 
args).await?, - ("rebuild", Some(args)) => rebuild_cli::handler(ctx, args).await?, - ("snapshot", Some(args)) => snapshot_cli::handler(ctx, args).await?, - ("jsonrpc", Some(args)) => { - jsonrpc_cli::json_rpc_call(ctx, args).await? - } + let ctx = Context::new(&matches).await.context(ContextError)?; - _ => eprintln!("Internal Error: Not implemented"), + let status = match matches.subcommand() { + ("bdev", Some(args)) => bdev_cli::handler(ctx, args).await, + ("device", Some(args)) => device_cli::handler(ctx, args).await, + ("nexus", Some(args)) => nexus_cli::handler(ctx, args).await, + ("perf", Some(args)) => perf_cli::handler(ctx, args).await, + ("pool", Some(args)) => pool_cli::handler(ctx, args).await, + ("replica", Some(args)) => replica_cli::handler(ctx, args).await, + ("rebuild", Some(args)) => rebuild_cli::handler(ctx, args).await, + ("snapshot", Some(args)) => snapshot_cli::handler(ctx, args).await, + ("jsonrpc", Some(args)) => jsonrpc_cli::json_rpc_call(ctx, args).await, + _ => panic!("Command not found"), }; - Ok(()) + status.context(GrpcStatus) } From ae482c561d14590986e40dd8a6c28099fa9c9af0 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Mon, 22 Feb 2021 12:31:42 -0800 Subject: [PATCH 45/78] chore: fix cli tests to use -b Signed-off-by: Ana Hobden --- nix/pkgs/mayastor/default.nix | 2 +- test/grpc/test_cli.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 7f60e8507..d12615e3b 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -56,7 +56,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1m0n418hp4h3j32j632l7pf2kl4pwzbzssx7h0wh5m90wsh41cy4"; + cargoSha256 = "q2Dp9IuwxHSwZEkEiIc49M4d/BI/GyS+lnTrs4TKRs8="; inherit version; src = whitelistSource ../../../. src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/test/grpc/test_cli.js b/test/grpc/test_cli.js index d5b6e5dd0..4572f4d43 100644 --- a/test/grpc/test_cli.js +++ b/test/grpc/test_cli.js @@ -28,7 +28,7 @@ const CLIENT_CMD = path.join( 'debug', 'mayastor-client' ); -const EGRESS_CMD = CLIENT_CMD + ' -p ' + EGRESS_PORT; +const EGRESS_CMD = CLIENT_CMD + ' --bind 127.0.0.1:' + EGRESS_PORT; let mayastorMockServer; From 347d37aea413948ab6c8e5aa9af267fd7f36a93f Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Wed, 24 Feb 2021 15:38:47 +0000 Subject: [PATCH 46/78] feat(nexus): allow shared replica to be added as local child Move to a build of SPDK that allows that and change the existing nexus_multipath cargo test to test that scenario. This also means a bdev could be shared over multiple protocols so disallow that for a bdev that is already claimed. Add a message for the iSCSI Error::CreateTarget for consistency with nvmf. Rename the cargo test that checks it to nexus_share_test for clarity. 
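In isolation, the guard that both share paths now apply looks roughly like
this (a sketch only; `Bdev` and `Error` are the mayastor types touched in the
diff below, and the free function is invented for illustration):

```rust
// Refuse to share a bdev that already has a claim on it, e.g. because it is
// part of a nexus or is already exported over another protocol. Both the
// nvmf and iscsi share paths now fail early with the same CreateTarget error.
fn ensure_shareable(bdev: &Bdev) -> Result<(), Error> {
    if bdev.is_claimed() {
        return Err(Error::CreateTarget {
            msg: "already shared".to_string(),
        });
    }
    Ok(())
}
```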
--- mayastor/src/subsys/nvmf/subsystem.rs | 5 +++++ mayastor/src/target/iscsi.rs | 11 +++++++++-- mayastor/tests/nexus_multipath.rs | 17 +++-------------- mayastor/tests/nexus_share.rs | 2 +- nix/pkgs/libspdk/default.nix | 4 ++-- 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index 10e5a3689..3774e5f34 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -123,6 +123,11 @@ impl TryFrom for NvmfSubsystem { type Error = Error; fn try_from(bdev: Bdev) -> Result { + if bdev.is_claimed() { + return Err(Error::CreateTarget { + msg: "already shared".to_string(), + }); + } let ss = NvmfSubsystem::new(bdev.name().as_str())?; ss.set_ana_reporting(true)?; ss.allow_any(true); diff --git a/mayastor/src/target/iscsi.rs b/mayastor/src/target/iscsi.rs index 176e617b3..7f8995ed8 100644 --- a/mayastor/src/target/iscsi.rs +++ b/mayastor/src/target/iscsi.rs @@ -60,7 +60,7 @@ pub enum Error { #[snafu(display("Failed to create default initiator group"))] CreateInitiatorGroup {}, #[snafu(display("Failed to create iscsi target"))] - CreateTarget {}, + CreateTarget { msg: String }, #[snafu(display("Failed to destroy iscsi target"))] DestroyTarget { source: Errno }, } @@ -217,7 +217,9 @@ fn share_as_iscsi_target( }; if tgt.is_null() { error!("Failed to create iscsi target {}", bdev.name()); - Err(Error::CreateTarget {}) + Err(Error::CreateTarget { + msg: "tgt pointer is None".to_string(), + }) } else { let _ = unsafe { spdk_bdev_module_claim_bdev( @@ -233,6 +235,11 @@ fn share_as_iscsi_target( /// Export given bdev over iscsi. That involves creating iscsi target and /// adding the bdev as LUN to it. pub fn share(bdev_name: &str, bdev: &Bdev, side: Side) -> Result { + if bdev.is_claimed() { + return Err(Error::CreateTarget { + msg: "already shared".to_string(), + }); + } let iqn = match side { Side::Nexus => share_as_iscsi_target( bdev_name, diff --git a/mayastor/tests/nexus_multipath.rs b/mayastor/tests/nexus_multipath.rs index adcb0f37d..c61c965f4 100644 --- a/mayastor/tests/nexus_multipath.rs +++ b/mayastor/tests/nexus_multipath.rs @@ -1,5 +1,5 @@ //! Multipath NVMf tests -//! Create the same nexus on both nodes with a replica on 1 node their child. +//! Create the same nexus on both nodes with a replica on 1 node as their child. 
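+//! The replica is shared over nvmf at creation time (share: 1), so the
+//! separate share_replica step this test used to perform is no longer needed.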
use mayastor::{ bdev::{nexus_create, nexus_lookup}, core::MayastorCliArgs, @@ -10,7 +10,6 @@ use rpc::mayastor::{ CreateReplicaRequest, PublishNexusRequest, ShareProtocolNexus, - ShareReplicaRequest, }; use std::process::Command; @@ -45,7 +44,7 @@ async fn nexus_multipath() { .await .unwrap(); - // create replica, not shared + // create replica, shared over nvmf hdls[0] .mayastor .create_replica(CreateReplicaRequest { @@ -53,7 +52,7 @@ async fn nexus_multipath() { pool: POOL_NAME.to_string(), size: 32 * 1024 * 1024, thin: false, - share: 0, + share: 1, }) .await .unwrap(); @@ -69,16 +68,6 @@ async fn nexus_multipath() { .await .unwrap(); - // share replica - hdls[0] - .mayastor - .share_replica(ShareReplicaRequest { - uuid: UUID.to_string(), - share: 1, - }) - .await - .unwrap(); - let mayastor = MayastorTest::new(MayastorCliArgs::default()); let ip0 = hdls[0].endpoint.ip(); let nexus_name = format!("nexus-{}", UUID); diff --git a/mayastor/tests/nexus_share.rs b/mayastor/tests/nexus_share.rs index 199a48754..f6a6f17c3 100644 --- a/mayastor/tests/nexus_share.rs +++ b/mayastor/tests/nexus_share.rs @@ -14,7 +14,7 @@ pub mod common; use common::MayastorTest; #[tokio::test] -async fn nexus_test() { +async fn nexus_share_test() { let args = MayastorCliArgs { reactor_mask: "0x3".into(), ..Default::default() diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix index 0bc15b549..83f6d79dd 100644 --- a/nix/pkgs/libspdk/default.nix +++ b/nix/pkgs/libspdk/default.nix @@ -24,8 +24,8 @@ let src = fetchFromGitHub { owner = "openebs"; repo = "spdk"; - rev = "37164626e403cca75afac7e8a47cd53b730bc921"; - sha256 = "0gkdqqs990hgblz0rlkg8355klxnxi2cdvy5p6ws9nqz8cxwrg14"; + rev = "b0768fdc07c5b0c8d7a6eb7d43499a45e55b9a9d"; + sha256 = "14lx0g7701l14npjfqqrfcfp2rhba2x4545m396gqilpg838i8x4"; #sha256 = stdenv.lib.fakeSha256; fetchSubmodules = true; }; From b85a95ec01a41ba5cd565d414e4cee6249f4825c Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Wed, 24 Feb 2021 11:03:33 -0800 Subject: [PATCH 47/78] chore: clean up and refine spdk use Signed-off-by: Ana Hobden --- rust-toolchain | 1 - spdk-sys/build.rs | 9 +++++++-- spdk-sys/logwrapper.c | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) delete mode 100644 rust-toolchain diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 2bf5ad044..000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -stable diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 3bad291b9..934708f64 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -41,7 +41,7 @@ fn build_wrapper() { fn main() { #![allow(unreachable_code)] #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] - panic!("spdk-sys crate is only for x86_64 (Nehelam or later) and aarch64 ISAs."); + panic!("spdk-sys crate is only for x86_64 (Nehalem or later) and aarch64 (with crypto) ISAs."); #[cfg(not(target_os = "linux"))] panic!("spdk-sys crate works only on linux"); @@ -102,7 +102,12 @@ fn main() { .generate_inline_functions(true) .parse_callbacks(Box::new(MacroCallback { macros, - })) + })); + + #[cfg(target_arch = "x86_64")] + let bindings = bindings.clang_arg("-march=nehalem"); + + let bindings = bindings .generate() .expect("Unable to generate bindings"); diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index c7d713777..7192515b3 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -6,7 +6,7 @@ maya_log(int level, const char *file, const int line, const char *func, { // There is a delicate balance here! 
This `buf` ideally should not be resized, since a realloc is expensive. char buf[4096] = {0}; - unsigned int should_have_written = vsnprintf(buf, sizeof(buf), format, args); + int should_have_written = vsnprintf(buf, sizeof(buf), format, args); if (should_have_written > sizeof(buf)) { logfn(level, file, line, func, &buf[0], sizeof(buf)); From e342bd9775674d7b7186d8cb1df596a67a955d3c Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Wed, 10 Feb 2021 11:07:00 +0000 Subject: [PATCH 48/78] test(e2e): extend e2e continuous tests Define the continuous tests in the test script rather than the Jenkinsfile. Add rebuild and replica e2e tests. Ensure commit status is not affected by continuous tests. --- Jenkinsfile | 53 ++++++++++++++++++++------------------------ scripts/e2e-test.sh | 54 +++++++++++++++++++++++++++++++++++---------- 2 files changed, 66 insertions(+), 41 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7855f2e95..f4c8b4d90 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -85,8 +85,6 @@ if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && // Only schedule regular builds on develop branch, so we don't need to guard against it String cron_schedule = BRANCH_NAME == "develop" ? "0 2 * * *" : "" -// Some long e2e tests are not suitable to be run for each PR -boolean run_extended_e2e_tests = (env.BRANCH_NAME != 'staging' && env.BRANCH_NAME != 'trying') ? true : false // Determine which stages to run if (params.e2e_continuous == true) { @@ -95,7 +93,7 @@ if (params.e2e_continuous == true) { grpc_test = false moac_test = false e2e_test = true - e2e_test_set = 'install basic_volume_io csi resource_check uninstall' + e2e_test_profile = "continuous" // use images from dockerhub tagged with e2e_continuous_image_tag instead of building from current source e2e_build_images = false // do not push images even when running on master/develop/release branches @@ -106,7 +104,8 @@ if (params.e2e_continuous == true) { grpc_test = true moac_test = true e2e_test = true - e2e_test_set = '' + // Some long e2e tests are not suitable to be run for each PR + e2e_test_profile = (env.BRANCH_NAME != 'staging' && env.BRANCH_NAME != 'trying') ? 
"extended" : "ondemand" e2e_build_images = true do_not_push_images = false } @@ -291,18 +290,12 @@ pipeline { } else { tag = e2e_continuous_image_tag } - def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --logsdir \"./logs/mayastor\" " + def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --logsdir \"./logs/mayastor\" --profile \"${e2e_test_profile}\" " // building images also means using the CI registry if (e2e_build_images == true) { cmd = cmd + " --registry \"" + env.REGISTRY + "\"" } - if (run_extended_e2e_tests) { - cmd = cmd + " --extended" - } - if (e2e_test_set != '') { - cmd = cmd + " --tests \"" + e2e_test_set + "\"" - } sh "nix-shell --run '${cmd}'" } } @@ -445,25 +438,27 @@ pipeline { // If no tests were run then we should neither be updating commit // status in github nor send any slack messages if (currentBuild.result != null) { - step([ - $class: 'GitHubCommitStatusSetter', - errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], - contextSource: [ - $class: 'ManuallyEnteredCommitContextSource', - context: 'continuous-integration/jenkins/branch' - ], - statusResultSource: [ - $class: 'ConditionalStatusResultSource', - results: [ - [$class: 'AnyBuildResult', message: 'Pipeline result', state: currentBuild.getResult()] + // Do not update the commit status for continuous tests + if (params.e2e_continuous == false) { + step([ + $class: 'GitHubCommitStatusSetter', + errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], + contextSource: [ + $class: 'ManuallyEnteredCommitContextSource', + context: 'continuous-integration/jenkins/branch' + ], + statusResultSource: [ + $class: 'ConditionalStatusResultSource', + results: [ + [$class: 'AnyBuildResult', message: 'Pipeline result', state: currentBuild.getResult()] + ] ] - ] - ]) - if (env.BRANCH_NAME == 'develop') { - notifySlackUponStateChange(currentBuild) - } - if (params.e2e_continuous == true) { - notifySlackUponE2EFailure(currentBuild) + ]) + if (env.BRANCH_NAME == 'develop') { + notifySlackUponStateChange(currentBuild) + } + } else { + notifySlackUponE2EFailure(currentBuild) } } } diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 3bcb57069..c545000ab 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -14,8 +14,10 @@ REPORTSDIR=$(realpath "$SCRIPTDIR/..") # 2. replicas_pod_remove SHOULD be the last test before uninstall # this is a disruptive test. #TESTS="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall" -TESTS="install basic_volume_io csi resource_check uninstall" -EXTENDED_TESTS="" +ALL_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" +ONDEMAND_TESTS="install basic_volume_io csi resource_check uninstall" +EXTENDED_TESTS="install basic_volume_io csi resource_check uninstall" +CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" # Global state variables # test configuration state variables @@ -24,7 +26,8 @@ registry= tag="ci" # script state variables tests="" -run_extended_tests= +custom_tests="" +profile="default" on_fail="stop" uninstall_cleanup="n" generate_logs=0 @@ -41,7 +44,8 @@ Options: --tests Lists of tests to run, delimited by spaces (default: "$tests") Note: the last 2 tests should be (if they are to be run) node_disconnect/replica_pod_remove uninstall - --extended Run long running tests also. 
+  --profile
+                        Run the tests corresponding to the profile (default: run all tests)
   --reportsdir          Path to use for junit xml test reports (default: repo root)
   --logs                Generate logs and cluster state dump at the end of successful test run,
                         prior to uninstall.
@@ -72,7 +76,7 @@ while [ "$#" -gt 0 ]; do
       ;;
     -T|--tests)
       shift
-      tests="$1"
+      custom_tests="$1"
       ;;
     -R|--reportsdir)
       shift
@@ -92,8 +96,9 @@ while [ "$#" -gt 0 ]; do
         logsdir="$PWD/$logsdir"
       fi
       ;;
-    -e|--extended)
-      run_extended_tests=1
+    --profile)
+      shift
+      profile="$1"
       ;;
     --onfail)
       shift
@@ -142,13 +147,38 @@ fi
 export e2e_docker_registry="$registry" # can be empty string

-if [ -z "$tests" ]; then
-  tests="$TESTS"
-  if [ -n "$run_extended_tests" ]; then
-    tests="$tests $EXTENDED_TESTS"
+if [ -n "$custom_tests" ]; then
+  if [ "$profile" != "default" ]; then
+    echo "cannot specify --profile with --tests"
+    help
+    exit 1
   fi
+  profile="custom"
 fi

+case "$profile" in
+  continuous)
+    tests="$CONTINUOUS_TESTS"
+    ;;
+  extended)
+    tests="$EXTENDED_TESTS"
+    ;;
+  ondemand)
+    tests="$ONDEMAND_TESTS"
+    ;;
+  custom)
+    tests="$custom_tests"
+    ;;
+  default)
+    tests="$ALL_TESTS"
+    ;;
+  *)
+    echo "Unknown profile: $profile"
+    help
+    exit 1
+    ;;
+esac
+
 export e2e_reports_dir="$REPORTSDIR"
 if [ ! -d "$e2e_reports_dir" ] ; then
   echo "Reports directory $e2e_reports_dir does not exist"
@@ -197,7 +227,7 @@ echo " e2e_reports_dir=$e2e_reports_dir"
 echo " e2e_uninstall_cleanup=$e2e_uninstall_cleanup"
 echo ""
 echo "Script control settings:"
-echo " run_extended_tests=$run_extended_tests"
+echo " profile=$profile"
 echo " on_fail=$on_fail"
 echo " uninstall_cleanup=$uninstall_cleanup"
 echo " generate_logs=$generate_logs"

From c417ad3fa4802da8b4eb52cf6863f0ceab88d092 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Thu, 25 Feb 2021 17:27:46 +0100
Subject: [PATCH 49/78] fix(moac): mayastor pool state is shown as blank

A well-known problem, now with a decent workaround. After experimenting
with tcp keep-alive, which did not bring any relief, I decided to brush
the dust off my previous mini-feature that restarts watcher connections
every n minutes. By default, as defined in the yaml file now, it is every
10 minutes.

Most of the diff is about updating the version of the JavaScript k8s
client. That change will eventually be merged upstream. With it, the MOD
event is not emitted upon watcher restart if the resource has not
changed, which makes the watcher restarts really smooth.

I have tested the change in Google Cloud.
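The idea, sketched out of context (moac itself is JavaScript; this Rust
sketch with invented names only illustrates the restart-on-idle pattern):

```rust
use std::time::{Duration, Instant};

struct Watcher {
    last_event: Instant,
    idle_timeout: Duration,
}

impl Watcher {
    // Called periodically: if the watch connection has been idle for longer
    // than the configured timeout, tear it down and re-establish it. With the
    // updated client suppressing MOD events for unchanged resources, such a
    // restart is invisible to consumers.
    fn tick(&mut self) {
        if self.last_event.elapsed() > self.idle_timeout {
            self.restart();
            self.last_event = Instant::now();
        }
    }

    fn restart(&mut self) {
        // re-list and re-watch the resource here
    }
}
```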
Resolves: CAS-645 and CAS-630 --- chart/templates/moac-deployment.yaml | 1 + csi/moac/node-composition.nix | 10 +-- csi/moac/node-env.nix | 83 +++++++++++++++--------- csi/moac/node-packages.nix | 96 ++++++++++++++-------------- csi/moac/package-lock.json | 64 +++++++++---------- csi/moac/package.json | 2 +- deploy/moac-deployment.yaml | 1 + 7 files changed, 140 insertions(+), 117 deletions(-) diff --git a/chart/templates/moac-deployment.yaml b/chart/templates/moac-deployment.yaml index d77556c0b..c18359e2f 100644 --- a/chart/templates/moac-deployment.yaml +++ b/chart/templates/moac-deployment.yaml @@ -49,6 +49,7 @@ spec: - "--csi-address=$(CSI_ENDPOINT)" - "--namespace=$(MY_POD_NAMESPACE)" - "--port=4000" + - "--watcher-idle-timeout=600000" - "--message-bus=nats"{{ if .Values.moacDebug }} - "-vv"{{ end }} env: diff --git a/csi/moac/node-composition.nix b/csi/moac/node-composition.nix index 6441534a8..69a3cd035 100644 --- a/csi/moac/node-composition.nix +++ b/csi/moac/node-composition.nix @@ -1,18 +1,18 @@ -# This file has been generated by node2nix 1.8.0. Do not edit! +# This file has been generated by node2nix 1.9.0. Do not edit! {pkgs ? import { inherit system; - }, system ? builtins.currentSystem, nodejs-slim ? pkgs.nodejs-slim-12_x, nodejs ? pkgs."nodejs-12_x"}: + }, system ? builtins.currentSystem, nodejs-slim ? pkgs."nodejs-slim-12_x", nodejs ? pkgs."nodejs-12_x"}: let nodeEnv = import ./node-env.nix { - inherit (pkgs) stdenv python2 utillinux runCommand writeTextFile; - inherit nodejs; + inherit (pkgs) stdenv lib python2 runCommand writeTextFile; + inherit pkgs nodejs; inherit nodejs-slim; libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null; }; in import ./node-packages.nix { - inherit (pkgs) fetchurl fetchgit; + inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit; inherit nodeEnv; } diff --git a/csi/moac/node-env.nix b/csi/moac/node-env.nix index 4d35a5efa..514f0590e 100644 --- a/csi/moac/node-env.nix +++ b/csi/moac/node-env.nix @@ -1,8 +1,11 @@ # This file originates from node2nix -{stdenv, nodejs-slim, nodejs, python2, utillinux, libtool, runCommand, writeTextFile}: +{lib, stdenv, nodejs-slim, nodejs, python2, pkgs, libtool, runCommand, writeTextFile}: let + # Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master + utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux; + python = if nodejs ? python then nodejs.python else python2; # Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise @@ -38,8 +41,8 @@ let }; includeDependencies = {dependencies}: - stdenv.lib.optionalString (dependencies != []) - (stdenv.lib.concatMapStrings (dependency: + lib.optionalString (dependencies != []) + (lib.concatMapStrings (dependency: '' # Bundle the dependencies of the package mkdir -p node_modules @@ -100,7 +103,7 @@ let cd "$DIR/${packageName}" ${includeDependencies { inherit dependencies; }} cd .. 
- ${stdenv.lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} ''; pinpointDependencies = {dependencies, production}: @@ -161,12 +164,12 @@ let '' node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"} - ${stdenv.lib.optionalString (dependencies != []) + ${lib.optionalString (dependencies != []) '' if [ -d node_modules ] then cd node_modules - ${stdenv.lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies} + ${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies} cd .. fi ''} @@ -183,7 +186,7 @@ let cd "${packageName}" ${pinpointDependencies { inherit dependencies production; }} cd .. - ${stdenv.lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} fi ''; @@ -242,8 +245,8 @@ let if(fs.existsSync("./package-lock.json")) { var packageLock = JSON.parse(fs.readFileSync("./package-lock.json")); - if(packageLock.lockfileVersion !== 1) { - process.stderr.write("Sorry, I only understand lock file version 1!\n"); + if(![1, 2].includes(packageLock.lockfileVersion)) { + process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n"); process.exit(1); } @@ -344,8 +347,8 @@ let cd "${packageName}" runHook preRebuild - ${stdenv.lib.optionalString bypassCache '' - ${stdenv.lib.optionalString reconstructLock '' + ${lib.optionalString bypassCache '' + ${lib.optionalString reconstructLock '' if [ -f package-lock.json ] then echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!" @@ -361,14 +364,14 @@ let node ${addIntegrityFieldsScript} ''} - npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${stdenv.lib.optionalString production "--production"} rebuild + npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild if [ "''${dontNpmInstall-}" != "1" ] then # NPM tries to download packages even when they already exist if npm-shrinkwrap is used. 
rm -f npm-shrinkwrap.json - npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${stdenv.lib.optionalString production "--production"} install + npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install fi ''; @@ -396,8 +399,8 @@ let stdenv.mkDerivation ({ name = "node_${name}-${version}"; buildInputs = [ tarWrapper python nodejs-slim nodejs ] - ++ stdenv.lib.optional (stdenv.isLinux) utillinux - ++ stdenv.lib.optional (stdenv.isDarwin) libtool + ++ lib.optional (stdenv.isLinux) utillinux + ++ lib.optional (stdenv.isDarwin) libtool ++ buildInputs; inherit nodejs; @@ -445,8 +448,8 @@ let ''; } // extraArgs); - # Builds a development shell - buildNodeShell = + # Builds a node environment (a node_modules folder and a set of binaries) + buildNodeDependencies = { name , packageName , version @@ -465,13 +468,13 @@ let let extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ]; - - nodeDependencies = stdenv.mkDerivation ({ + in + stdenv.mkDerivation ({ name = "node-dependencies-${name}-${version}"; buildInputs = [ tarWrapper python nodejs ] - ++ stdenv.lib.optional (stdenv.isLinux) utillinux - ++ stdenv.lib.optional (stdenv.isDarwin) libtool + ++ lib.optional (stdenv.isLinux) utillinux + ++ lib.optional (stdenv.isDarwin) libtool ++ buildInputs; inherit dontStrip; # Stripping may fail a build for some package deployments @@ -491,7 +494,7 @@ let # Create fake package.json to make the npm commands work properly cp ${src}/package.json . chmod 644 package.json - ${stdenv.lib.optionalString bypassCache '' + ${lib.optionalString bypassCache '' if [ -f ${src}/package-lock.json ] then cp ${src}/package-lock.json . @@ -500,23 +503,44 @@ let # Go to the parent folder to make sure that all packages are pinpointed cd .. - ${stdenv.lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }} # Expose the executables that were installed cd .. - ${stdenv.lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} mv ${packageName} lib ln -s $out/lib/node_modules/.bin $out/bin ''; } // extraArgs); + + # Builds a development shell + buildNodeShell = + { name + , packageName + , version + , src + , dependencies ? [] + , buildInputs ? [] + , production ? true + , npmFlags ? "" + , dontNpmInstall ? false + , bypassCache ? false + , reconstructLock ? false + , dontStrip ? true + , unpackPhase ? "true" + , buildPhase ? "true" + , ... 
}@args:
+
+ let
+ nodeDependencies = buildNodeDependencies args;
 in
 stdenv.mkDerivation {
 name = "node-shell-${name}-${version}";

- buildInputs = [ python nodejs ] ++ stdenv.lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
+ buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
 buildCommand = ''
 mkdir -p $out/bin
 cat > $out/bin/shell <

From: shubham
Date: Fri, 26 Feb 2021 20:22:59 +0530
Subject: [PATCH 50/78] feat: make bdev share protocol argument as an option

Signed-off-by: shubham
---
 mayastor/src/bin/mayastor-client/bdev_cli.rs | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/mayastor/src/bin/mayastor-client/bdev_cli.rs b/mayastor/src/bin/mayastor-client/bdev_cli.rs
index 328b1a848..3aa8b83c4 100644
--- a/mayastor/src/bin/mayastor-client/bdev_cli.rs
+++ b/mayastor/src/bin/mayastor-client/bdev_cli.rs
@@ -37,12 +37,10 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> {
 let share = SubCommand::with_name("share")
 .about("share the given bdev")
 .arg(Arg::with_name("name").required(true).index(1))
- .arg(
- Arg::with_name("protocol")
- .help("the protocol to used to share the given bdev")
- .required(false)
- .default_value("nvmf")
- .value_names(&["nvmf", "iscsi"]),
+ .arg(Arg::from_usage(" 'the protocol used to share the given bdev'")
+ .short("p")
+ .possible_values(&["nvmf", "iscsi"])
+ .default_value("nvmf"),
 );

 let unshare = SubCommand::with_name("unshare")
From 00a669ef3708f12001e0db59550da6ea7ea7e5f1 Mon Sep 17 00:00:00 2001
From: shubham
Date: Fri, 26 Feb 2021 23:54:31 +0530
Subject: [PATCH 51/78] fix: maintain consistency with other args

Signed-off-by: shubham
---
 mayastor/src/bin/mayastor-client/bdev_cli.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mayastor/src/bin/mayastor-client/bdev_cli.rs b/mayastor/src/bin/mayastor-client/bdev_cli.rs
index 3aa8b83c4..f62e78b87 100644
--- a/mayastor/src/bin/mayastor-client/bdev_cli.rs
+++ b/mayastor/src/bin/mayastor-client/bdev_cli.rs
@@ -37,8 +37,11 @@ pub fn subcommands<'a, 'b>() -> App<'a, 'b> {
 let share = SubCommand::with_name("share")
 .about("share the given bdev")
 .arg(Arg::with_name("name").required(true).index(1))
- .arg(Arg::from_usage(" 'the protocol used to share the given bdev'")
+ .arg(
+ Arg::with_name("protocol")
 .short("p")
+ .help("the protocol used to share the given bdev")
+ .required(false)
 .possible_values(&["nvmf", "iscsi"])
 .default_value("nvmf"),
 );
From 0dcd7c287307548a4fdaf616cf178d6471328d55 Mon Sep 17 00:00:00 2001
From: chriswldenyer
Date: Mon, 1 Mar 2021 11:00:43 +0000
Subject: [PATCH 52/78] test(e2e): avoid conflicts in metrics port usage

Disable metrics in the agent when running e2e tests to avoid a conflict
in use of port 8080 if multiple agents run on the same machine.
---
 test/e2e/common/test.go | 2 ++
 test/e2e/install/install_test.go | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/test/e2e/common/test.go b/test/e2e/common/test.go
index 5b8b73134..e3612a4ef 100644
--- a/test/e2e/common/test.go
+++ b/test/e2e/common/test.go
@@ -46,6 +46,8 @@ func SetupTestEnv() {

 k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
 Scheme: scheme.Scheme,
+ // We do not consume prometheus metrics.
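+ // (controller-runtime treats a bind address of "0" as "do not serve
+ // metrics at all", rather than "listen on an arbitrary free port".)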
+ MetricsBindAddress: "0",
 })
 Expect(err).ToNot(HaveOccurred())

diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go
index b78106503..155d2b81c 100644
--- a/test/e2e/install/install_test.go
+++ b/test/e2e/install/install_test.go
@@ -284,6 +284,8 @@ var _ = BeforeSuite(func(done Done) {

 k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
 Scheme: scheme.Scheme,
+ // We do not consume prometheus metrics.
+ MetricsBindAddress: "0",
 })
 Expect(err).ToNot(HaveOccurred())

From f45a5c41ed283e37caf618bfc57af7ad785c08dd Mon Sep 17 00:00:00 2001
From: Ana Hobden
Date: Mon, 1 Mar 2021 10:46:16 -0800
Subject: [PATCH 53/78] chore: touchup log wrapper lints and comments

Signed-off-by: Ana Hobden
---
 spdk-sys/logwrapper.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c
index 7192515b3..1216fda20 100644
--- a/spdk-sys/logwrapper.c
+++ b/spdk-sys/logwrapper.c
@@ -1,14 +1,16 @@
 #include "logwrapper.h"
+#include <stdarg.h>
+#include <stdio.h>

 void
 maya_log(int level, const char *file, const int line, const char *func,
 const char *format, va_list args)
 {
- // There is a delicate balance here! This `buf` ideally should not be resized, since a realloc is expensive.
+ // There is a delicate balance here! This `buf` ideally should not be resized, since a heap alloc is expensive.
 char buf[4096] = {0};
 int should_have_written = vsnprintf(buf, sizeof(buf), format, args);

- if (should_have_written > sizeof(buf)) {
+ if (should_have_written > (int) sizeof(buf)) {
 logfn(level, file, line, func, &buf[0], sizeof(buf));
 } else {
 // If `should_have_written` is bigger than `buf`, then the message is too long.
From 0a34468544fed49cd680e480c2fb8043c98c8b2f Mon Sep 17 00:00:00 2001
From: Ana Hobden
Date: Tue, 23 Feb 2021 16:10:56 -0800
Subject: [PATCH 54/78] feat: make bdev create parameters strict

Signed-off-by: Ana Hobden

chore: clean formatting

Signed-off-by: Ana Hobden
---
 mayastor/src/bdev/dev.rs | 24 +++++++++++++++++++++++-
 mayastor/src/bdev/dev/aio.rs | 6 ++----
 mayastor/src/bdev/dev/iscsi.rs | 6 ++----
 mayastor/src/bdev/dev/loopback.rs | 12 ++++++++----
 mayastor/src/bdev/dev/malloc.rs | 4 +++-
 mayastor/src/bdev/dev/null.rs | 4 +++-
 mayastor/src/bdev/dev/nvmf.rs | 6 ++----
 mayastor/src/bdev/dev/uring.rs | 6 ++----
 mayastor/src/bdev/util/uri.rs | 16 +---------------
 9 files changed, 46 insertions(+), 38 deletions(-)

diff --git a/mayastor/src/bdev/dev.rs b/mayastor/src/bdev/dev.rs
index ae8314b82..3246ae5a3 100644
--- a/mayastor/src/bdev/dev.rs
+++ b/mayastor/src/bdev/dev.rs
@@ -20,7 +20,7 @@
 //! bdev::Uri::parse(&uri)?.create().await?;
 //! ```

-use std::convert::TryFrom;
+use std::{collections::HashMap, convert::TryFrom};

 use snafu::ResultExt;
 use url::Url;
@@ -76,3 +76,25 @@ impl Uri {
 }
 }
 }
+
+fn reject_unknown_parameters(
+ url: &Url,
+ parameters: HashMap<String, String>,
+) -> Result<(), NexusBdevError> {
+ if !parameters.is_empty() {
+ let invalid_parameters = parameters
+ .iter()
+ .map(|(k, v)| format!("{}={}", k, v))
+ .collect::<Vec<String>>()
+ .join(", ");
+ Err(NexusBdevError::UriInvalid {
+ uri: url.to_string(),
+ message: format!(
+ "unrecognized parameters: {}.",
+ invalid_parameters
+ ),
+ })
+ } else {
+ Ok(())
+ }
+}
diff --git a/mayastor/src/bdev/dev/aio.rs b/mayastor/src/bdev/dev/aio.rs
index 26acfb6f9..f23fba3fd 100644
--- a/mayastor/src/bdev/dev/aio.rs
+++ b/mayastor/src/bdev/dev/aio.rs
@@ -8,7 +8,7 @@ use url::Url;
 use spdk_sys::{bdev_aio_delete, create_aio_bdev};

 use crate::{
- bdev::{util::uri, CreateDestroy, GetName},
+ bdev::{dev::reject_unknown_parameters, util::uri, CreateDestroy, GetName},
 core::Bdev,
 ffihelper::{cb_arg, done_errno_cb, errno_result_from_i32, ErrnoResult},
 nexus_uri::{self, NexusBdevError},
@@ -55,9 +55,7 @@ impl TryFrom<&Url> for Aio {
 },
 )?;

- if let Some(keys) = uri::keys(parameters) {
- warn!("ignored parameters: {}", keys);
- }
+ reject_unknown_parameters(url, parameters)?;

 Ok(Aio {
 name: url.path().into(),
diff --git a/mayastor/src/bdev/dev/iscsi.rs b/mayastor/src/bdev/dev/iscsi.rs
index eab2470a1..155863746 100644
--- a/mayastor/src/bdev/dev/iscsi.rs
+++ b/mayastor/src/bdev/dev/iscsi.rs
@@ -14,7 +14,7 @@ use uuid::Uuid;
 use spdk_sys::{create_iscsi_disk, delete_iscsi_disk, spdk_bdev};

 use crate::{
- bdev::{util::uri, CreateDestroy, GetName},
+ bdev::{dev::reject_unknown_parameters, util::uri, CreateDestroy, GetName},
 core::Bdev,
 ffihelper::{cb_arg, done_errno_cb, errno_result_from_i32, ErrnoResult},
 nexus_uri::{self, NexusBdevError},
@@ -70,9 +70,7 @@ impl TryFrom<&Url> for Iscsi {
 },
 )?;

- if let Some(keys) = uri::keys(parameters) {
- warn!("ignored parameters: {}", keys);
- }
+ reject_unknown_parameters(url, parameters)?;

 Ok(Iscsi {
 name: url[url::Position::BeforeHost .. url::Position::AfterPath]
diff --git a/mayastor/src/bdev/dev/loopback.rs b/mayastor/src/bdev/dev/loopback.rs
index a8718ab4b..4233e8aa9 100644
--- a/mayastor/src/bdev/dev/loopback.rs
+++ b/mayastor/src/bdev/dev/loopback.rs
@@ -5,7 +5,13 @@ use snafu::ResultExt;
 use url::Url;

 use crate::{
- bdev::{lookup_child_from_bdev, util::uri, CreateDestroy, GetName},
+ bdev::{
+ dev::reject_unknown_parameters,
+ lookup_child_from_bdev,
+ util::uri,
+ CreateDestroy,
+ GetName,
+ },
 core::Bdev,
 nexus_uri::{self, NexusBdevError},
 };
@@ -39,9 +45,7 @@ impl TryFrom<&Url> for Loopback {
 },
 )?;

- if let Some(keys) = uri::keys(parameters) {
- warn!("ignored parameters: {}", keys);
- }
+ reject_unknown_parameters(url, parameters)?;

 Ok(Loopback {
 name: segments.join("/"),
diff --git a/mayastor/src/bdev/dev/malloc.rs b/mayastor/src/bdev/dev/malloc.rs
index d71015387..e2c352fc7 100644
--- a/mayastor/src/bdev/dev/malloc.rs
+++ b/mayastor/src/bdev/dev/malloc.rs
@@ -4,7 +4,7 @@
 //! heap. IOW, you must ensure you do not run out of huge pages while using
 //! this.
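// A sketch of the behaviour change this patch makes, using a made-up
// URI: "aio:///dev/xvol?bogus=true" previously produced a warning,
// "ignored parameters: bogus", and carried on; with
// reject_unknown_parameters (added in dev.rs above) the same URI now
// fails up front with NexusBdevError::UriInvalid and the message
// "unrecognized parameters: bogus=true.", so typos in bdev URIs surface
// as errors instead of being silently dropped.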
 use crate::{
- bdev::util::uri,
+ bdev::{dev::reject_unknown_parameters, util::uri},
 nexus_uri::{
 NexusBdevError,
 {self},
 },
@@ -104,6 +104,8 @@ impl TryFrom<&Url> for Malloc {
 },
 )?;

+ reject_unknown_parameters(uri, parameters)?;
+
 Ok(Self {
 name: uri.path()[1 ..].into(),
 alias: uri.to_string(),
diff --git a/mayastor/src/bdev/dev/null.rs b/mayastor/src/bdev/dev/null.rs
index 22a76e9f9..d510c20fc 100644
--- a/mayastor/src/bdev/dev/null.rs
+++ b/mayastor/src/bdev/dev/null.rs
@@ -2,7 +2,7 @@
 //! returns undefined data for reads. It's useful for benchmarking the I/O stack
 //! with minimal overhead and should *NEVER* be used with *real* data.
 use crate::{
- bdev::util::uri,
+ bdev::{dev::reject_unknown_parameters, util::uri},
 nexus_uri::{
 NexusBdevError,
 {self},
 },
@@ -102,6 +102,8 @@ impl TryFrom<&Url> for Null {
 },
 )?;

+ reject_unknown_parameters(uri, parameters)?;
+
 Ok(Self {
 name: uri.path()[1 ..].into(),
 alias: uri.to_string(),
diff --git a/mayastor/src/bdev/dev/nvmf.rs b/mayastor/src/bdev/dev/nvmf.rs
index 4ebb2d93e..d86c914f8 100644
--- a/mayastor/src/bdev/dev/nvmf.rs
+++ b/mayastor/src/bdev/dev/nvmf.rs
@@ -21,7 +21,7 @@ use spdk_sys::{
 };

 use crate::{
- bdev::{util::uri, CreateDestroy, GetName},
+ bdev::{dev::reject_unknown_parameters, util::uri, CreateDestroy, GetName},
 core::Bdev,
 ffihelper::{cb_arg, errno_result_from_i32, ErrnoResult},
 nexus_uri::{self, NexusBdevError},
@@ -108,9 +108,7 @@ impl TryFrom<&Url> for Nvmf {
 },
 )?;

- if let Some(keys) = uri::keys(parameters) {
- warn!("ignored parameters: {}", keys);
- }
+ reject_unknown_parameters(url, parameters)?;

 Ok(Nvmf {
 name: url[url::Position::BeforeHost .. url::Position::AfterPath]
diff --git a/mayastor/src/bdev/dev/uring.rs b/mayastor/src/bdev/dev/uring.rs
index 09099de7b..c993560ec 100644
--- a/mayastor/src/bdev/dev/uring.rs
+++ b/mayastor/src/bdev/dev/uring.rs
@@ -8,7 +8,7 @@ use url::Url;
 use spdk_sys::{create_uring_bdev, delete_uring_bdev};

 use crate::{
- bdev::{util::uri, CreateDestroy, GetName},
+ bdev::{dev::reject_unknown_parameters, util::uri, CreateDestroy, GetName},
 core::Bdev,
 ffihelper::{cb_arg, done_errno_cb, ErrnoResult},
 nexus_uri::{self, NexusBdevError},
@@ -55,9 +55,7 @@ impl TryFrom<&Url> for Uring {
 },
 )?;

- if let Some(keys) = uri::keys(parameters) {
- warn!("ignored parameters: {}", keys);
- }
+ reject_unknown_parameters(url, parameters)?;

 Ok(Uring {
 name: url.path().into(),
diff --git a/mayastor/src/bdev/util/uri.rs b/mayastor/src/bdev/util/uri.rs
index 2084806c4..87c54f31e 100644
--- a/mayastor/src/bdev/util/uri.rs
+++ b/mayastor/src/bdev/util/uri.rs
@@ -1,6 +1,6 @@
 //! Simple utility functions to help with parsing URIs.

-use std::{collections::HashMap, str::ParseBoolError};
+use std::str::ParseBoolError;

 use url::Url;

@@ -18,20 +18,6 @@ pub(crate) fn segments(url: &Url) -> Vec<&str> {
 Vec::new()
 }

-/// Generate a comma separated list of all keys present in a HashMap as a String
-pub(crate) fn keys(map: HashMap<String, String>) -> Option<String> {
- if map.is_empty() {
- None
- } else {
- Some(
- map.keys()
- .map(|key| key.to_string())
- .collect::<Vec<String>>()
- .join(", "),
- )
- }
-}
-
 /// Parse a value that represents a boolean
 /// Acceptable values are: true, false, yes, no, on, off
 /// Also accept an (unsigned) integer, where 0 represents false

From b4f2c093c590ab2e3b2824c305f89198247f680a Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Mon, 1 Mar 2021 12:25:40 +0000
Subject: [PATCH 55/78] ci: i/o soak test - NVMe over TCP.

MQ-25

Deviations from MQ-25 test configuration
 - replication factor is set to 1 for now
 - application pods MAY be located on the same nodes as Mayastor
 - this requires a larger test cluster and more thought

Also included is a bunch of changes to ensure clusters are rendered
usable, to facilitate ease of test development. Re-creating clusters
is time consuming in the code-debug-test iteration cycle.
---
 .../basic_volume_io/basic_volume_io_test.go | 4 +-
 test/e2e/common/util.go | 26 +-
 test/e2e/common/util_cleanup.go | 276 +++++++++++++-----
 test/e2e/common/util_node.go | 17 +-
 test/e2e/common/util_pvc.go | 29 +-
 test/e2e/common/util_testpods.go | 118 ++++++--
 test/e2e/io_soak/README.md | 14 +
 test/e2e/io_soak/filesystem_fio.go | 67 +++++
 test/e2e/io_soak/fio.go | 88 ++++++
 test/e2e/io_soak/io_soak_test.go | 228 +++++++++++++++
 test/e2e/io_soak/rawblock_fio.go | 67 +++++
 .../lib/node_disconnect_lib.go | 12 +-
 .../replica_pod_remove_test.go | 8 +-
 .../e2e/pvc_stress_fio/pvc_stress_fio_test.go | 5 +-
 test/e2e/uninstall/uninstall_test.go | 37 +--
 15 files changed, 851 insertions(+), 145 deletions(-)
 create mode 100644 test/e2e/io_soak/README.md
 create mode 100644 test/e2e/io_soak/filesystem_fio.go
 create mode 100644 test/e2e/io_soak/fio.go
 create mode 100644 test/e2e/io_soak/io_soak_test.go
 create mode 100644 test/e2e/io_soak/rawblock_fio.go

diff --git a/test/e2e/basic_volume_io/basic_volume_io_test.go b/test/e2e/basic_volume_io/basic_volume_io_test.go
index 93874bc24..e31a6c85b 100644
--- a/test/e2e/basic_volume_io/basic_volume_io_test.go
+++ b/test/e2e/basic_volume_io/basic_volume_io_test.go
@@ -51,7 +51,9 @@ func basicVolumeIOTest(scName string) {
 ).Should(Equal(true))

 // Run the fio test
- common.RunFio(fioPodName, 20)
+ _, err = common.RunFio(fioPodName, 20, common.FioFsFilename)
+ Expect(err).ToNot(HaveOccurred())
+
 podNames = podNames[:len(podNames)-1]

 // Delete the fio pod
diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go
index 6339ee70f..52729802c 100644
--- a/test/e2e/common/util.go
+++ b/test/e2e/common/util.go
@@ -3,7 +3,6 @@ package common
 import (
 "context"
 "errors"
- "fmt"
 "os/exec"
 "regexp"
 "strconv"
@@ -21,6 +20,8 @@ import (
 logf "sigs.k8s.io/controller-runtime/pkg/log"
 )

+const NSMayastor = "mayastor"
+
 func ApplyDeployYaml(filename string) {
 cmd := exec.Command("kubectl", "apply", "-f", filename)
 cmd.Dir = ""
@@ -36,7 +37,7 @@ func DeleteDeployYaml(filename string) {
 }

 // create a storage class
-func MkStorageClass(scName string, scReplicas int, protocol string, provisioner string) {
+func MkStorageClass(scName string, scReplicas int, protocol string, provisioner string) error {
 createOpts := &storagev1.StorageClass{
 ObjectMeta: metav1.ObjectMeta{
 Name: scName,
@@ -50,14 +51,14 @@ func MkStorageClass(scName string, scReplicas int, protocol string, provisioner

 ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses
 _, createErr := ScApi().Create(context.TODO(), createOpts, metav1.CreateOptions{})
- Expect(createErr).To(BeNil())
+ return createErr
 }

 // remove a storage class
-func RmStorageClass(scName string) {
+func RmStorageClass(scName string) error {
 ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses
 deleteErr := ScApi().Delete(context.TODO(), scName, metav1.DeleteOptions{})
- Expect(deleteErr).To(BeNil())
+ return deleteErr
 }

 // Add a node selector to the given pod definition
@@ -77,7 +78,7 @@ func ApplyNodeSelectorToDeployment(deploymentName string, namespace string, labe
 deployment.Spec.Template.Spec.NodeSelector = make(map[string]string)
 }
 deployment.Spec.Template.Spec.NodeSelector[label] = value
- _, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{})
+ _, err = depApi(NSMayastor).Update(context.TODO(), deployment, metav1.UpdateOptions{})
 Expect(err).ToNot(HaveOccurred())
 }

@@ -88,7 +89,7 @@ func RemoveAllNodeSelectorsFromDeployment(deploymentName string, namespace strin
 Expect(err).ToNot(HaveOccurred())
 if deployment.Spec.Template.Spec.NodeSelector != nil {
 deployment.Spec.Template.Spec.NodeSelector = nil
- _, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{})
+ _, err = depApi(NSMayastor).Update(context.TODO(), deployment, metav1.UpdateOptions{})
 }
 Expect(err).ToNot(HaveOccurred())
 }
@@ -104,11 +105,11 @@ func SetDeploymentReplication(deploymentName string, namespace string, replicas
 deployment, err := depAPI(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 Expect(err).ToNot(HaveOccurred())
 deployment.Spec.Replicas = replicas
- deployment, err = depAPI("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{})
+ deployment, err = depAPI(NSMayastor).Update(context.TODO(), deployment, metav1.UpdateOptions{})
 if err == nil {
 break
 }
- fmt.Printf("Re-trying update attempt due to error: %v\n", err)
+ logf.Log.Info("Re-trying update attempt due to error", "error", err)
 time.Sleep(1 * time.Second)
 }
 Expect(err).ToNot(HaveOccurred())
@@ -204,7 +205,10 @@ func PodPresentOnNode(podNameRegexp string, namespace string, nodeName string) b

 func AfterSuiteCleanup() {
 logf.Log.Info("AfterSuiteCleanup")
- _, _ = DeleteAllVolumeResources()
+ // Placeholder function: do nothing here,
+ // so that post-mortem analysis of the cluster remains possible;
+ // we may later choose to clean up based on
+ // test configuration.
 }

 // Check that no PVs, PVCs and MSVs are still extant.
@@ -233,7 +237,7 @@ func AfterEachCheck() error {
 Version: "v1alpha1",
 Resource: "mayastorvolumes",
 }
- msvs, _ := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{})
+ msvs, _ := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{})
 if len(msvs.Items) != 0 {
 errorMsg += " found leftover MayastorVolumes"
 logf.Log.Info("AfterEachCheck: found leftover MayastorVolumes, test fails.")
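As an aside on SetDeploymentReplication above: the hand-rolled loop
retries on any error. For comparison, client-go ships a canonical
conflict-aware helper in its util/retry package; a rough equivalent
(a sketch, not part of this patch, and it retries only on update
conflicts) would be:

    import "k8s.io/client-go/util/retry"

    retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        // Re-read the deployment on every attempt so the update is
        // applied against the latest resourceVersion.
        deployment, getErr := depAPI(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
        if getErr != nil {
            return getErr
        }
        deployment.Spec.Replicas = replicas
        _, updateErr := depAPI(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
        return updateErr
    })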
diff --git a/test/e2e/common/util_cleanup.go b/test/e2e/common/util_cleanup.go
index ffbdedf82..fb1bdb2dd 100644
--- a/test/e2e/common/util_cleanup.go
+++ b/test/e2e/common/util_cleanup.go
@@ -6,95 +6,137 @@ import (
 "os/exec"
 "time"

- "k8s.io/apimachinery/pkg/runtime/schema"
-
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
 logf "sigs.k8s.io/controller-runtime/pkg/log"
 )

+var ZeroInt64 = int64(0)
+
 /// Delete all pods in the default namespace
-// returns:
-// 1) success i.e. true if all pods were deleted or there were no pods to delete.
-// 2) the number of pods found
-func DeleteAllPods() (bool, int) {
+func DeleteAllPods() (int, error) {
 logf.Log.Info("DeleteAllPods")
- success := true
 numPods := 0
+
 pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 logf.Log.Info("DeleteAllPods: list pods failed.", "error", err)
- success = false
- }
- if err == nil && pods != nil {
+ } else {
 numPods = len(pods.Items)
+ logf.Log.Info("DeleteAllPods: found", "pods", numPods)
 for _, pod := range pods.Items {
 logf.Log.Info("DeleteAllPods: Deleting", "pod", pod.Name)
- if err := DeletePod(pod.Name); err != nil {
- success = false
+ delErr := gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64})
+ if delErr != nil {
+ logf.Log.Info("DeleteAllPods: failed to delete the pod", "podName", pod.Name, "error", delErr)
 }
 }
 }
- return success, numPods
+ return numPods, err
}

-// Make best attempt to delete PVCs, PVs and MSVs
-func DeleteAllVolumeResources() (bool, bool) {
- logf.Log.Info("DeleteAllVolumeResources")
- foundResources := false
- success := true
+// Make best attempt to delete PersistentVolumeClaims;
+// returns the number of PersistentVolumeClaims still present and any error encountered
+func DeleteAllPvcs() (int, error) {
+ logf.Log.Info("DeleteAllPvcs")

 // Delete all PVCs found
- // Phase 1 to delete dangling resources
 pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{})
 if err != nil {
- logf.Log.Info("DeleteAllVolumeResources: list PVCs failed.", "error", err)
- success = false
- }
- if err == nil && pvcs != nil && len(pvcs.Items) != 0 {
- foundResources = true
- logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumeClaims")
+ logf.Log.Info("DeleteAllPvcs: list PersistentVolumeClaims failed.", "error", err)
+ } else if len(pvcs.Items) != 0 {
 for _, pvc := range pvcs.Items {
- if err := DeletePVC(pvc.Name); err != nil {
- success = false
+ logf.Log.Info("DeleteAllPvcs: deleting", "PersistentVolumeClaim", pvc.Name)
+ delErr := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64})
+ if delErr != nil {
+ logf.Log.Info("DeleteAllPvcs: failed to delete", "PersistentVolumeClaim", pvc.Name, "error", delErr)
 }
 }
 }
+ // Wait up to 2 minutes for the PVCs to be deleted
+ numPvcs := 0
+ for attempts := 0; attempts < 120; attempts++ {
+ pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{})
+ if err == nil {
+ numPvcs = len(pvcs.Items)
+ if numPvcs == 0 {
+ break
+ }
+ }
+ time.Sleep(1 * time.Second)
+ }
+
+ logf.Log.Info("DeleteAllPvcs:", "number of PersistentVolumeClaims", numPvcs, "error", err)
+ return numPvcs, err
+}
+
+// Make best attempt to delete PersistentVolumes
+func DeleteAllPvs() (int, error) {
 // Delete all PVs found
+ // First remove all finalizers
 pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
 if err != nil {
- logf.Log.Info("DeleteAllVolumeResources: list PVs failed.", "error", err)
- }
- if err == nil && pvs != nil && len(pvs.Items) != 0 {
- logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumes")
+ logf.Log.Info("DeleteAllPvs: list PersistentVolumes failed.", "error", err)
+ } else if len(pvs.Items) != 0 {
+ empty := make([]string, 0)
 for _, pv := range pvs.Items {
- if err := 
gTestEnv.KubeInt.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}); err != nil { - success = false + finalizers := pv.GetFinalizers() + if len(finalizers) != 0 { + logf.Log.Info("DeleteAllPvs: deleting finalizer for", + "PersistentVolume", pv.Name, "finalizers", finalizers) + pv.SetFinalizers(empty) + _, _ = gTestEnv.KubeInt.CoreV1().PersistentVolumes().Update(context.TODO(), &pv, metav1.UpdateOptions{}) } } } - // Wait 2 minutes for resources to be deleted + // then wait for up to 2 minute for resources to be cleared + numPvs := 0 for attempts := 0; attempts < 120; attempts++ { - numPvcs := 0 - pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvcs != nil { - numPvcs = len(pvcs.Items) - } - - numPvs := 0 pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) - if err == nil && pvs != nil { + if err == nil { numPvs = len(pvs.Items) + if numPvs == 0 { + break + } } + time.Sleep(1 * time.Second) + } - if numPvcs == 0 && numPvs == 0 { - break + // Then delete the PVs + pvs, err = gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Info("DeleteAllPvs: list PersistentVolumes failed.", "error", err) + } else if len(pvs.Items) != 0 { + for _, pv := range pvs.Items { + logf.Log.Info("DeleteAllPvs: deleting PersistentVolume", + "PersistentVolume", pv.Name) + if delErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64}); delErr != nil { + logf.Log.Info("DeleteAllPvs: failed to delete PersistentVolume", + "PersistentVolume", pv.Name, "error", delErr) + } + } + } + // Wait 2 minutes for resources to be deleted + numPvs = 0 + for attempts := 0; attempts < 120; attempts++ { + pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + if err == nil { + numPvs = len(pvs.Items) + if numPvs == 0 { + break + } } time.Sleep(1 * time.Second) } + logf.Log.Info("DeleteAllPvs:", "number of PersistentVolumes", numPvs, "error", err) + return numPvs, err +} +// Make best attempt to delete MayastorVolumes +func DeleteAllMsvs() (int, error) { // If after deleting PVCs and PVs Mayastor volumes are leftover // try cleaning them up explicitly msvGVR := schema.GroupVersionResource{ @@ -106,34 +148,70 @@ func DeleteAllVolumeResources() (bool, bool) { msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) if err != nil { // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly - logf.Log.Info("DeleteAllVolumeResources: list MSVs failed.", "Error", err) + logf.Log.Info("DeleteAllMsvs: list MSVs failed.", "Error", err) } if err == nil && msvs != nil && len(msvs.Items) != 0 { - logf.Log.Info("DeleteAllVolumeResources: deleting MayastorVolumes") for _, msv := range msvs.Items { - if err := DeleteMSV(msv.GetName()); err != nil { - success = false + logf.Log.Info("DeleteAllMsvs: deleting MayastorVolume", "MayastorVolume", msv.GetName()) + if delErr := DeleteMSV(msv.GetName()); delErr != nil { + logf.Log.Info("DeleteAllMsvs: failed deleting MayastorVolume", "MayastorVolume", msv.GetName(), "error", delErr) } } } // Wait 2 minutes for resources to be deleted + numMsvs := 0 for attempts := 0; attempts < 120; attempts++ { - numMsvs := 0 msvs, err := 
gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) if err == nil && msvs != nil { numMsvs = len(msvs.Items) - } - if numMsvs == 0 { - break + if numMsvs == 0 { + break + } } time.Sleep(1 * time.Second) } + logf.Log.Info("DeleteAllMsvs:", "number of MayastorVolumes", numMsvs) + + return numMsvs, err +} + +func DeleteAllPoolFinalizers() (bool, error) { + deletedFinalizer := false + var deleteErr error - return success, foundResources + poolGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorpools", + } + + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Info("DeleteAllPoolFinalisers: list MSPs failed.", "Error", err) + return false, err + } else if len(pools.Items) != 0 { + for _, pool := range pools.Items { + empty := make([]string, 0) + logf.Log.Info("DeleteAllPoolFinalizers", "pool", pool.GetName()) + finalizers := pool.GetFinalizers() + if finalizers != nil { + logf.Log.Info("Removing all finalizers", "pool", pool.GetName(), "finalizer", finalizers) + pool.SetFinalizers(empty) + _, err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Update(context.TODO(), &pool, metav1.UpdateOptions{}) + if err != nil { + deleteErr = err + logf.Log.Info("Pool update finalizer", "error", err) + } else { + deletedFinalizer = true + } + } + } + } + return deletedFinalizer, deleteErr } -func DeleteAllPools() { +func DeleteAllPools() bool { poolGVR := schema.GroupVersionResource{ Group: "openebs.io", Version: "v1alpha1", @@ -149,9 +227,9 @@ func DeleteAllPools() { logf.Log.Info("DeleteAllPools: deleting MayastorPools") for _, pool := range pools.Items { logf.Log.Info("DeleteAllPools: deleting", "pool", pool.GetName()) - err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{}) + err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64}) if err != nil { - logf.Log.Error(err, "Failed to delete pool", pool.GetName()) + logf.Log.Error(err, "DeleteAllPools: failed to delete pool", pool.GetName(), "error", err) } } } @@ -173,32 +251,40 @@ func DeleteAllPools() { if numPools != 0 { logf.Log.Info("DeleteAllPools: ", "Pools", pools.Items) } + return numPools == 0 } +// >=0 definitive number of mayastor pods +// < 0 indeterminate func MayastorUndeletedPodCount() int { - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + ns, err := gTestEnv.KubeInt.CoreV1().Namespaces().Get(context.TODO(), NSMayastor, metav1.GetOptions{}) if err != nil { - logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.") + logf.Log.Error(err, "MayastorUndeletedPodCount: get namespace") + return -1 + } + if ns == nil { + // No namespace => no mayastor pods return 0 } - if pods != nil { - return len(pods.Items) + pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.") + return -1 } - logf.Log.Info("MayastorUndeletedPodCount: nil list returned.") - return 0 + return len(pods.Items) } // Force deletion of all existing mayastor pods -// Returns true if pods were deleted, false otherwise -func ForceDeleteMayastorPods() bool { +// Returns true if pods were deleted, false 
otherwise, +// and the number of pods still present +func ForceDeleteMayastorPods() (bool, int, error) { logf.Log.Info("EnsureMayastorDeleted") pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) if err != nil { logf.Log.Error(err, "EnsureMayastorDeleted: list pods failed.") - return false - } - if pods == nil || len(pods.Items) == 0 { - return false + return false, 0, err + } else if len(pods.Items) == 0 { + return false, 0, nil } logf.Log.Info("EnsureMayastorDeleted: MayastorPods found.", "Count", len(pods.Items)) @@ -211,11 +297,59 @@ func ForceDeleteMayastorPods() bool { } } + podCount := 0 // We have made the best effort to cleanup, give things time to settle. - for attempts := 0; attempts < 30 && MayastorUndeletedPodCount() != 0; attempts++ { + for attempts := 0; attempts < 60 && MayastorUndeletedPodCount() != 0; attempts++ { + pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + if err == nil { + podCount = len(pods.Items) + if podCount == 0 { + break + } + } time.Sleep(2 * time.Second) } - logf.Log.Info("EnsureMayastorDeleted: lingering Mayastor pods were found !!!!!!!!") - return true + return true, podCount, nil +} + +// "Big" sweep, attempts to remove artefacts left over in the cluster +// that would prevent future successful test runs. +func CleanUp() bool { + podCount, delPodsErr := DeleteAllPods() + pvcCount, delPvcErr := DeleteAllPvcs() + pvCount, delPvErr := DeleteAllPvs() + msvCount, delMsvErr := DeleteAllMsvs() + // Pools should not have finalizers if there are no associated volume resources. + poolFinalizerDeleted, delPoolFinalizeErr := DeleteAllPoolFinalizers() + + logf.Log.Info("Resource cleanup", + "podCount", podCount, + "pvcCount", pvcCount, + "pvCount", pvCount, + "msvCount", msvCount, + "delPodsErr", delPodsErr, + "delPvcErr", delPvcErr, + "delPvErr", delPvErr, + "delMsvErr", delMsvErr, + "poolFinalizerDeleted", poolFinalizerDeleted, + "delPoolFinalizeErr", delPoolFinalizeErr, + ) + + scList, delScErr := gTestEnv.KubeInt.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) + if delScErr == nil { + for _, sc := range scList.Items { + if sc.Provisioner == "io.openebs.csi-mayastor" && sc.Name != "mayastor-iscsi" && sc.Name != "mayastor-nvmf" { + logf.Log.Info("Deleting", "storageClass", sc.Name) + _ = gTestEnv.KubeInt.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64}) + } + } + } else { + logf.Log.Info("Error listing storage classes", "error", delScErr) + } + + //TODO: tidy up namespaces? + + // For now ignore delMsvErr, until we figure out how to ignore "no resource of this type" errors + return (podCount+pvcCount+pvCount+msvCount) == 0 && delPodsErr == nil && delPvcErr == nil && delPvErr == nil && delPoolFinalizeErr == nil } diff --git a/test/e2e/common/util_node.go b/test/e2e/common/util_node.go index e2b57eaf5..3b4f2c40e 100644 --- a/test/e2e/common/util_node.go +++ b/test/e2e/common/util_node.go @@ -5,17 +5,17 @@ import ( "context" "errors" "fmt" - "os/exec" - "sigs.k8s.io/controller-runtime/pkg/client" - . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "os/exec" + "sigs.k8s.io/controller-runtime/pkg/client" ) type NodeLocation struct { NodeName string IPAddress string MayastorNode bool + MasterNode bool } // returns vector of populated NodeLocation structs @@ -30,10 +30,14 @@ func GetNodeLocs() ([]NodeLocation, error) { addrstr := "" namestr := "" mayastorNode := false + masterNode := false for label, value := range k8snode.Labels { if label == "openebs.io/engine" && value == "mayastor" { mayastorNode = true } + if label == "node-role.kubernetes.io/master" { + masterNode = true + } } for _, addr := range k8snode.Status.Addresses { if addr.Type == corev1.NodeInternalIP { @@ -44,7 +48,12 @@ func GetNodeLocs() ([]NodeLocation, error) { } } if namestr != "" && addrstr != "" { - NodeLocs = append(NodeLocs, NodeLocation{NodeName: namestr, IPAddress: addrstr, MayastorNode: mayastorNode}) + NodeLocs = append(NodeLocs, NodeLocation{ + NodeName: namestr, + IPAddress: addrstr, + MayastorNode: mayastorNode, + MasterNode: masterNode, + }) } else { return nil, errors.New("node lacks expected fields") } diff --git a/test/e2e/common/util_pvc.go b/test/e2e/common/util_pvc.go index 9253a2423..e16215735 100644 --- a/test/e2e/common/util_pvc.go +++ b/test/e2e/common/util_pvc.go @@ -3,7 +3,6 @@ package common // Utility functions for Persistent Volume Claims and Persistent Volumes import ( "context" - "fmt" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -92,12 +91,14 @@ func GetPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) { return pv.Status.Phase } +var blockVolumeMode = corev1.PersistentVolumeBlock + // Create a PVC and verify that // 1. The PVC status transitions to bound, // 2. The associated PV is created and its status transitions bound // 3. The associated MV is created and has a State "healthy" -func MkPVC(volName string, scName string) string { - fmt.Printf("creating %s, %s\n", volName, scName) +func mkPVC(volName string, scName string, rawBlock bool) string { + logf.Log.Info("creating", "volume", volName, "storageClass", scName) // PVC create options createOpts := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -115,6 +116,10 @@ func MkPVC(volName string, scName string) string { }, } + if rawBlock { + createOpts.Spec.VolumeMode = &blockVolumeMode + } + // Create the PVC. PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims _, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{}) @@ -169,12 +174,28 @@ func MkPVC(volName string, scName string) string { return string(pvc.ObjectMeta.UID) } +// Create a filesystem PVC and verify that +// 1. The PVC status transitions to bound, +// 2. The associated PV is created and its status transitions bound +// 3. The associated MV is created and has a State "healthy" +func MkPVC(volName string, scName string) string { + return mkPVC(volName, scName, false) +} + +// Create a block device PVC and verify that +// 1. The PVC status transitions to bound, +// 2. The associated PV is created and its status transitions bound +// 3. The associated MV is created and has a State "healthy" +func MkRawBlockPVC(volName string, scName string) string { + return mkPVC(volName, scName, true) +} + // Delete the PVC and verify that // 1. The PVC is deleted // 2. The associated PV is deleted // 3. 
The associated MV is deleted func RmPVC(volName string, scName string) { - fmt.Printf("removing %s, %s\n", volName, scName) + logf.Log.Info("Removing volume", "volume", volName, "storageClass", scName) PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims diff --git a/test/e2e/common/util_testpods.go b/test/e2e/common/util_testpods.go index 04194360d..14b7ae909 100644 --- a/test/e2e/common/util_testpods.go +++ b/test/e2e/common/util_testpods.go @@ -3,22 +3,35 @@ package common // Utility functions for test pods. import ( "context" + "errors" "fmt" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "os/exec" - - . "github.com/onsi/gomega" + "strings" logf "sigs.k8s.io/controller-runtime/pkg/log" ) -func RunFio(podName string, duration int) { +// These variables match the settings used in createFioPodDef +var FioFsMountPoint = "/volume" +var FioBlockFilename = "/dev/sdm" +var FioFsFilename = FioFsMountPoint + "/fiotestfile" + +// FIXME: this function runs fio with a bunch of parameters which are not configurable. +func RunFio(podName string, duration int, filename string, args ...string) ([]byte, error) { argRuntime := fmt.Sprintf("--runtime=%d", duration) - cmd := exec.Command( - "kubectl", + argFilename := fmt.Sprintf("--filename=%s", filename) + + logf.Log.Info("RunFio", + "podName", podName, + "duration", duration, + "filename", filename, + "args", args) + + cmdArgs := []string{ "exec", "-it", podName, @@ -26,7 +39,7 @@ func RunFio(podName string, duration int) { "fio", "--name=benchtest", "--size=50m", - "--filename=/volume/test", + argFilename, "--direct=1", "--rw=randrw", "--ioengine=libaio", @@ -35,10 +48,17 @@ func RunFio(podName string, duration int) { "--numjobs=1", "--time_based", argRuntime, + } + if args != nil { + cmdArgs = append(cmdArgs, args...) 
+ } + cmd := exec.Command( + "kubectl", + cmdArgs..., ) cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + output, err := cmd.CombinedOutput() + return output, err } func IsPodRunning(podName string) bool { @@ -59,31 +79,33 @@ func DeletePod(podName string) error { return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) } -func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { - podDef := CreateFioPodDef(podName, volName) - return CreatePod(podDef) -} - /// Create a test fio pod in default namespace, no options and no context -/// mayastor volume is mounted on /volume -func CreateFioPodDef(podName string, volName string) *corev1.Pod { +func createFioPodDef(podName string, volName string, rawBlock bool) *corev1.Pod { + volMounts := []corev1.VolumeMount{ + { + Name: "ms-volume", + MountPath: FioFsMountPoint, + }, + } + volDevices := []corev1.VolumeDevice{ + { + Name: "ms-volume", + DevicePath: FioBlockFilename, + }, + } + podDef := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, Namespace: "default", }, Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, Containers: []corev1.Container{ { Name: podName, Image: "dmonakhov/alpine-fio", Args: []string{"sleep", "1000000"}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "ms-volume", - MountPath: "/volume", - }, - }, }, }, Volumes: []corev1.Volume{ @@ -98,9 +120,34 @@ func CreateFioPodDef(podName string, volName string) *corev1.Pod { }, }, } + if rawBlock { + podDef.Spec.Containers[0].VolumeDevices = volDevices + } else { + podDef.Spec.Containers[0].VolumeMounts = volMounts + } return &podDef } +/// Create a test fio pod in default namespace, no options and no context +/// mayastor volume is mounted on /volume +func CreateFioPodDef(podName string, volName string) *corev1.Pod { + return createFioPodDef(podName, volName, false) +} + +/// Create a test fio pod in default namespace, no options and no context +/// mayastor volume is mounted on /volume +func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { + podDef := createFioPodDef(podName, volName, false) + return CreatePod(podDef) +} + +/// Create a test fio pod in default namespace, no options and no context +/// mayastor device is mounted on /dev/sdm +func CreateRawBlockFioPod(podName string, volName string) (*corev1.Pod, error) { + podDef := createFioPodDef(podName, volName, true) + return CreatePod(podDef) +} + func CheckForTestPods() (bool, error) { logf.Log.Info("CheckForTestPods") foundPods := false @@ -113,3 +160,32 @@ func CheckForTestPods() (bool, error) { } return foundPods, err } + +// Check test pods in a namespace for restarts and failed/unknown state +func CheckPods(namespace string) error { + podApi := gTestEnv.KubeInt.CoreV1().Pods + var errorStrings []string + podList, err := podApi(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return errors.New("failed to list pods") + } + + for _, pod := range podList.Items { + containerStatuses := pod.Status.ContainerStatuses + for _, containerStatus := range containerStatuses { + if containerStatus.RestartCount != 0 { + logf.Log.Info(pod.Name, "restarts", containerStatus.RestartCount) + errorStrings = append(errorStrings, fmt.Sprintf("%s restarted %d times", pod.Name, containerStatus.RestartCount)) + } + if pod.Status.Phase == corev1.PodFailed || pod.Status.Phase == corev1.PodUnknown { + logf.Log.Info(pod.Name, "phase", pod.Status.Phase) + errorStrings = append(errorStrings, 
fmt.Sprintf("%s phase is %v", pod.Name, pod.Status.Phase)) + } + } + } + + if len(errorStrings) != 0 { + return errors.New(strings.Join(errorStrings[:], "; ")) + } + return nil +} diff --git a/test/e2e/io_soak/README.md b/test/e2e/io_soak/README.md new file mode 100644 index 000000000..65af6a246 --- /dev/null +++ b/test/e2e/io_soak/README.md @@ -0,0 +1,14 @@ +# IO Soak test +JIRA: MQ-25 +## Abstract +Runs fio with varying duty cycles concurrently on a number of volumes for an extended duration. + +## Parameters +* `e2e_io_soak_load_factor` : Number of volumes per Mayastor node, type integer +* `e2e_io_soak_replicas` : Number of replicas for each volume, type integer +* `e2e_io_soak_duration` : Duration of fio runs, type string +* `e2e_io_soak_protocols` : Share protocols to run tests with, comma separated list + +`e2e_io_soak_duration` is parsed using `golangs` library function `time.ParseDuration`. +So `e2e_io_soak_duration` string is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". +Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". \ No newline at end of file diff --git a/test/e2e/io_soak/filesystem_fio.go b/test/e2e/io_soak/filesystem_fio.go new file mode 100644 index 000000000..16e0ff180 --- /dev/null +++ b/test/e2e/io_soak/filesystem_fio.go @@ -0,0 +1,67 @@ +package io_soak + +import ( + "e2e-basic/common" + + "fmt" + "time" + + coreV1 "k8s.io/api/core/v1" +) + +// IO soak filesystem fio job + +type FioFsSoakJob struct { + volName string + scName string + podName string + id int +} + +func (job FioFsSoakJob) makeVolume() { + common.MkPVC(job.volName, job.scName) +} + +func (job FioFsSoakJob) removeVolume() { + common.RmPVC(job.volName, job.scName) +} + +func (job FioFsSoakJob) makeTestPod() (*coreV1.Pod, error) { + pod, err := common.CreateFioPod(job.podName, job.volName) + return pod, err +} + +func (job FioFsSoakJob) removeTestPod() error { + return common.DeletePod(job.podName) +} + +func (job FioFsSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + ixp := job.id % len(FioDutyCycles) + RunIoSoakFio( + job.podName, + duration, + FioDutyCycles[ixp].thinkTime, + FioDutyCycles[ixp].thinkTimeBlocks, + false, + doneC, + errC, + ) +} + +func (job FioFsSoakJob) getPodName() string { + return job.podName +} + +func (job FioFsSoakJob) getId() int { + return job.id +} + +func MakeFioFsJob(scName string, id int) FioFsSoakJob { + nm := fmt.Sprintf("fio-filesystem-%s-%d", scName, id) + return FioFsSoakJob{ + volName: nm, + scName: scName, + podName: nm, + id: id, + } +} diff --git a/test/e2e/io_soak/fio.go b/test/e2e/io_soak/fio.go new file mode 100644 index 000000000..8fc5f8076 --- /dev/null +++ b/test/e2e/io_soak/fio.go @@ -0,0 +1,88 @@ +package io_soak + +import ( + "e2e-basic/common" + + "fmt" + "io/ioutil" + "time" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// This table of duty cycles is guesstimates and bear no relation to real loads. 
+// This table of duty cycles contains guesstimates and bears no relation to real loads.
+// TODO: make configurable
+var FioDutyCycles = []struct {
+ thinkTime int
+ thinkTimeBlocks int
+}{
+ {500000, 1000}, // 0.5 second, 1000 blocks
+ {750000, 1000}, // 0.75 second, 1000 blocks
+ {1000000, 2000}, // 1 second, 2000 blocks
+ {1250000, 2000}, // 1.25 seconds, 2000 blocks
+ {1500000, 3000}, // 1.5 seconds, 3000 blocks
+ {1750000, 3000}, // 1.75 seconds, 3000 blocks
+ {2000000, 4000}, // 2 seconds, 4000 blocks
+}
+
+const fixedDuration = 60
+
+// see https://fio.readthedocs.io/en/latest/fio_doc.html#i-o-rate
+// Run fio in a loop of fixed-duration runs to fulfil the requested total duration;
+// this facilitates a relatively timely termination when an error
+// occurs elsewhere.
+// podName - name of the fio pod
+// duration - total time to run fio for
+// thinktime - usecs, stall the job for the specified period of time after an I/O has completed before issuing the next
+// thinktime_blocks - how many blocks to issue, before waiting thinktime usecs.
+// rawBlock - false for filesystem volumes, true for raw block mounts.
+func RunIoSoakFio(podName string, duration time.Duration, thinkTime int, thinkTimeBlocks int, rawBlock bool, doneC chan<- string, errC chan<- error) {
+ secs := int(duration.Seconds())
+ argThinkTime := fmt.Sprintf("--thinktime=%d", thinkTime)
+ argThinkTimeBlocks := fmt.Sprintf("--thinktime_blocks=%d", thinkTimeBlocks)
+
+ logf.Log.Info("Running fio",
+ "pod", podName,
+ "duration", duration,
+ "thinktime", thinkTime,
+ "thinktime_blocks", thinkTimeBlocks,
+ "rawBlock", rawBlock,
+ )
+
+ fioFile := ""
+ if rawBlock {
+ fioFile = common.FioBlockFilename
+ } else {
+ fioFile = common.FioFsFilename
+ }
+
+ for ix := 1; secs > 0; ix++ {
+ runtime := fixedDuration
+ if runtime > secs {
+ runtime = secs
+ }
+ secs -= runtime
+
+ logf.Log.Info("run fio ",
+ "iteration", ix,
+ "pod", podName,
+ "duration", runtime,
+ "thinktime", thinkTime,
+ "thinktime_blocks", thinkTimeBlocks,
+ "rawBlock", rawBlock,
+ "fioFile", fioFile,
+ )
+ output, err := common.RunFio(podName, runtime, fioFile, argThinkTime, argThinkTimeBlocks)
+
+ //TODO: for now shove the output into /tmp
+ _ = ioutil.WriteFile("/tmp/"+podName+".out", output, 0644)
+ //logf.Log.Info(string(output))
+ if err != nil {
+ logf.Log.Info("Abort running fio", "pod", podName, "error", err)
+ errC <- err
+ return
+ }
+ }
+ logf.Log.Info("Finished running fio", "pod", podName, "duration", duration)
+ doneC <- podName
+}
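To make the fan-out arithmetic in io_soak_test.go below concrete, a
worked example (numbers illustrative): with 3 Mayastor worker nodes and
e2e_io_soak_load_factor=2,

    jobCount := 3 * 2 // 6 jobs -> 6 volumes and 6 fio pods

and with e2e_io_soak_protocols="nvmf,iscsi" the job-creation loop
alternates flavours across the per-protocol storage classes:
fs(io-soak-nvmf), rawblock(io-soak-nvmf), fs(io-soak-iscsi),
rawblock(io-soak-iscsi), fs(io-soak-nvmf), rawblock(io-soak-nvmf).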
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var defTimeoutSecs = "120s" + +type IoSoakJob interface { + makeVolume() + makeTestPod() (*corev1.Pod, error) + removeTestPod() error + removeVolume() + run(time.Duration, chan<- string, chan<- error) + getPodName() string +} + +var scNames []string +var jobs []IoSoakJob + +func TestIOSoak(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, "IO soak test, NVMe-oF TCP and iSCSI", rep.GetReporters("io-soak")) +} + +func monitor(errC chan<- error) { + logf.Log.Info("IOSoakTest monitor, checking mayastor and test pods") + for { + time.Sleep(30 * time.Second) + err := common.CheckPods(common.NSMayastor) + if err != nil { + logf.Log.Info("IOSoakTest monitor", "namespace", common.NSMayastor, "error", err) + errC <- err + break + } + err = common.CheckPods("default") + if err != nil { + logf.Log.Info("IOSoakTest monitor", "namespace", "default", "error", err) + errC <- err + break + } + } +} + +/// proto - protocol "nvmf" or "isci" +/// replicas - number of replicas for each volume +/// loadFactor - number of volumes for each mayastor instance +func IOSoakTest(protocols []string, replicas int, loadFactor int, duration time.Duration) { + nodeList, err := common.GetNodeLocs() + Expect(err).ToNot(HaveOccurred()) + + numMayastorNodes := 0 + jobCount := 0 + sort.Slice(nodeList, func(i, j int) bool { return nodeList[i].NodeName < nodeList[j].NodeName }) + for i, node := range nodeList { + if node.MayastorNode && !node.MasterNode { + logf.Log.Info("MayastorNode", "name", node.NodeName, "index", i) + jobCount += loadFactor + numMayastorNodes += 1 + } + } + + Expect(replicas <= numMayastorNodes).To(BeTrue()) + logf.Log.Info("IOSoakTest", "jobs", jobCount, "volumes", jobCount, "test pods", jobCount) + + for _, proto := range protocols { + scName := fmt.Sprintf("io-soak-%s", proto) + logf.Log.Info("Creating", "storage class", scName) + err = common.MkStorageClass(scName, replicas, proto, "io.openebs.csi-mayastor") + Expect(err).ToNot(HaveOccurred()) + scNames = append(scNames, scName) + } + + // Create the set of jobs + idx := 1 + for idx <= jobCount { + for _, scName := range scNames { + if idx > jobCount { + break + } + logf.Log.Info("Creating", "job", "fio filesystem job", "id", idx) + jobs = append(jobs, MakeFioFsJob(scName, idx)) + idx++ + + if idx > jobCount { + break + } + logf.Log.Info("Creating", "job", "fio raw block job", "id", idx) + jobs = append(jobs, MakeFioRawBlockJob(scName, idx)) + idx++ + } + } + + logf.Log.Info("Creating volumes") + // Create the job volumes + for _, job := range jobs { + job.makeVolume() + } + + logf.Log.Info("Creating test pods") + // Create the job test pods + for _, job := range jobs { + pod, err := job.makeTestPod() + Expect(err).ToNot(HaveOccurred()) + Expect(pod).ToNot(BeNil()) + } + + logf.Log.Info("Waiting for test pods to be ready") + // Wait for the test pods to be ready + for _, job := range jobs { + // Wait for the test Pod to transition to running + Eventually(func() bool { + return common.IsPodRunning(job.getPodName()) + }, + defTimeoutSecs, + "1s", + ).Should(Equal(true)) + } + + logf.Log.Info("Starting test execution in all test pods") + // Run the test jobs + doneC, errC := make(chan string), make(chan error) + go monitor(errC) + for _, job := range jobs { + go job.run(duration, doneC, errC) + } + + logf.Log.Info("Waiting for test execution to complete on all test 
pods") + // Wait and check that all test pods have executed successfully + for range jobs { + select { + case podName := <-doneC: + logf.Log.Info("Completed", "pod", podName) + case err := <-errC: + close(doneC) + logf.Log.Error(err, "fio run") + Expect(err).To(BeNil()) + } + } + + logf.Log.Info("All runs complete, deleting test pods") + for _, job := range jobs { + err := job.removeTestPod() + Expect(err).ToNot(HaveOccurred()) + } + + logf.Log.Info("All runs complete, deleting volumes") + for _, job := range jobs { + job.removeVolume() + } + + logf.Log.Info("All runs complete, deleting storage classes") + for _, scName := range scNames { + err = common.RmStorageClass(scName) + Expect(err).ToNot(HaveOccurred()) + } +} + +var _ = Describe("Mayastor Volume IO test", func() { + + AfterEach(func() { + logf.Log.Info("AfterEach") + // Check resource leakage. + err := common.AfterEachCheck() + Expect(err).ToNot(HaveOccurred()) + }) + + It("should verify an NVMe-oF TCP volume can process IO on multiple volumes simultaneously", func() { + replicas := 1 + loadFactor := 2 + duration, _ := time.ParseDuration("30s") + protocols := []string{"nvmf"} + var err error + tmp := os.Getenv("e2e_io_soak_load_factor") + if tmp != "" { + loadFactor, err = strconv.Atoi(tmp) + Expect(err).ToNot(HaveOccurred()) + } + tmp = os.Getenv("e2e_io_soak_duration") + if tmp != "" { + duration, err = time.ParseDuration(tmp) + Expect(err).ToNot(HaveOccurred()) + Expect(duration.Seconds() > 0).To(BeTrue()) + } + tmp = os.Getenv("e2e_io_soak_replicas") + if tmp != "" { + replicas, err = strconv.Atoi(tmp) + Expect(err).ToNot(HaveOccurred()) + } + tmp = os.Getenv("e2e_io_soak_protocols") + if tmp != "" { + protocols = strings.Split(tmp, ",") + } + logf.Log.Info("Parameters", "replicas", replicas, "loadFactor", loadFactor, "duration", duration) + IOSoakTest(protocols, replicas, loadFactor, duration) + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) + common.SetupTestEnv() + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
By("tearing down the test environment") + common.TeardownTestEnv() +}) diff --git a/test/e2e/io_soak/rawblock_fio.go b/test/e2e/io_soak/rawblock_fio.go new file mode 100644 index 000000000..dcb6fb53c --- /dev/null +++ b/test/e2e/io_soak/rawblock_fio.go @@ -0,0 +1,67 @@ +package io_soak + +import ( + "e2e-basic/common" + + "fmt" + "time" + + coreV1 "k8s.io/api/core/v1" +) + +// IO soak raw block fio job + +type FioRawBlockSoakJob struct { + volName string + scName string + podName string + id int +} + +func (job FioRawBlockSoakJob) makeVolume() { + common.MkRawBlockPVC(job.volName, job.scName) +} + +func (job FioRawBlockSoakJob) removeVolume() { + common.RmPVC(job.volName, job.scName) +} + +func (job FioRawBlockSoakJob) makeTestPod() (*coreV1.Pod, error) { + pod, err := common.CreateRawBlockFioPod(job.podName, job.volName) + return pod, err +} + +func (job FioRawBlockSoakJob) removeTestPod() error { + return common.DeletePod(job.podName) +} + +func (job FioRawBlockSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + ixp := job.id % len(FioDutyCycles) + RunIoSoakFio( + job.podName, + duration, + FioDutyCycles[ixp].thinkTime, + FioDutyCycles[ixp].thinkTimeBlocks, + true, + doneC, + errC, + ) +} + +func (job FioRawBlockSoakJob) getPodName() string { + return job.podName +} + +func (job FioRawBlockSoakJob) getId() int { + return job.id +} + +func MakeFioRawBlockJob(scName string, id int) FioRawBlockSoakJob { + nm := fmt.Sprintf("fio-rawblock-%s-%d", scName, id) + return FioRawBlockSoakJob{ + volName: nm, + scName: scName, + podName: nm, + id: id, + } +} diff --git a/test/e2e/node_disconnect/lib/node_disconnect_lib.go b/test/e2e/node_disconnect/lib/node_disconnect_lib.go index 73cf5f380..12d3e23d1 100644 --- a/test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -104,7 +104,8 @@ func (env *DisconnectEnv) PodLossTest() { logf.Log.Info("waiting for pod removal to affect the nexus", "timeout", disconnectionTimeoutSecs) Eventually(func() string { logf.Log.Info("running fio against the volume") - common.RunFio(env.fioPodName, 5) + _, err := common.RunFio(env.fioPodName, 5, common.FioFsFilename) + Expect(err).ToNot(HaveOccurred()) return common.GetMsvState(env.uuid) }, disconnectionTimeoutSecs, // timeout @@ -114,7 +115,8 @@ func (env *DisconnectEnv) PodLossTest() { logf.Log.Info("volume condition", "state", common.GetMsvState(env.uuid)) logf.Log.Info("running fio against the degraded volume") - common.RunFio(env.fioPodName, 20) + _, err := common.RunFio(env.fioPodName, 20, common.FioFsFilename) + Expect(err).ToNot(HaveOccurred()) logf.Log.Info("enabling mayastor pod", "node", env.replicaToRemove) env.UnsuppressMayastorPod() @@ -122,7 +124,8 @@ func (env *DisconnectEnv) PodLossTest() { logf.Log.Info("waiting for the volume to be repaired", "timeout", repairTimeoutSecs) Eventually(func() string { logf.Log.Info("running fio while volume is being repaired") - common.RunFio(env.fioPodName, 5) + _, err := common.RunFio(env.fioPodName, 5, common.FioFsFilename) + Expect(err).ToNot(HaveOccurred()) return common.GetMsvState(env.uuid) }, repairTimeoutSecs, // timeout @@ -132,7 +135,8 @@ func (env *DisconnectEnv) PodLossTest() { logf.Log.Info("volume condition", "state", common.GetMsvState(env.uuid)) logf.Log.Info("running fio against the repaired volume") - common.RunFio(env.fioPodName, 20) + _, err = common.RunFio(env.fioPodName, 20, common.FioFsFilename) + Expect(err).ToNot(HaveOccurred()) } // Common steps required when 
setting up the test. diff --git a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go index 0a14d3886..4432fec14 100644 --- a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go +++ b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go @@ -28,15 +28,17 @@ var _ = Describe("Mayastor replica pod removal test", func() { AfterEach(func() { logf.Log.Info("AfterEach") env.Teardown() // removes fio pod and volume - common.RmStorageClass(gStorageClass) + err := common.RmStorageClass(gStorageClass) + Expect(err).ToNot(HaveOccurred()) // Check resource leakage. - err := common.AfterEachCheck() + err = common.AfterEachCheck() Expect(err).ToNot(HaveOccurred()) }) It("should verify nvmf nexus behaviour when a mayastor pod is removed", func() { - common.MkStorageClass(gStorageClass, 2, "nvmf", "io.openebs.csi-mayastor") + err := common.MkStorageClass(gStorageClass, 2, "nvmf", "io.openebs.csi-mayastor") + Expect(err).ToNot(HaveOccurred()) env = disconnect_lib.Setup("loss-test-pvc-nvmf", gStorageClass, "fio-pod-remove-test") env.PodLossTest() }) diff --git a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go index bd4015b0b..456e1c750 100644 --- a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go +++ b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go @@ -49,7 +49,7 @@ var volNames []volSc // 2. The associated PV is deleted // 3. The associated MV is deleted func testPVC(volName string, scName string, runFio bool) { - fmt.Printf("volume: %s, storageClass:%s, run FIO:%v\n", volName, scName, runFio) + logf.Log.Info("testPVC", "volume", volName, "storageClass", scName, "run FIO", runFio) // PVC create options createOpts := &coreV1.PersistentVolumeClaim{ ObjectMeta: metaV1.ObjectMeta{ @@ -148,7 +148,8 @@ func testPVC(volName string, scName string, runFio bool) { ).Should(Equal(true)) // Run the fio test - Cmn.RunFio(fioPodName, 5) + _, err = Cmn.RunFio(fioPodName, 5, Cmn.FioFsFilename) + Expect(err).ToNot(HaveOccurred()) // Delete the fio pod err = Cmn.DeletePod(fioPodName) diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go index 36a97afa9..d90832ed6 100644 --- a/test/e2e/uninstall/uninstall_test.go +++ b/test/e2e/uninstall/uninstall_test.go @@ -49,15 +49,14 @@ func deleteNamespace() { // Teardown mayastor on the cluster under test. // We deliberately call out to kubectl, rather than constructing the client-go -// objects, so that we can verfiy the local deploy yamls are correct. +// objects, so that we can verify the local deploy yaml files are correct. 
func teardownMayastor() {
-	var podsDeleted bool
-	var pvcsDeleted bool
-	var podCount int
-	var pvcsFound bool
+	var cleaned bool
 
 	logf.Log.Info("Settings:", "cleanup", cleanup)
-	if !cleanup {
+	if cleanup {
+		cleaned = common.CleanUp()
+	} else {
 		found, err := common.CheckForTestPods()
 		if err != nil {
 			logf.Log.Error(err, "Failed to checking for test pods.")
@@ -83,18 +82,10 @@ func teardownMayastor() {
 		}
 		Expect(found).To(BeFalse())
-	} else {
-		// The correct sequence for a reusable cluster is
-		// Delete all pods in the default namespace
-		// Delete all pvcs
-		// Delete all mayastor pools
-		// Then uninstall mayastor
-		podsDeleted, podCount = common.DeleteAllPods()
-		pvcsDeleted, pvcsFound = common.DeleteAllVolumeResources()
+		poolsDeleted := common.DeleteAllPools()
+		Expect(poolsDeleted).To(BeTrue())
 	}
 
-	common.DeleteAllPools()
-
 	logf.Log.Info("Cleanup done, Uninstalling mayastor")
 
 	// Deletes can stall indefinitely, try to mitigate this
 	// by running the deletes on different threads
@@ -132,15 +123,13 @@ func teardownMayastor() {
 
 	if cleanup {
 		// Attempt to forcefully delete mayastor pods
-		forceDeleted := common.ForceDeleteMayastorPods()
-		deleteNamespace()
-		// delete the namespace prior to possibly failing the uninstall
-		// to yield a reusable cluster on fail.
-		Expect(podsDeleted).To(BeTrue())
+		_, podCount, err := common.ForceDeleteMayastorPods()
+		Expect(cleaned).To(BeTrue())
 		Expect(podCount).To(BeZero())
-		Expect(pvcsFound).To(BeFalse())
-		Expect(pvcsDeleted).To(BeTrue())
-		Expect(forceDeleted).To(BeFalse())
+		Expect(err).ToNot(HaveOccurred())
+		// Only delete the namespace if there are no pending resources,
+		// otherwise this hangs.
+		deleteNamespace()
 	} else {
 		Expect(common.MayastorUndeletedPodCount()).To(Equal(0))
 		// More verbose here as deleting the namespace is often where this

From 80de0c7d8a291ecbc42e129f1b379237a63f3f94 Mon Sep 17 00:00:00 2001
From: chriswldenyer
Date: Wed, 3 Mar 2021 07:53:04 +0000
Subject: [PATCH 56/78] test(e2e): avoid allocating agents if not needed

Skipped steps should not allocate Jenkins agents.
---
 Jenkinsfile | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index f4c8b4d90..2002c7224 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -167,7 +167,8 @@ pipeline {
      }
      parallel {
        stage('rust unit tests') {
-         when{
+         when {
+           beforeAgent true
            expression { rust_test == true }
          }
          agent { label 'nixos-mayastor' }
@@ -183,7 +184,8 @@ pipeline {
          }
        }
        stage('grpc tests') {
-         when{
+         when {
+           beforeAgent true
            expression { grpc_test == true }
          }
          agent { label 'nixos-mayastor' }
@@ -198,7 +200,8 @@ pipeline {
          }
        }
        stage('moac unit tests') {
-         when{
+         when {
+           beforeAgent true
            expression { moac_test == true }
          }
          agent { label 'nixos-mayastor' }
@@ -213,12 +216,14 @@ pipeline {
          }
        }
        stage('e2e tests') {
-         when{
+         when {
+           beforeAgent true
            expression { e2e_test == true }
          }
          stages {
            stage('e2e docker images') {
-             when{
+             when {
+               beforeAgent true
                expression { e2e_build_images == true }
              }
              agent { label 'nixos-mayastor' }

From 75144ff74a88d0f4b98bade94cae986d4b33f576 Mon Sep 17 00:00:00 2001
From: Ana Hobden
Date: Wed, 3 Mar 2021 12:39:06 -0800
Subject: [PATCH 57/78] chore: formatting nits

Signed-off-by: Ana Hobden
---
 mayastor/src/bin/spdk.rs | 5 +----
 mayastor/src/replica.rs  | 3 +--
 spdk-sys/build.rs        | 4 +---
 3 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/mayastor/src/bin/spdk.rs b/mayastor/src/bin/spdk.rs
index 49edcbe5a..d448113b6 100644
--- a/mayastor/src/bin/spdk.rs
+++ b/mayastor/src/bin/spdk.rs
@@ -10,12 +10,9 @@ use std::{ ffi::{c_void, CString}, io::{Error, ErrorKind}, iter::Iterator, + os::raw::{c_char, c_int}, ptr::null_mut, vec::Vec, - os::raw::{ - c_char, - c_int, - }, }; use mayastor::delay; diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index 129348dc2..a6cbc8f49 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -3,8 +3,7 @@ //! Replica is a logical data volume exported over nvmf (in SPDK terminology //! an lvol). Here we define methods for easy management of replicas. #![allow(dead_code)] -use std::ffi::CStr; -use std::os::raw::c_char; +use std::{ffi::CStr, os::raw::c_char}; use ::rpc::mayastor as rpc; use snafu::{ResultExt, Snafu}; diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs index 934708f64..a921d1cc3 100644 --- a/spdk-sys/build.rs +++ b/spdk-sys/build.rs @@ -107,9 +107,7 @@ fn main() { #[cfg(target_arch = "x86_64")] let bindings = bindings.clang_arg("-march=nehalem"); - let bindings = bindings - .generate() - .expect("Unable to generate bindings"); + let bindings = bindings.generate().expect("Unable to generate bindings"); bindings .write_to_file(out_path.join("libspdk.rs")) From b250a6d933e725601b188d8f526fb741a862bf56 Mon Sep 17 00:00:00 2001 From: Colin Jones Date: Wed, 3 Mar 2021 16:56:49 +0000 Subject: [PATCH 58/78] fix(nexus): mayastor hangs when creating a nexus if there is an error creating a child When creating a new nexus, as a final step, the newly created nexus is added to a global list of nexus instances. This list is required when handling bdev removal - specifically it is used by lookup_child_from_bdev() to determine the nexus child that is associated with a given bdev. The problem occurs when there is an error creating a nexus, and proper cleanup necessitates the removal of some children that may have already been successfully created. The removal code requires the owning nexus to be present in the global list in order to successfully remove the children, but the nexus has not yet been added to the list, as it has not been successfully created. The solution is to add the (partially created) nexus to the list of global instances as early as possible in the creation process. This means that we also need to ensure that it is removed again if any error is encountered. 
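The shape of the fix, as a minimal standalone sketch: the types below are
simplified stand-ins, and the global instance list is modelled as a plain
Vec supplied by the caller rather than the actual nexus API shown in the
diff that follows.

    // Sketch only: not mayastor's real types.
    struct Nexus {
        name: String,
        children: Vec<String>,
    }

    // Stand-in for child creation, which can fail part-way through.
    fn create_child(uri: &str) -> Result<String, String> {
        if uri.is_empty() {
            Err("invalid child uri".to_string())
        } else {
            Ok(uri.to_string())
        }
    }

    fn nexus_create(
        list: &mut Vec<Nexus>,
        name: &str,
        children: &[&str],
    ) -> Result<(), String> {
        // Register the partially created nexus first: the child cleanup
        // code resolves the owning nexus through this list, so it must
        // already be present before any child is created.
        list.push(Nexus {
            name: name.to_string(),
            children: Vec::new(),
        });
        for child in children {
            match create_child(child) {
                Ok(c) => list.last_mut().expect("just pushed").children.push(c),
                Err(error) => {
                    // Undo the registration so no partially created nexus
                    // remains behind on the error path.
                    list.retain(|n| n.name != name);
                    return Err(error);
                }
            }
        }
        Ok(())
    }

    fn main() {
        let mut list = Vec::new();
        assert!(nexus_create(&mut list, "n1", &["c1", ""]).is_err());
        assert!(list.is_empty()); // the failed create left nothing behind
        nexus_create(&mut list, "n1", &["c1", "c2"]).unwrap();
        assert_eq!(list.len(), 1);
    }

The real change below follows the same ordering: push first, then remove
the instance again on every error path.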
resolves CAS-757 --- control-plane/tests/tests/nexus.rs | 7 +- mayastor/src/bdev/nexus/nexus_bdev.rs | 105 +++++++++++------- .../src/bdev/nexus/nexus_bdev_children.rs | 8 +- mayastor/tests/child_size.rs | 78 +++++++++++++ 4 files changed, 147 insertions(+), 51 deletions(-) create mode 100644 mayastor/tests/child_size.rs diff --git a/control-plane/tests/tests/nexus.rs b/control-plane/tests/tests/nexus.rs index 537b34fd8..e9386412b 100644 --- a/control-plane/tests/tests/nexus.rs +++ b/control-plane/tests/tests/nexus.rs @@ -1,5 +1,3 @@ -#![feature(allow_fail)] - pub mod common; use common::*; @@ -19,14 +17,11 @@ async fn create_nexus_malloc() { .unwrap(); } -// FIXME: CAS-737 #[actix_rt::test] -#[allow_fail] async fn create_nexus_sizes() { let cluster = ClusterBuilder::builder() .with_rest_timeout(std::time::Duration::from_secs(1)) - // don't log whilst we have the allow_fail - .compose_build(|c| c.with_logs(false)) + .compose_build(|c| c.with_logs(true)) .await .unwrap(); diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 9bf7dfe0a..53193fc02 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -11,7 +11,7 @@ use std::{ os::raw::c_void, }; -use futures::channel::oneshot; +use futures::{channel::oneshot, future::join_all}; use nix::errno::Errno; use serde::Serialize; use snafu::{ResultExt, Snafu}; @@ -424,10 +424,7 @@ impl Nexus { max_io_attempts: cfg.err_store_opts.max_io_attempts, }); - n.bdev.set_uuid(match uuid { - Some(uuid) => Some(uuid.to_string()), - None => None, - }); + n.bdev.set_uuid(uuid.map(String::from)); if let Some(child_bdevs) = child_bdevs { n.register_children(child_bdevs); @@ -1033,11 +1030,13 @@ impl Nexus { } } -/// If we fail to create one of the children we will fail the whole operation -/// destroy any created children and return the error. Once created, and we -/// bring the nexus online, there still might be a configuration mismatch that -/// would prevent the nexus to come online. We can only determine this -/// (currently) when online, so we check the errors twice for now. +/// Create a new nexus and bring it online. +/// If we fail to create any of the children, then we fail the whole operation. +/// On failure, we must cleanup by destroying any children that were +/// successfully created. Also, once the nexus is created, there still might +/// be a configuration mismatch that would prevent us from going online. +/// Currently, we can only determine this once we are already online, +/// and so we check the errors twice for now. #[tracing::instrument(level = "debug")] pub async fn nexus_create( name: &str, @@ -1047,61 +1046,85 @@ pub async fn nexus_create( ) -> Result<(), Error> { // global variable defined in the nexus module let nexus_list = instances(); + if nexus_list.iter().any(|n| n.name == name) { - // instead of error we return Ok without making sure that also the - // children are the same, which seems wrong + // FIXME: Instead of error, we return Ok without checking + // that the children match, which seems wrong. return Ok(()); } - let mut ni = Nexus::new(name, size, uuid, None); + // Create a new Nexus object, and immediately add it to the global list. + // This is necessary to ensure proper cleanup, as the code responsible for + // closing a child assumes that the nexus to which it belongs will appear + // in the global list of nexus instances. We must also ensure that the + // nexus instance gets removed from the global list if an error occurs. 
+ nexus_list.push(Nexus::new(name, size, uuid, None)); + + // Obtain a reference to the newly created Nexus object. + let ni = + nexus_list + .iter_mut() + .find(|n| n.name == name) + .ok_or_else(|| Error::NexusNotFound { + name: String::from(name), + })?; for child in children { - if let Err(err) = ni.create_and_register(child).await { - error!("failed to create child {}: {}", child, err); - ni.destroy_children().await; - return Err(err).context(CreateChild { - name: ni.name.clone(), + if let Err(error) = ni.create_and_register(child).await { + error!( + "failed to create nexus {}: failed to create child {}: {}", + name, child, error + ); + ni.close_children().await; + nexus_list.retain(|n| n.name != name); + return Err(Error::CreateChild { + source: error, + name: String::from(name), }); } } match ni.open().await { - // we still have code that waits for children to come online - // this however only works for config files so we need to clean up - // if we get the below error Err(Error::NexusIncomplete { .. }) => { - info!("deleting nexus due to missing children"); - for child in children { - if let Err(e) = bdev_destroy(child).await { - error!("failed to destroy child during cleanup {}", e); - } - } - - return Err(Error::NexusCreate { + // We still have code that waits for children to come online, + // although this currently only works for config files. + // We need to explicitly clean up child bdevs if we get this error. + error!("failed to open nexus {}: missing children", name); + destroy_child_bdevs(name, children).await; + nexus_list.retain(|n| n.name != name); + Err(Error::NexusCreate { name: String::from(name), - }); + }) } - Err(e) => { - error!("failed to open nexus {}: {}", ni.name, e); - ni.destroy_children().await; - return Err(e); + Err(error) => { + error!("failed to open nexus {}: {}", name, error); + ni.close_children().await; + nexus_list.retain(|n| n.name != name); + Err(error) } - Ok(_) => nexus_list.push(ni), + Ok(_) => Ok(()), + } +} + +/// Destroy list of child bdevs +async fn destroy_child_bdevs(name: &str, list: &[String]) { + let futures = list.iter().map(String::as_str).map(bdev_destroy); + let results = join_all(futures).await; + if results.iter().any(|c| c.is_err()) { + error!("{}: Failed to destroy child bdevs", name); } - Ok(()) } /// Lookup a nexus by its name (currently used only by test functions). pub fn nexus_lookup(name: &str) -> Option<&mut Nexus> { - if let Some(nexus) = instances().iter_mut().find(|n| n.name == name) { - Some(nexus) - } else { - None - } + instances() + .iter_mut() + .find(|n| n.name == name) + .map(AsMut::as_mut) } impl Display for Nexus { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index 49be55e6e..d8443d688 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -345,13 +345,13 @@ impl Nexus { }) } } - /// destroy all children that are part of this nexus closes any child - /// that might be open first - pub(crate) async fn destroy_children(&mut self) { + + /// Close each child that belongs to this nexus. 
+	pub(crate) async fn close_children(&mut self) {
 		let futures = self.children.iter_mut().map(|c| c.close());
 		let results = join_all(futures).await;
 		if results.iter().any(|c| c.is_err()) {
-			error!("{}: Failed to destroy child", self.name);
+			error!("{}: Failed to close children", self.name);
 		}
 	}
 
diff --git a/mayastor/tests/child_size.rs b/mayastor/tests/child_size.rs
new file mode 100644
index 000000000..40aead580
--- /dev/null
+++ b/mayastor/tests/child_size.rs
@@ -0,0 +1,78 @@
+use tracing::error;
+
+use once_cell::sync::OnceCell;
+
+use common::MayastorTest;
+use mayastor::{
+    bdev::{nexus_create, nexus_lookup},
+    core::{Bdev, MayastorCliArgs},
+};
+
+pub mod common;
+
+async fn create_nexus(size: u64) -> bool {
+    let children = vec![
+        String::from("malloc:///m0?size_mb=32"),
+        format!("malloc:///m1?size_mb={}", size),
+    ];
+    if let Err(error) =
+        nexus_create("core_nexus", size * 1024 * 1024, None, &children).await
+    {
+        error!("nexus_create() failed: {}", error);
+        return false;
+    }
+    true
+}
+
+static MS: OnceCell<MayastorTest<'static>> = OnceCell::new();
+
+fn mayastor() -> &'static MayastorTest<'static> {
+    let ms = MS.get_or_init(|| MayastorTest::new(MayastorCliArgs::default()));
+    &ms
+}
+
+#[tokio::test]
+async fn child_size_ok() {
+    mayastor()
+        .spawn(async {
+            assert_eq!(Bdev::bdev_first().into_iter().count(), 0);
+            assert!(create_nexus(16).await);
+
+            let bdev = Bdev::lookup_by_name("core_nexus").unwrap();
+            assert_eq!(bdev.name(), "core_nexus");
+
+            let bdev =
+                Bdev::lookup_by_name("m0").expect("child bdev m0 not found");
+            assert_eq!(bdev.name(), "m0");
+
+            let bdev =
+                Bdev::lookup_by_name("m1").expect("child bdev m1 not found");
+            assert_eq!(bdev.name(), "m1");
+
+            let nexus = nexus_lookup("core_nexus").expect("nexus not found");
+            nexus.destroy().await.unwrap();
+
+            assert!(nexus_lookup("core_nexus").is_none());
+            assert!(Bdev::lookup_by_name("core_nexus").is_none());
+            assert!(Bdev::lookup_by_name("m0").is_none());
+            assert!(Bdev::lookup_by_name("m1").is_none());
+            assert_eq!(Bdev::bdev_first().into_iter().count(), 0);
+        })
+        .await;
+}
+
+#[tokio::test]
+async fn child_too_small() {
+    mayastor()
+        .spawn(async {
+            assert_eq!(Bdev::bdev_first().into_iter().count(), 0);
+            assert!(!create_nexus(4).await);
+
+            assert!(nexus_lookup("core_nexus").is_none());
+            assert!(Bdev::lookup_by_name("core_nexus").is_none());
+            assert!(Bdev::lookup_by_name("m0").is_none());
+            assert!(Bdev::lookup_by_name("m1").is_none());
+            assert_eq!(Bdev::bdev_first().into_iter().count(), 0);
+        })
+        .await;
+}

From 7e8bac8c12f3bc8153749c952f4bdbbca40fb0ea Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Tue, 2 Mar 2021 16:44:53 +0000
Subject: [PATCH 59/78] refactor(ctrlp): add core agent

Refactor the node, pool and volume agents into a single core agent.

Tidy up the errors throughout the control plane stack so that we get more
concise error kinds at the rest layer and at each agent endpoint. This will
allow us to expose the correct http responses in the openapi spec, which
will be done in another PR.
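In outline, the error mapping works as below. This is a minimal sketch with
hypothetical, trimmed-down types; the real definitions live in the new
errors.rs added by this patch.

    // Sketch only: stand-ins for the real SvcError/ReplyError types.
    #[derive(Debug)]
    enum SvcError {
        NodeNotFound { node_id: String },
        GrpcConnectTimeout { secs: u64 },
    }

    #[derive(Debug)]
    enum ReplyErrorKind {
        NotFound,
        Timeout,
    }

    #[derive(Debug)]
    struct ReplyError {
        kind: ReplyErrorKind,
        source: String,
    }

    // Each agent-level error converts into a reply error with a concise
    // kind, keeping the whole mapping in one place...
    impl From<SvcError> for ReplyError {
        fn from(error: SvcError) -> Self {
            let kind = match &error {
                SvcError::NodeNotFound { .. } => ReplyErrorKind::NotFound,
                SvcError::GrpcConnectTimeout { .. } => ReplyErrorKind::Timeout,
            };
            ReplyError {
                kind,
                source: format!("{:?}", error),
            }
        }
    }

    // ...so the rest layer only needs the kind to pick a status code.
    fn http_status(error: &ReplyError) -> u16 {
        match error.kind {
            ReplyErrorKind::NotFound => 404,
            ReplyErrorKind::Timeout => 408,
        }
    }

    fn main() {
        let reply: ReplyError = SvcError::NodeNotFound {
            node_id: "node-1".into(),
        }
        .into();
        assert_eq!(http_status(&reply), 404);
        println!("{:?} -> {}", reply, http_status(&reply));
    }

With this shape, each agent endpoint only ever returns its own error type,
and the conversion to a transport-level reply is mechanical.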
--- Cargo.lock | 1 + control-plane/agents/Cargo.toml | 18 +- control-plane/agents/common/src/errors.rs | 300 ++++++ control-plane/agents/common/src/lib.rs | 130 +-- control-plane/agents/common/src/v0/mod.rs | 2 + .../src/{wrapper => }/v0/msg_translation.rs | 0 .../agents/common/src/wrapper/mod.rs | 4 - .../agents/common/src/wrapper/v0/mod.rs | 149 --- .../common/src/wrapper/v0/node_traits.rs | 376 -------- .../agents/common/src/wrapper/v0/pool.rs | 298 ------ .../agents/common/src/wrapper/v0/registry.rs | 533 ----------- .../agents/common/src/wrapper/v0/volume.rs | 375 -------- control-plane/agents/core/src/core/grpc.rs | 115 +++ control-plane/agents/core/src/core/mod.rs | 8 + .../agents/core/src/core/registry.rs | 65 ++ control-plane/agents/core/src/core/wrapper.rs | 883 ++++++++++++++++++ control-plane/agents/core/src/node/mod.rs | 129 +++ control-plane/agents/core/src/node/service.rs | 157 ++++ .../agents/core/src/node/watchdog.rs | 84 ++ control-plane/agents/core/src/pool/mod.rs | 173 ++++ .../agents/core/src/pool/registry.rs | 176 ++++ control-plane/agents/core/src/pool/service.rs | 213 +++++ control-plane/agents/core/src/server.rs | 176 ++++ .../src/server.rs => core/src/volume/mod.rs} | 141 +-- .../agents/core/src/volume/registry.rs | 72 ++ .../src => core/src/volume}/service.rs | 210 +++-- control-plane/agents/examples/service/main.rs | 8 +- control-plane/agents/jsongrpc/src/server.rs | 18 +- control-plane/agents/jsongrpc/src/service.rs | 2 +- control-plane/agents/node/src/server.rs | 367 -------- control-plane/agents/pool/src/server.rs | 241 ----- control-plane/agents/pool/src/service.rs | 196 ---- control-plane/deployer/src/infra/mod.rs | 6 +- control-plane/deployer/src/lib.rs | 2 +- .../mbus-api/examples/server/main.rs | 7 +- control-plane/mbus-api/src/lib.rs | 117 ++- control-plane/mbus-api/src/message_bus/v0.rs | 53 +- control-plane/mbus-api/src/v0.rs | 4 +- control-plane/rest/service/src/v0/children.rs | 7 +- control-plane/rest/service/src/v0/nexuses.rs | 9 +- control-plane/rest/service/src/v0/pools.rs | 9 +- control-plane/rest/service/src/v0/replicas.rs | 46 +- control-plane/rest/src/versions/v0.rs | 155 ++- control-plane/rest/tests/v0_test.rs | 38 +- mayastor/src/grpc/pool_grpc.rs | 6 + nix/pkgs/control-plane/cargo-project.nix | 2 +- nix/pkgs/control-plane/default.nix | 4 +- nix/pkgs/images/default.nix | 4 +- nix/pkgs/mayastor/default.nix | 2 +- 49 files changed, 3164 insertions(+), 2927 deletions(-) create mode 100644 control-plane/agents/common/src/errors.rs create mode 100644 control-plane/agents/common/src/v0/mod.rs rename control-plane/agents/common/src/{wrapper => }/v0/msg_translation.rs (100%) delete mode 100644 control-plane/agents/common/src/wrapper/mod.rs delete mode 100644 control-plane/agents/common/src/wrapper/v0/mod.rs delete mode 100644 control-plane/agents/common/src/wrapper/v0/node_traits.rs delete mode 100644 control-plane/agents/common/src/wrapper/v0/pool.rs delete mode 100644 control-plane/agents/common/src/wrapper/v0/registry.rs delete mode 100644 control-plane/agents/common/src/wrapper/v0/volume.rs create mode 100644 control-plane/agents/core/src/core/grpc.rs create mode 100644 control-plane/agents/core/src/core/mod.rs create mode 100644 control-plane/agents/core/src/core/registry.rs create mode 100644 control-plane/agents/core/src/core/wrapper.rs create mode 100644 control-plane/agents/core/src/node/mod.rs create mode 100644 control-plane/agents/core/src/node/service.rs create mode 100644 control-plane/agents/core/src/node/watchdog.rs create mode 
100644 control-plane/agents/core/src/pool/mod.rs create mode 100644 control-plane/agents/core/src/pool/registry.rs create mode 100644 control-plane/agents/core/src/pool/service.rs create mode 100644 control-plane/agents/core/src/server.rs rename control-plane/agents/{volume/src/server.rs => core/src/volume/mod.rs} (55%) create mode 100644 control-plane/agents/core/src/volume/registry.rs rename control-plane/agents/{volume/src => core/src/volume}/service.rs (65%) delete mode 100644 control-plane/agents/node/src/server.rs delete mode 100644 control-plane/agents/pool/src/server.rs delete mode 100644 control-plane/agents/pool/src/service.rs diff --git a/Cargo.lock b/Cargo.lock index 30cac0dd5..6792b3ad1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -323,6 +323,7 @@ dependencies = [ "lazy_static", "mbus_api", "nats", + "paste", "rpc", "serde", "serde_json", diff --git a/control-plane/agents/Cargo.toml b/control-plane/agents/Cargo.toml index 17d616d73..c158ab3a2 100644 --- a/control-plane/agents/Cargo.toml +++ b/control-plane/agents/Cargo.toml @@ -5,26 +5,13 @@ authors = ["Tiago Castro "] edition = "2018" [[bin]] -name = "kiiss" -path = "kiiss/src/server.rs" - -[[bin]] -name = "node" -path = "node/src/server.rs" - -[[bin]] -name = "pool" -path = "pool/src/server.rs" - -[[bin]] -name = "volume" -path = "volume/src/server.rs" +name = "core" +path = "core/src/server.rs" [[bin]] name = "jsongrpc" path = "jsongrpc/src/server.rs" - [lib] name = "common" path = "common/src/lib.rs" @@ -50,6 +37,7 @@ tracing-futures = "0.2.4" rpc = { path = "../../rpc" } url = "2.2.0" http = "0.2.1" +paste = "1.0.4" [dev-dependencies] composer = { path = "../../composer" } diff --git a/control-plane/agents/common/src/errors.rs b/control-plane/agents/common/src/errors.rs new file mode 100644 index 000000000..6cc17961a --- /dev/null +++ b/control-plane/agents/common/src/errors.rs @@ -0,0 +1,300 @@ +use mbus_api::{ + message_bus::v0::BusError, + v0::*, + ErrorChain, + ReplyError, + ReplyErrorKind, + ResourceKind, +}; +use snafu::{Error, Snafu}; +use tonic::Code; + +/// Common error type for send/receive +#[derive(Debug, Snafu)] +#[snafu(visibility = "pub")] +#[allow(missing_docs)] +pub enum SvcError { + #[snafu(display("Failed to get node '{}' from the node agent", node))] + BusGetNode { node: String, source: BusError }, + #[snafu(display("Failed to get nodes from the node agent"))] + BusGetNodes { source: BusError }, + #[snafu(display("Node '{}' is not online", node))] + NodeNotOnline { node: NodeId }, + #[snafu(display( + "Timed out after '{:?}' attempting to connect to node '{}' via gRPC endpoint '{}'", + timeout, + node_id, + endpoint + ))] + GrpcConnectTimeout { + node_id: String, + endpoint: String, + timeout: std::time::Duration, + }, + #[snafu(display("Failed to connect to node via gRPC"))] + GrpcConnect { source: tonic::transport::Error }, + #[snafu(display("Node '{}' has invalid gRPC URI '{}'", node_id, uri))] + GrpcConnectUri { + node_id: String, + uri: String, + source: http::uri::InvalidUri, + }, + #[snafu(display( + "gRPC request '{}' for '{}' failed with '{}'", + request, + resource.to_string(), + source + ))] + GrpcRequestError { + resource: ResourceKind, + request: String, + source: tonic::Status, + }, + #[snafu(display("Node '{}' not found", node_id))] + NodeNotFound { node_id: NodeId }, + #[snafu(display("Pool '{}' not found", pool_id))] + PoolNotFound { pool_id: PoolId }, + #[snafu(display("Nexus '{}' not found", nexus_id))] + NexusNotFound { nexus_id: String }, + #[snafu(display("Replica '{}' not found", 
replica_id))]
+    ReplicaNotFound { replica_id: ReplicaId },
+    #[snafu(display("Invalid filter value: {:?}", filter))]
+    InvalidFilter { filter: Filter },
+    #[snafu(display("Operation failed due to insufficient resources"))]
+    NotEnoughResources { source: NotEnough },
+    #[snafu(display("Failed to deserialise JsonRpc response"))]
+    JsonRpcDeserialise { source: serde_json::Error },
+    #[snafu(display(
+        "Json RPC call failed for method '{}' with parameters '{}'. Error {}",
+        method,
+        params,
+        error,
+    ))]
+    JsonRpc {
+        method: String,
+        params: String,
+        error: String,
+    },
+    #[snafu(display("Internal error: {}", details))]
+    Internal { details: String },
+    #[snafu(display("Message Bus error"))]
+    MBusError { source: mbus_api::Error },
+    #[snafu(display("Invalid Arguments"))]
+    InvalidArguments {},
+}
+
+impl From<mbus_api::Error> for SvcError {
+    fn from(source: mbus_api::Error) -> Self {
+        Self::MBusError {
+            source,
+        }
+    }
+}
+
+impl From<NotEnough> for SvcError {
+    fn from(source: NotEnough) -> Self {
+        Self::NotEnoughResources {
+            source,
+        }
+    }
+}
+
+impl From<SvcError> for ReplyError {
+    fn from(error: SvcError) -> Self {
+        #[allow(deprecated)]
+        let desc: &String = &error.description().to_string();
+        match error {
+            SvcError::BusGetNode {
+                source, ..
+            } => source,
+            SvcError::BusGetNodes {
+                source,
+            } => source,
+            SvcError::GrpcRequestError {
+                source,
+                request,
+                resource,
+            } => grpc_to_reply_error(SvcError::GrpcRequestError {
+                source,
+                request,
+                resource,
+            }),
+
+            SvcError::InvalidArguments {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::InvalidArgument,
+                resource: ResourceKind::Unknown,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+
+            SvcError::NodeNotOnline {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::FailedPrecondition,
+                resource: ResourceKind::Node,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+
+            SvcError::GrpcConnectTimeout {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::Timeout,
+                resource: ResourceKind::Unknown,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+
+            SvcError::GrpcConnectUri {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Unknown,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+
+            SvcError::GrpcConnect {
+                source,
+            } => ReplyError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Unknown,
+                source: desc.to_string(),
+                extra: source.to_string(),
+            },
+
+            SvcError::NotEnoughResources {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::ResourceExhausted,
+                resource: ResourceKind::Unknown,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+            SvcError::JsonRpcDeserialise {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::JsonGrpc,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+            SvcError::JsonRpc {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::JsonGrpc,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+            SvcError::NodeNotFound {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::NotFound,
+                resource: ResourceKind::Node,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+            SvcError::PoolNotFound {
+                ..
+            } => ReplyError {
+                kind: ReplyErrorKind::NotFound,
+                resource: ResourceKind::Pool,
+                source: desc.to_string(),
+                extra: error.full_string(),
+            },
+            SvcError::ReplicaNotFound {
+                ..
+ } => ReplyError { + kind: ReplyErrorKind::NotFound, + resource: ResourceKind::Replica, + source: desc.to_string(), + extra: error.full_string(), + }, + SvcError::NexusNotFound { + .. + } => ReplyError { + kind: ReplyErrorKind::NotFound, + resource: ResourceKind::Nexus, + source: desc.to_string(), + extra: error.full_string(), + }, + SvcError::InvalidFilter { + .. + } => ReplyError { + kind: ReplyErrorKind::Internal, + resource: ResourceKind::Unknown, + source: desc.to_string(), + extra: error.full_string(), + }, + SvcError::Internal { + .. + } => ReplyError { + kind: ReplyErrorKind::Internal, + resource: ResourceKind::Unknown, + source: desc.to_string(), + extra: error.full_string(), + }, + SvcError::MBusError { + source, + } => source.into(), + } + } +} + +fn grpc_to_reply_error(error: SvcError) -> ReplyError { + match error { + SvcError::GrpcRequestError { + source, + request, + resource, + } => { + let kind = match source.code() { + Code::Ok => ReplyErrorKind::Internal, + Code::Cancelled => ReplyErrorKind::Internal, + Code::Unknown => ReplyErrorKind::Internal, + Code::InvalidArgument => ReplyErrorKind::InvalidArgument, + Code::DeadlineExceeded => ReplyErrorKind::DeadlineExceeded, + Code::NotFound => ReplyErrorKind::NotFound, + Code::AlreadyExists => ReplyErrorKind::AlreadyExists, + Code::PermissionDenied => ReplyErrorKind::PermissionDenied, + Code::ResourceExhausted => ReplyErrorKind::ResourceExhausted, + Code::FailedPrecondition => ReplyErrorKind::FailedPrecondition, + Code::Aborted => ReplyErrorKind::Aborted, + Code::OutOfRange => ReplyErrorKind::OutOfRange, + Code::Unimplemented => ReplyErrorKind::Unimplemented, + Code::Internal => ReplyErrorKind::Internal, + Code::Unavailable => ReplyErrorKind::Unavailable, + Code::DataLoss => ReplyErrorKind::Internal, + Code::Unauthenticated => ReplyErrorKind::Unauthenticated, + Code::__NonExhaustive => ReplyErrorKind::Internal, + }; + let extra = format!("{}::{}", request, source.to_string()); + ReplyError { + kind, + resource, + source: "SvcError::GrpcRequestError".to_string(), + extra, + } + } + _ => unreachable!("Expected a GrpcRequestError!"), + } +} + +/// Not enough resources available +#[derive(Debug, Snafu)] +#[allow(missing_docs)] +pub enum NotEnough { + #[snafu(display( + "Not enough suitable pools available, {}/{}", + have, + need + ))] + OfPools { have: u64, need: u64 }, + #[snafu(display("Not enough replicas available, {}/{}", have, need))] + OfReplicas { have: u64, need: u64 }, + #[snafu(display("Not enough nexuses available, {}/{}", have, need))] + OfNexuses { have: u64, need: u64 }, +} diff --git a/control-plane/agents/common/src/lib.rs b/control-plane/agents/common/src/lib.rs index 4ba22c69d..1ebff5cba 100644 --- a/control-plane/agents/common/src/lib.rs +++ b/control-plane/agents/common/src/lib.rs @@ -1,25 +1,32 @@ #![warn(missing_docs)] -//! Control Plane Services library with emphasis on the message bus interaction. +//! Control Plane Agents library with emphasis on the message bus interaction +//! including errors. //! -//! It's meant to facilitate the creation of services with a helper builder to +//! It's meant to facilitate the creation of agents with a helper builder to //! subscribe handlers for different message identifiers. 
-/// wrapper for mayastor resources -pub mod wrapper; +use std::{ + collections::HashMap, + convert::{Into, TryInto}, + ops::Deref, +}; use async_trait::async_trait; use dyn_clonable::clonable; use futures::{future::join_all, stream::StreamExt}; -use mbus_api::{v0::Liveness, *}; use snafu::{OptionExt, ResultExt, Snafu}; use state::Container; -use std::{ - collections::HashMap, - convert::{Into, TryInto}, - ops::Deref, -}; use tracing::{debug, error}; +use mbus_api::{v0::Liveness, *}; + +use crate::errors::SvcError; + +/// Agent level errors +pub mod errors; +/// Version 0 of the message bus types +pub mod v0; + #[derive(Debug, Snafu)] #[allow(missing_docs)] pub enum ServiceError { @@ -31,11 +38,11 @@ pub enum ServiceError { GetMessageId { channel: Channel, source: Error }, #[snafu(display("Failed to find subscription '{}' on Channel '{}'", id.to_string(), channel.to_string()))] FindSubscription { channel: Channel, id: MessageId }, - #[snafu(display("Failed to handle message id '{}' on Channel '{}'", id.to_string(), channel.to_string()))] + #[snafu(display("Failed to handle message id '{}' on Channel '{}', details: {}", id.to_string(), channel.to_string(), details))] HandleMessage { channel: Channel, id: MessageId, - source: Error, + details: String, }, } @@ -101,7 +108,7 @@ impl<'a> Context<'a> { self.bus } /// get the shared state of type `T` from the context - pub fn get_state(&self) -> Result<&T, Error> { + pub fn get_state(&self) -> Result<&T, SvcError> { match self.state.try_get() { Some(state) => Ok(state), None => { @@ -111,8 +118,8 @@ impl<'a> Context<'a> { type_name ); error!("{}", error_msg); - Err(Error::ServiceError { - message: error_msg, + Err(SvcError::Internal { + details: error_msg, }) } } @@ -128,7 +135,7 @@ pub type Request<'a> = ReceivedRawMessage<'a>; /// which processes the messages and a filter to match message types pub trait ServiceSubscriber: Clone + Send + Sync { /// async handler which processes the messages - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error>; + async fn handler(&self, args: Arguments<'_>) -> Result<(), SvcError>; /// filter which identifies which messages may be routed to the handler fn filter(&self) -> Vec; } @@ -147,7 +154,7 @@ impl Service { /// Connect to the provided message bus server immediately /// Useful for when dealing with async shared data which might required the /// message bus before the builder is complete - pub async fn connect(mut self) -> Self { + pub async fn connect_message_bus(mut self) -> Self { self.message_bus_init().await; self } @@ -178,7 +185,7 @@ impl Service { /// .with_subscription(ServiceHandler::::default()) /// .run().await; /// - /// # async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + /// # async fn handler(&self, args: Arguments<'_>) -> Result<(), SvcError> { /// let store: &NodeStore = args.context.get_state()?; /// let more: &More = args.context.get_state()?; /// # Ok(()) @@ -188,15 +195,26 @@ impl Service { tracing::debug!("Adding shared type: {}", type_name); if !self.shared_state.set(state) { panic!( - "{}", - format!( - "Shared state for type '{}' has already been set!", - type_name - ) + "Shared state for type '{}' has already been set!", + type_name ); } self } + /// Get the shared state of type `T` added with `with_shared_state` + pub fn get_shared_state(&self) -> &T { + match self.shared_state.try_get() { + Some(state) => state, + None => { + let type_name = std::any::type_name::(); + let error_msg = format!( + "Requested data type '{}' not shared via 
with_shared_data", + type_name + ); + panic!("{}", error_msg); + } + } + } /// Add a default liveness endpoint which can be used to probe /// the service for liveness on the current selected channel. @@ -219,10 +237,13 @@ impl Service { #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + async fn handler( + &self, + args: Arguments<'_>, + ) -> Result<(), SvcError> { let request: ReceivedMessage = args.request.try_into()?; - request.reply(()).await + Ok(request.reply(()).await?) } fn filter(&self) -> Vec { vec![Liveness::default().id()] @@ -232,6 +253,14 @@ impl Service { self.with_subscription(ServiceHandler::::default()) } + /// Configure `self` through a configure closure + pub fn configure(self, configure: F) -> Self + where + F: FnOnce(Service) -> Service, + { + configure(self) + } + /// Add a new subscriber on the default channel pub fn with_subscription( self, @@ -308,42 +337,23 @@ impl Service { id: id.clone(), })?; - let result = subscription.handler(arguments.clone()).await; - - Self::assess_handler_error(&result, &arguments).await; - - result.context(HandleMessage { - channel: channel.clone(), - id: id.clone(), - }) - } - - async fn assess_handler_error( - result: &Result<(), Error>, - arguments: &Arguments<'_>, - ) { - if let Err(error) = result.as_ref() { - match error { - Error::DeserializeSend { - .. - } => { - arguments - .request - .respond::<(), _>(Err(ReplyError::DeserializeReq { - message: error.full_string(), - })) - .await - } - _ => { - arguments - .request - .respond::<(), _>(Err(ReplyError::Process { - message: error.full_string(), - })) - .await - } + match subscription.handler(arguments.clone()).await { + Ok(_) => Ok(()), + Err(error) => { + let result = ServiceError::HandleMessage { + channel, + id: id.clone(), + details: error.to_string(), + }; + // respond back to the sender with an error, ignore the outcome + arguments + .request + .respond::<(), _>(Err(error.into())) + .await + // ignore the outcome, since we're already in error + .ok(); + Err(result) } - .ok(); } } diff --git a/control-plane/agents/common/src/v0/mod.rs b/control-plane/agents/common/src/v0/mod.rs new file mode 100644 index 000000000..29f778a90 --- /dev/null +++ b/control-plane/agents/common/src/v0/mod.rs @@ -0,0 +1,2 @@ +/// translate between message bus and gRPC +pub mod msg_translation; diff --git a/control-plane/agents/common/src/wrapper/v0/msg_translation.rs b/control-plane/agents/common/src/v0/msg_translation.rs similarity index 100% rename from control-plane/agents/common/src/wrapper/v0/msg_translation.rs rename to control-plane/agents/common/src/v0/msg_translation.rs diff --git a/control-plane/agents/common/src/wrapper/mod.rs b/control-plane/agents/common/src/wrapper/mod.rs deleted file mode 100644 index d9222c1c3..000000000 --- a/control-plane/agents/common/src/wrapper/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Service backend for the message bus and gRPC - -/// Version 0 of the message bus types -pub mod v0; diff --git a/control-plane/agents/common/src/wrapper/v0/mod.rs b/control-plane/agents/common/src/wrapper/v0/mod.rs deleted file mode 100644 index 4d1518c7f..000000000 --- a/control-plane/agents/common/src/wrapper/v0/mod.rs +++ /dev/null @@ -1,149 +0,0 @@ -//! Implementation of a service backend which interacts with -//! mayastor instances via gRPC and with the other services via the -//! message bus. 
- -mod registry; - -pub use pool::NodeWrapperPool; -pub use registry::Registry; -pub use volume::NodeWrapperVolume; - -use async_trait::async_trait; -use dyn_clonable::clonable; -use mbus_api::{ - message_bus::v0::{BusError, MessageBus, MessageBusTrait}, - v0::*, -}; -use rpc::mayastor::{mayastor_client::MayastorClient, Null}; -use snafu::{ResultExt, Snafu}; -use std::{ - cmp::Ordering, - collections::HashMap, - fmt::Debug, - marker::PhantomData, - str::FromStr, - sync::Arc, -}; -use tokio::sync::Mutex; -use tonic::transport::Channel; - -/// Common error type for send/receive -#[derive(Debug, Snafu)] -#[snafu(visibility = "pub")] -#[allow(missing_docs)] -pub enum SvcError { - #[snafu(display("Failed to get nodes from the node service"))] - BusGetNodes { source: BusError }, - #[snafu(display("Failed to get pools from the pool service"))] - BusGetPools { source: mbus_api::Error }, - #[snafu(display("Failed to create pool from the pool service"))] - BusCreatePool { source: mbus_api::Error }, - #[snafu(display("Failed to destroy pool from the pool service"))] - BusDestroyPool { source: mbus_api::Error }, - #[snafu(display("Failed to fetch replicas from the pool service"))] - BusGetReplicas { source: mbus_api::Error }, - #[snafu(display("Failed to get node '{}' from the node service", node))] - BusGetNode { source: BusError, node: NodeId }, - #[snafu(display("Node '{}' is not online", node))] - NodeNotOnline { node: NodeId }, - #[snafu(display("Failed to connect to node via gRPC"))] - GrpcConnect { source: tonic::transport::Error }, - #[snafu(display("Failed to list pools via gRPC"))] - GrpcListPools { source: tonic::Status }, - #[snafu(display("Failed to create pool via gRPC"))] - GrpcCreatePool { source: tonic::Status }, - #[snafu(display("Failed to destroy pool via gRPC"))] - GrpcDestroyPool { source: tonic::Status }, - #[snafu(display("Failed to list replicas via gRPC"))] - GrpcListReplicas { source: tonic::Status }, - #[snafu(display("Failed to create replica via gRPC"))] - GrpcCreateReplica { source: tonic::Status }, - #[snafu(display("Failed to destroy replica via gRPC"))] - GrpcDestroyReplica { source: tonic::Status }, - #[snafu(display("Failed to share replica via gRPC"))] - GrpcShareReplica { source: tonic::Status }, - #[snafu(display("Failed to unshare replica via gRPC"))] - GrpcUnshareReplica { source: tonic::Status }, - #[snafu(display("Node not found"))] - BusNodeNotFound { node_id: NodeId }, - #[snafu(display("Pool not found"))] - BusPoolNotFound { pool_id: String }, - #[snafu(display("Invalid filter for pools"))] - InvalidFilter { filter: Filter }, - #[snafu(display("Failed to list nexuses via gRPC"))] - GrpcListNexuses { source: tonic::Status }, - #[snafu(display("Failed to create nexus via gRPC"))] - GrpcCreateNexus { source: tonic::Status }, - #[snafu(display("Failed to destroy nexus via gRPC"))] - GrpcDestroyNexus { source: tonic::Status }, - #[snafu(display("Failed to share nexus via gRPC"))] - GrpcShareNexus { source: tonic::Status }, - #[snafu(display("Failed to unshare nexus via gRPC"))] - GrpcUnshareNexus { source: tonic::Status }, - #[snafu(display("Operation failed due to insufficient resources"))] - NotEnoughResources { source: NotEnough }, - #[snafu(display("Invalid arguments"))] - InvalidArguments {}, - #[snafu(display("Not implemented"))] - NotImplemented {}, - #[snafu(display( - "Json RPC call failed for method '{}' with parameters '{}'. 
Error {}", - method, - params, - error, - ))] - JsonRpc { - method: String, - params: String, - error: String, - }, - #[snafu(display("Failed to deserialise JsonRpc response"))] - JsonRpcDeserialise { source: serde_json::Error }, -} - -impl From for SvcError { - fn from(source: NotEnough) -> Self { - Self::NotEnoughResources { - source, - } - } -} - -/// Not enough resources available -#[derive(Debug, Snafu)] -#[allow(missing_docs)] -pub enum NotEnough { - #[snafu(display( - "Not enough suitable pools available, {}/{}", - have, - need - ))] - OfPools { have: u64, need: u64 }, - #[snafu(display("Not enough replicas available, {}/{}", have, need))] - OfReplicas { have: u64, need: u64 }, - #[snafu(display("Not enough nexuses available, {}/{}", have, need))] - OfNexuses { have: u64, need: u64 }, -} - -/// Implement default fake NodeNexusChildTrait for a type -#[macro_export] -macro_rules! impl_no_nexus_child { - ($F:ident) => { - #[async_trait] - impl NodeNexusChildTrait for $F {} - }; -} - -/// Implement default fake NodeNexusTrait for a type -#[macro_export] -macro_rules! impl_no_nexus { - ($F:ident) => { - #[async_trait] - impl NodeNexusTrait for $F {} - }; -} - -pub mod msg_translation; -mod node_traits; -mod pool; -mod volume; diff --git a/control-plane/agents/common/src/wrapper/v0/node_traits.rs b/control-plane/agents/common/src/wrapper/v0/node_traits.rs deleted file mode 100644 index 95684b7b7..000000000 --- a/control-plane/agents/common/src/wrapper/v0/node_traits.rs +++ /dev/null @@ -1,376 +0,0 @@ -use super::*; - -/// Context with the gRPC clients -pub struct GrpcContext { - pub client: MayaClient, -} -pub type MayaClient = MayastorClient; -impl GrpcContext { - pub async fn new(endpoint: String) -> Result { - let uri = format!("http://{}", endpoint); - let uri = http::uri::Uri::from_str(&uri).unwrap(); - let endpoint = tonic::transport::Endpoint::from(uri) - .timeout(std::time::Duration::from_secs(1)); - let client = MayaClient::connect(endpoint) - .await - .context(GrpcConnect {})?; - - Ok(Self { - client, - }) - } -} - -/// Trait for a Node Replica which can be implemented to interact with mayastor -/// node replicas either via gRPC or MBUS or with a service via MBUS -#[async_trait] -#[clonable] -pub trait NodeReplicaTrait: Send + Sync + Debug + Clone { - /// Fetch replicas on all pools via gRPC or MBUS - async fn fetch_replicas(&self) -> Result, SvcError>; - - /// Create a replica on a pool via gRPC or MBUS - async fn create_replica( - &self, - request: &CreateReplica, - ) -> Result; - - /// Share a replica on a pool via gRPC or MBUS - async fn share_replica( - &self, - request: &ShareReplica, - ) -> Result; - - /// Unshare a replica on a pool via gRPC or MBUS - async fn unshare_replica( - &self, - request: &UnshareReplica, - ) -> Result<(), SvcError>; - - /// Destroy a replica on a pool via gRPC or MBUS - async fn destroy_replica( - &self, - request: &DestroyReplica, - ) -> Result<(), SvcError>; - - /// Update internal replica list following a create - fn on_create_replica(&mut self, replica: &Replica); - /// Update internal replica list following a destroy - fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId); - /// Update internal replica list following an update - fn on_update_replica( - &mut self, - pool: &PoolId, - replica: &ReplicaId, - share: &Protocol, - uri: &str, - ); -} - -/// Trait for a Node Pool which can be implemented to interact with mayastor -/// node pools either via gRPC or MBUS or with a service via MBUS -#[async_trait] -#[clonable] -pub trait 
NodePoolTrait: Send + Sync + Debug + Clone { - /// Fetch all pools via gRPC or MBUS - async fn fetch_pools(&self) -> Result, SvcError>; - - /// Create a pool on a node via gRPC or MBUS - async fn create_pool(&self, request: &CreatePool) - -> Result; - - /// Destroy a pool on a node via gRPC or MBUS - async fn destroy_pool(&self, request: &DestroyPool) - -> Result<(), SvcError>; - - /// Update internal pool list following a create - async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]); - /// Update internal pool list following a destroy - fn on_destroy_pool(&mut self, pool: &PoolId); -} - -/// Trait for a Node Nexus which can be implemented to interact with mayastor -/// node nexuses either via gRPC or MBUS or with a service via MBUS -#[async_trait] -#[clonable] -#[allow(unused_variables)] -pub trait NodeNexusTrait: Send + Sync + Debug + Clone { - /// Get the internal nexuses - fn nexuses(&self) -> Vec { - vec![] - } - - /// Fetch all nexuses via gRPC or MBUS - async fn fetch_nexuses(&self) -> Result, SvcError> { - Err(SvcError::NotImplemented {}) - } - - /// Create a nexus on a node via gRPC or MBUS - async fn create_nexus( - &self, - request: &CreateNexus, - ) -> Result { - Err(SvcError::NotImplemented {}) - } - - /// Destroy a nexus on a node via gRPC or MBUS - async fn destroy_nexus( - &self, - request: &DestroyNexus, - ) -> Result<(), SvcError> { - Err(SvcError::NotImplemented {}) - } - - /// Share a nexus on the node via gRPC - async fn share_nexus( - &self, - request: &ShareNexus, - ) -> Result { - Err(SvcError::NotImplemented {}) - } - - /// Unshare a nexus on the node via gRPC - async fn unshare_nexus( - &self, - request: &UnshareNexus, - ) -> Result<(), SvcError> { - Err(SvcError::NotImplemented {}) - } - - /// Update internal nexus list following a create - fn on_create_nexus(&mut self, nexus: &Nexus) {} - /// Update internal nexus following a share/unshare - fn on_update_nexus(&mut self, nexus: &NexusId, uri: &str) {} - /// Update internal nexus list following a destroy - fn on_destroy_nexus(&mut self, nexus: &NexusId) {} -} - -/// Trait for a Node Nexus Children which can be implemented to interact with -/// mayastor node nexus children either via gRPC or MBUS or with a service via -/// MBUS -#[async_trait] -#[clonable] -#[allow(unused_variables)] -pub trait NodeNexusChildTrait: Send + Sync + Debug + Clone { - /// Fetch all children via gRPC or MBUS - async fn fetch_children(&self) -> Result, SvcError> { - Err(SvcError::NotImplemented {}) - } - - /// Add a child to a nexus via gRPC or MBUS - async fn add_child( - &self, - request: &AddNexusChild, - ) -> Result { - Err(SvcError::NotImplemented {}) - } - - /// Remove a child from a nexus via gRPC or MBUS - async fn remove_child( - &self, - request: &RemoveNexusChild, - ) -> Result<(), SvcError> { - Err(SvcError::NotImplemented {}) - } - - /// Update internal nexus children following a create - fn on_add_child(&mut self, nexus: &NexusId, child: &Child) {} - /// Update internal nexus children following a remove - fn on_remove_child(&mut self, request: &RemoveNexusChild) {} -} - -/// Trait for a Node which can be implemented to interact with mayastor -/// node replicas either via gRPC or MBUS or with a service via MBUS -#[async_trait] -#[clonable] -pub trait NodeWrapperTrait: - Send - + Sync - + Debug - + Clone - + NodeReplicaTrait - + NodePoolTrait - + NodeNexusTrait - + NodeNexusChildTrait -{ - /// New NodeWrapper for the node - #[allow(clippy::new_ret_no_self)] - async fn new(node: &NodeId) -> Result - where - 
Self: Sized; - /// Fetch all nodes via the message bus - async fn fetch_nodes() -> Result, SvcError> - where - Self: Sized, - { - MessageBus::get_nodes().await.context(BusGetNodes {}) - } - - /// Get the internal id - fn id(&self) -> NodeId; - /// Get the internal node - fn node(&self) -> Node; - /// Get the internal pools - fn pools(&self) -> Vec; - /// Get the internal pools wrapper - fn pools_wrapper(&self) -> Vec; - /// Get the internal replicas - fn replicas(&self) -> Vec; - - /// Check if the node is online - fn is_online(&self) -> bool; - /// Fallible Result used by operations that should only proceed with the - /// node online - fn online_only(&self) -> Result<(), SvcError> { - if !self.is_online() { - Err(SvcError::NodeNotOnline { - node: self.node().id, - }) - } else { - Ok(()) - } - } - - /// Update this node with the latest information from the message bus and - /// mayastor - async fn update(&mut self); - /// Set the node state - fn set_state(&mut self, state: NodeState); - - /// Get the gRPC context with the mayastor proto handle - async fn grpc_client(&self) -> Result { - self.online_only()?; - GrpcContext::new(self.node().grpc_endpoint.clone()).await - } -} -/// Handy Boxed NodeWrapperTrait -pub type NodeWrapper = Box; - -/// Wrapper over the message bus Pools -/// With the respective node and pool replicas -#[derive(Clone, Debug, Default, Eq, PartialEq)] -pub struct PoolWrapper { - pool: Pool, - replicas: Vec, -} - -impl PoolWrapper { - /// New Pool wrapper with the pool and replicas - pub fn new_from(pool: &Pool, replicas: &[Replica]) -> Self { - Self { - pool: pool.clone(), - replicas: replicas.into(), - } - } - - /// Get the internal pool - pub fn pool(&self) -> Pool { - self.pool.clone() - } - /// Get the pool uuid - pub fn uuid(&self) -> PoolId { - self.pool.id.clone() - } - /// Get the pool node name - pub fn node(&self) -> NodeId { - self.pool.node.clone() - } - /// Get the pool state - pub fn state(&self) -> PoolState { - self.pool.state.clone() - } - - /// Get the free space - pub fn free_space(&self) -> u64 { - if self.pool.capacity >= self.pool.used { - self.pool.capacity - self.pool.used - } else { - // odd, let's report no free space available - tracing::error!( - "Pool '{}' has a capacity of '{} B' but is using '{} B'", - self.pool.id, - self.pool.capacity, - self.pool.used - ); - 0 - } - } - - /// Set pool state as unknown - pub fn set_unknown(&mut self) { - self.pool.state = PoolState::Unknown; - } - - /// Get all replicas from this pool - pub fn replicas(&self) -> Vec { - self.replicas.clone() - } - - /// Add replica to list - pub fn added_replica(&mut self, replica: &Replica) { - self.replicas.push(replica.clone()) - } - /// Remove replica from list - pub fn removed_replica(&mut self, uuid: &ReplicaId) { - self.replicas.retain(|replica| &replica.uuid != uuid) - } - /// update replica from list - pub fn updated_replica( - &mut self, - uuid: &ReplicaId, - share: &Protocol, - uri: &str, - ) { - if let Some(replica) = self - .replicas - .iter_mut() - .find(|replica| &replica.uuid == uuid) - { - replica.share = share.clone(); - replica.uri = uri.to_string(); - } - } -} - -// 1. state ( online > degraded ) -// 2. 
smaller n replicas -// (here we should have pool IO stats over time so we can pick less active -// pools rather than the number of replicas which is useless if the volumes -// are not active) -impl PartialOrd for PoolWrapper { - fn partial_cmp(&self, other: &Self) -> Option { - match self.pool.state.partial_cmp(&other.pool.state) { - Some(Ordering::Greater) => Some(Ordering::Greater), - Some(Ordering::Less) => Some(Ordering::Less), - Some(Ordering::Equal) => { - match self.replicas.len().cmp(&other.replicas.len()) { - Ordering::Greater => Some(Ordering::Greater), - Ordering::Less => Some(Ordering::Less), - Ordering::Equal => { - Some(self.free_space().cmp(&other.free_space())) - } - } - } - None => None, - } - } -} - -impl Ord for PoolWrapper { - fn cmp(&self, other: &Self) -> Ordering { - match self.pool.state.partial_cmp(&other.pool.state) { - Some(Ordering::Greater) => Ordering::Greater, - Some(Ordering::Less) => Ordering::Less, - Some(Ordering::Equal) => { - match self.replicas.len().cmp(&other.replicas.len()) { - Ordering::Greater => Ordering::Greater, - Ordering::Less => Ordering::Less, - Ordering::Equal => { - self.free_space().cmp(&other.free_space()) - } - } - } - None => Ordering::Equal, - } - } -} diff --git a/control-plane/agents/common/src/wrapper/v0/pool.rs b/control-plane/agents/common/src/wrapper/v0/pool.rs deleted file mode 100644 index 4165efa89..000000000 --- a/control-plane/agents/common/src/wrapper/v0/pool.rs +++ /dev/null @@ -1,298 +0,0 @@ -use super::{node_traits::*, *}; -use crate::wrapper::v0::msg_translation::{MessageBusToRpc, RpcToMessageBus}; - -/// Implementation of the trait NodeWrapperPool for the pool service -#[derive(Debug, Default, Clone)] -pub struct NodeWrapperPool { - node: Node, - pools: HashMap, -} - -#[async_trait] -impl NodePoolTrait for NodeWrapperPool { - /// Fetch all pools from this node via gRPC - async fn fetch_pools(&self) -> Result, SvcError> { - let mut ctx = self.grpc_client().await?; - let rpc_pools = ctx - .client - .list_pools(Null {}) - .await - .context(GrpcListPools {})?; - let rpc_pools = &rpc_pools.get_ref().pools; - let pools = rpc_pools - .iter() - .map(|p| rpc_pool_to_bus(p, self.node.id.clone())) - .collect(); - Ok(pools) - } - - /// Create a pool on the node via gRPC - async fn create_pool( - &self, - request: &CreatePool, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let rpc_pool = ctx - .client - .create_pool(request.to_rpc()) - .await - .context(GrpcCreatePool {})?; - - Ok(rpc_pool_to_bus(&rpc_pool.into_inner(), self.id())) - } - - /// Destroy a pool on the node via gRPC - async fn destroy_pool( - &self, - request: &DestroyPool, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .destroy_pool(request.to_rpc()) - .await - .context(GrpcDestroyPool {})?; - - Ok(()) - } - - async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]) { - self.pools - .insert(pool.id.clone(), PoolWrapper::new_from(&pool, replicas)); - } - - fn on_destroy_pool(&mut self, pool: &PoolId) { - self.pools.remove(pool); - } -} - -#[async_trait] -impl NodeReplicaTrait for NodeWrapperPool { - /// Fetch all replicas from this node via gRPC - async fn fetch_replicas(&self) -> Result, SvcError> { - let mut ctx = self.grpc_client().await?; - let rpc_pools = ctx - .client - .list_replicas(Null {}) - .await - .context(GrpcListPools {})?; - let rpc_pools = &rpc_pools.get_ref().replicas; - let pools = rpc_pools - .iter() - .map(|p| rpc_replica_to_bus(p, self.node.id.clone())) - .collect(); 
- Ok(pools) - } - - /// Create a replica on the pool via gRPC - async fn create_replica( - &self, - request: &CreateReplica, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let rpc_replica = ctx - .client - .create_replica(request.to_rpc()) - .await - .context(GrpcCreateReplica {})?; - - Ok(rpc_replica_to_bus(&rpc_replica.into_inner(), self.id())) - } - - /// Share a replica on the pool via gRPC - async fn share_replica( - &self, - request: &ShareReplica, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let share = ctx - .client - .share_replica(request.to_rpc()) - .await - .context(GrpcShareReplica {})?; - - Ok(share.into_inner().uri) - } - - /// Unshare a replica on the pool via gRPC - async fn unshare_replica( - &self, - request: &UnshareReplica, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .share_replica(request.to_rpc()) - .await - .context(GrpcUnshareReplica {})?; - - Ok(()) - } - - /// Destroy a replica on the pool via gRPC - async fn destroy_replica( - &self, - request: &DestroyReplica, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .destroy_replica(request.to_rpc()) - .await - .context(GrpcDestroyReplica {})?; - - Ok(()) - } - - fn on_create_replica(&mut self, replica: &Replica) { - if let Some(pool) = self.pools.get_mut(&replica.pool) { - pool.added_replica(replica); - } - } - - fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId) { - if let Some(pool) = self.pools.get_mut(pool) { - pool.removed_replica(replica) - } - } - - fn on_update_replica( - &mut self, - pool: &PoolId, - replica: &ReplicaId, - share: &Protocol, - uri: &str, - ) { - if let Some(pool) = self.pools.get_mut(pool) { - pool.updated_replica(replica, share, uri); - } - } -} - -#[async_trait] -impl NodeWrapperTrait for NodeWrapperPool { - async fn new(node: &NodeId) -> Result { - Ok(Box::new(Self::new_wrapper(node).await?)) - } - - fn id(&self) -> NodeId { - self.node.id.clone() - } - fn node(&self) -> Node { - self.node.clone() - } - fn pools(&self) -> Vec { - self.pools.values().map(|p| p.pool()).collect() - } - fn pools_wrapper(&self) -> Vec { - self.pools.values().cloned().collect() - } - fn replicas(&self) -> Vec { - self.pools - .values() - .map(|p| p.replicas()) - .flatten() - .collect() - } - fn is_online(&self) -> bool { - self.node.state == NodeState::Online - } - - async fn update(&mut self) { - match Self::new_wrapper(&self.node.id).await { - Ok(node) => { - let old_state = self.node.state.clone(); - *self = node; - if old_state != self.node.state { - tracing::error!( - "Node '{}' changed state from '{}' to '{}'", - self.node.id, - old_state.to_string(), - self.node.state.to_string() - ) - } - } - Err(error) => { - tracing::error!( - "Failed to update the node '{}', error: {}", - self.node.id, - error - ); - self.set_state(NodeState::Unknown); - } - } - } - fn set_state(&mut self, state: NodeState) { - if self.node.state != state { - tracing::info!( - "Node '{}' state is now {}", - self.node.id, - state.to_string() - ); - self.node.state = state; - for (_, pool) in self.pools.iter_mut() { - pool.set_unknown(); - } - } - } -} - -impl NodeWrapperPool { - /// Fetch node via the message bus - async fn fetch_node(node: &NodeId) -> Result { - MessageBus::get_node(node).await.context(BusGetNode { - node, - }) - } - - /// New node wrapper for the pool service containing - /// a list of pools and replicas - async fn new_wrapper(node: &NodeId) -> Result { - let mut node 
= Self { - // if we can't even fetch the node, then no point in proceeding - node: NodeWrapperPool::fetch_node(node).await?, - ..Default::default() - }; - - // if the node is not online, don't even bother trying to connect - if node.is_online() { - let pools = node.fetch_pools().await?; - let replicas = node.fetch_replicas().await?; - - for pool in &pools { - let replicas = replicas - .iter() - .filter(|r| r.pool == pool.id) - .cloned() - .collect::>(); - node.on_create_pool(pool, &replicas).await; - } - } - // we've got a node, but we might not have the full picture if it's - // offline - Ok(node) - } -} - -impl_no_nexus_child!(NodeWrapperPool); -impl_no_nexus!(NodeWrapperPool); - -/// Helper methods to convert between the message bus types and the -/// mayastor gRPC types - -/// convert rpc pool to a message bus pool -fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: NodeId) -> Pool { - let mut pool = rpc_pool.to_mbus(); - pool.node = id; - pool -} - -/// convert rpc replica to a message bus replica -fn rpc_replica_to_bus( - rpc_replica: &rpc::mayastor::Replica, - id: NodeId, -) -> Replica { - let mut replica = rpc_replica.to_mbus(); - replica.node = id; - replica -} diff --git a/control-plane/agents/common/src/wrapper/v0/registry.rs b/control-plane/agents/common/src/wrapper/v0/registry.rs deleted file mode 100644 index 2ebda3e43..000000000 --- a/control-plane/agents/common/src/wrapper/v0/registry.rs +++ /dev/null @@ -1,533 +0,0 @@ -use super::{node_traits::*, *}; - -/// When operating on a resource which is not found, determines whether to -/// Ignore/Fail the operation or try and fetch the latest version, if possible -#[derive(Clone, Debug, Eq, PartialEq)] -enum NotFoundPolicy { - #[allow(dead_code)] - Ignore, - Fetch, -} - -impl Default for NotFoundPolicy { - fn default() -> Self { - NotFoundPolicy::Fetch - } -} - -/// Registry with NodeWrapperTrait which allows us to get the resources either -/// via gRPC or message bus in a service specific way. -/// Event propagation from mayastor/services would be useful to avoid thrashing -/// mayastor instances with gRPC and services with message bus requests. For now -/// we update the the registry: -/// every `N` seconds as it queries the node service -/// for changes for every request that reaches the instances, it updates itself -/// with the result. -/// `T` is the specific type of the NodeWrapperTrait which allocates Node helper -/// Wrappers. -/// List operations list what the object has been built with or what the cache -/// has. Fetch operations make use of the node wrapper trait to fetch from -/// mayastor nodes/other services. 
-#[derive(Clone, Default, Debug)] -pub struct Registry { - nodes: Arc>>, - update_period: std::time::Duration, - not_found: NotFoundPolicy, - _t: PhantomData, -} - -impl Registry { - /// Create a new registry with the `period` for updates - pub fn new(period: std::time::Duration) -> Self { - Self { - update_period: period, - ..Default::default() - } - } - /// Start thread which updates the registry - pub fn start(&self) { - let registry = self.clone(); - tokio::spawn(async move { - registry.poller().await; - }); - } - - /// List all cached node wrappers - async fn list_nodes_wrapper(&self) -> Vec { - let nodes = self.nodes.lock().await; - nodes.values().cloned().collect() - } - - /// List all cached nodes - pub async fn list_nodes(&self) -> Vec { - let nodes = self.list_nodes_wrapper().await; - nodes.iter().map(|n| n.node()).collect() - } - - /// List all cached pool wrappers - pub async fn list_pools_wrapper(&self) -> Vec { - let nodes = self.nodes.lock().await; - nodes - .values() - .map(|node| node.pools_wrapper()) - .flatten() - .collect() - } - - /// Fetch all pools wrapper - pub async fn fetch_pools_wrapper(&self) -> Vec { - match T::fetch_nodes().await { - Ok(mut nodes) => { - for node in &mut nodes { - self.found_node(node).await; - } - } - Err(error) => { - tracing::error!( - "Failed to fetch the latest node information, '{}'", - error - ); - } - }; - - self.list_pools_wrapper().await - } - - /// List all cached pools - pub async fn list_pools(&self) -> Vec { - let nodes = self.nodes.lock().await; - nodes.values().map(|node| node.pools()).flatten().collect() - } - - /// List all cached pools from node - pub async fn list_node_pools(&self, node: &NodeId) -> Vec { - let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { - node.pools() - } else { - // or return error, node not found? - vec![] - } - } - - /// List all cached replicas - pub async fn list_replicas(&self) -> Vec { - let nodes = self.nodes.lock().await; - nodes - .values() - .map(|node| node.replicas()) - .flatten() - .collect() - } - - /// List all cached replicas from node - pub async fn list_node_replicas(&self, node: &NodeId) -> Vec { - let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { - node.replicas() - } else { - // or return error, node not found? - vec![] - } - } - - /// Create pool - pub async fn create_pool( - &self, - request: &CreatePool, - ) -> Result { - let pool = self - .get_node(&request.node) - .await? 
- .create_pool(request) - .await?; - self.on_pool_created(&pool).await; - Ok(pool) - } - - /// Get current list of known nodes - async fn get_known_nodes(&self, node_id: &NodeId) -> Option { - let nodes = self.nodes.lock().await; - nodes.get(node_id).cloned() - } - /// Get node `node_id` - async fn get_node( - &self, - node_id: &NodeId, - ) -> Result { - let mut nodes = self.nodes.lock().await; - let node = match nodes.get(node_id) { - Some(node) => node.clone(), - None => { - if self.not_found == NotFoundPolicy::Fetch { - let node = T::new(node_id).await; - if let Ok(node) = node { - nodes.insert(node.id(), node.clone()); - node - } else { - return Err(SvcError::BusNodeNotFound { - node_id: node_id.into(), - }); - } - } else { - return Err(SvcError::BusNodeNotFound { - node_id: node_id.into(), - }); - } - } - }; - Ok(node) - } - /// Registry events on crud operations - async fn on_pool_created(&self, pool: &Pool) { - if let Ok(node) = self.get_node(&pool.node).await { - // most likely no replicas, but in case it's an "import" - // let's go ahead and fetch them - let replicas = node.fetch_replicas().await.unwrap_or_default(); - { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&pool.node); - if let Some(node) = node { - node.on_create_pool(pool, &replicas).await; - } - } - } - } - async fn on_pool_destroyed(&self, request: &DestroyPool) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&request.node); - if let Some(node) = node { - node.on_destroy_pool(&request.id) - } - } - async fn on_replica_added(&self, replica: &Replica) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&replica.node); - if let Some(node) = node { - node.on_create_replica(replica); - } - } - async fn on_replica_removed(&self, request: &DestroyReplica) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&request.node); - if let Some(node) = node { - node.on_destroy_replica(&request.pool, &request.uuid); - } - } - async fn reg_update_replica( - &self, - node: &NodeId, - pool: &PoolId, - id: &ReplicaId, - share: &Protocol, - uri: &str, - ) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(node); - if let Some(node) = node { - node.on_update_replica(pool, id, share, uri); - } - } - - /// Destroy pool and update registry - pub async fn destroy_pool( - &self, - request: &DestroyPool, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.destroy_pool(&request).await?; - self.on_pool_destroyed(&request).await; - Ok(()) - } - - /// Create replica and update registry - pub async fn create_replica( - &self, - request: &CreateReplica, - ) -> Result { - let node = self.get_node(&request.node).await?; - let replica = node.create_replica(&request).await?; - self.on_replica_added(&replica).await; - Ok(replica) - } - - /// Destroy replica and update registry - pub async fn destroy_replica( - &self, - request: &DestroyReplica, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.destroy_replica(request).await?; - self.on_replica_removed(request).await; - Ok(()) - } - - /// Share replica and update registry - pub async fn share_replica( - &self, - request: &ShareReplica, - ) -> Result { - let node = self.get_node(&request.node).await?; - let share = node.share_replica(request).await?; - self.reg_update_replica( - &request.node, - &request.pool, - &request.uuid, - &request.protocol, - &share, - ) - .await; - Ok(share) - } - - /// Unshare replica and update registry 
- pub async fn unshare_replica( - &self, - request: &UnshareReplica, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.unshare_replica(request).await?; - self.reg_update_replica( - &request.node, - &request.pool, - &request.uuid, - &Protocol::Off, - "", - ) - .await; - Ok(()) - } - - async fn on_create_nexus(&self, nexus: &Nexus) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&nexus.node); - if let Some(node) = node { - node.on_create_nexus(nexus); - } - } - async fn on_destroy_nexus(&self, request: &DestroyNexus) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&request.node); - if let Some(node) = node { - node.on_destroy_nexus(&request.uuid); - } - } - async fn on_add_nexus_child( - &self, - node: &NodeId, - nexus: &NexusId, - child: &Child, - ) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(node); - if let Some(node) = node { - node.on_add_child(nexus, child); - } - } - async fn on_remove_nexus_child(&self, request: &RemoveNexusChild) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(&request.node); - if let Some(node) = node { - node.on_remove_child(request); - } - } - async fn on_update_nexus(&self, node: &NodeId, nexus: &NexusId, uri: &str) { - let mut nodes = self.nodes.lock().await; - let node = nodes.get_mut(node); - if let Some(node) = node { - node.on_update_nexus(nexus, uri); - } - } - - /// List all cached nexuses - pub async fn list_nexuses(&self) -> Vec { - let nodes = self.nodes.lock().await; - nodes - .values() - .map(|node| node.nexuses()) - .flatten() - .collect() - } - - /// List all cached nexuses from node - pub async fn list_node_nexuses(&self, node: &NodeId) -> Vec { - let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { - node.nexuses() - } else { - // hmm, or return error, node not found? 
- vec![] - } - } - - /// Create nexus - pub async fn create_nexus( - &self, - request: &CreateNexus, - ) -> Result { - let node = self.get_node(&request.node).await?; - let nexus = node.create_nexus(request).await?; - self.on_create_nexus(&nexus).await; - Ok(nexus) - } - - /// Destroy nexus - pub async fn destroy_nexus( - &self, - request: &DestroyNexus, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.destroy_nexus(request).await?; - self.on_destroy_nexus(request).await; - Ok(()) - } - - /// Share nexus - pub async fn share_nexus( - &self, - request: &ShareNexus, - ) -> Result { - let node = self.get_node(&request.node).await?; - let share = node.share_nexus(request).await?; - self.on_update_nexus(&request.node, &request.uuid, &share) - .await; - Ok(share) - } - - /// Unshare nexus - pub async fn unshare_nexus( - &self, - request: &UnshareNexus, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.unshare_nexus(request).await?; - self.on_update_nexus(&request.node, &request.uuid, "").await; - Ok(()) - } - - /// Add nexus child - pub async fn add_nexus_child( - &self, - request: &AddNexusChild, - ) -> Result { - let node = self.get_node(&request.node).await?; - let child = node.add_child(request).await?; - self.on_add_nexus_child(&request.node, &request.nexus, &child) - .await; - Ok(child) - } - - /// Remove nexus child - pub async fn remove_nexus_child( - &self, - request: &RemoveNexusChild, - ) -> Result<(), SvcError> { - let node = self.get_node(&request.node).await?; - node.remove_child(request).await?; - self.on_remove_nexus_child(request).await; - Ok(()) - } - - /// Found this node via the node service - /// Update its resource list or add it to the registry if not there yet - async fn found_node(&self, node: &Node) { - match &node.state { - NodeState::Online => { - self.add_or_update_node(node).await; - } - state => { - // if not online, then only update the node state if it already - // exists in the registry, and don't even try to - // add it - let mut registry = self.nodes.lock().await; - if let Some((_, existing_node)) = - registry.iter_mut().find(|(id, _)| id == &&node.id) - { - existing_node.set_state(state.clone()); - } - } - } - } - - /// Mark nodes as missing if they are no longer discoverable by the node - /// service - async fn mark_missing_nodes(&self, live_nodes: &[Node]) { - let mut registry = self.nodes.lock().await; - for (name, node) in registry.iter_mut() { - let found = live_nodes.iter().find(|n| &n.id == name); - // if a node from the registry is not found then mark it as missing - if found.is_none() { - node.set_state(NodeState::Unknown); - } - } - } - - /// Update node from the registry - async fn update_node(&self, mut node: NodeWrapper) { - // update all resources from the node: nexus, pools, etc... 
- // note this is done this way to avoid holding the lock whilst - // we're doing gRPC requests - node.update().await; - let mut registry = self.nodes.lock().await; - registry.insert(node.id(), node.clone()); - } - - /// Add new node to the registry - async fn add_node(&self, node: &Node) { - match T::new(&node.id).await { - Ok(node) => { - let mut registry = self.nodes.lock().await; - registry.insert(node.id(), node.clone()); - } - Err(error) => { - tracing::error!( - "Error when adding node '{}': {}", - node.id, - error - ); - } - } - } - - /// Add or update a node (depending on whether the registry it's already in - /// the registry or not) - async fn add_or_update_node(&self, node: &Node) { - let existing_node = self.get_known_nodes(&node.id).await; - if let Some(node) = existing_node { - self.update_node(node).await; - } else { - self.add_node(node).await; - } - } - - /// Poll the node service for the current nodes it knows about - /// and update our view of their resources by querying the specific - /// mayastor instances themselves - async fn poller(&self) { - loop { - // collect all the nodes from the node service and then collect - // all the nexus and pool information from the nodes themselves - // (depending on the specific trait implementations of T) - let found_nodes = T::fetch_nodes().await; - if let Ok(found_nodes) = found_nodes { - self.mark_missing_nodes(&found_nodes).await; - - for node in &found_nodes { - // todo: add "last seen online" kind of thing to the node to - // avoid retrying to connect to a crashed/missed node over - // and over again when the node service - // is not aware of this yet. - self.found_node(node).await; - } - } - - self.trace_all().await; - tokio::time::delay_for(self.update_period).await; - } - } - - async fn trace_all(&self) { - let registry = self.nodes.lock().await; - tracing::trace!("Registry update: {:?}", registry); - } -} diff --git a/control-plane/agents/common/src/wrapper/v0/volume.rs b/control-plane/agents/common/src/wrapper/v0/volume.rs deleted file mode 100644 index 646ced2da..000000000 --- a/control-plane/agents/common/src/wrapper/v0/volume.rs +++ /dev/null @@ -1,375 +0,0 @@ -use super::{node_traits::*, *}; -use crate::wrapper::v0::msg_translation::{MessageBusToRpc, RpcToMessageBus}; -use mbus_api::Message; - -/// Implementation of the trait NodeWrapperVolume for the pool service -#[derive(Debug, Default, Clone)] -pub struct NodeWrapperVolume { - node: Node, - pools: HashMap, - nexuses: HashMap, -} - -#[async_trait] -impl NodePoolTrait for NodeWrapperVolume { - /// Fetch all pools from this node via MBUS - async fn fetch_pools(&self) -> Result, SvcError> { - MessageBus::get_pools(Filter::Node(self.id())) - .await - .context(BusGetNodes {}) - } - - /// Create a pool on the node via gRPC - async fn create_pool( - &self, - request: &CreatePool, - ) -> Result { - request.request().await.context(BusCreatePool {}) - } - - /// Destroy a pool on the node via gRPC - async fn destroy_pool( - &self, - request: &DestroyPool, - ) -> Result<(), SvcError> { - request.request().await.context(BusCreatePool {}) - } - - async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]) { - self.pools - .insert(pool.id.clone(), PoolWrapper::new_from(&pool, replicas)); - } - - fn on_destroy_pool(&mut self, pool: &PoolId) { - self.pools.remove(pool); - } -} - -#[async_trait] -impl NodeReplicaTrait for NodeWrapperVolume { - /// Fetch all replicas from this node via gRPC - async fn fetch_replicas(&self) -> Result, SvcError> { - GetReplicas { - filter: 
Filter::Node(self.id()), - } - .request() - .await - .context(BusGetReplicas {}) - .map(|r| r.0) - } - - /// Create a replica on the pool via gRPC - async fn create_replica( - &self, - request: &CreateReplica, - ) -> Result { - request.request().await.context(BusGetReplicas {}) - } - - /// Share a replica on the pool via gRPC - async fn share_replica( - &self, - request: &ShareReplica, - ) -> Result { - request.request().await.context(BusGetReplicas {}) - } - - /// Unshare a replica on the pool via gRPC - async fn unshare_replica( - &self, - request: &UnshareReplica, - ) -> Result<(), SvcError> { - request.request().await.context(BusGetReplicas {}) - } - - /// Destroy a replica on the pool via gRPC - async fn destroy_replica( - &self, - request: &DestroyReplica, - ) -> Result<(), SvcError> { - request.request().await.context(BusGetReplicas {}) - } - - fn on_create_replica(&mut self, replica: &Replica) { - if let Some(pool) = self.pools.get_mut(&replica.pool) { - pool.added_replica(replica); - } - } - - fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId) { - if let Some(pool) = self.pools.get_mut(pool) { - pool.removed_replica(replica) - } - } - - fn on_update_replica( - &mut self, - pool: &PoolId, - replica: &ReplicaId, - share: &Protocol, - uri: &str, - ) { - if let Some(pool) = self.pools.get_mut(pool) { - pool.updated_replica(replica, share, uri); - } - } -} - -#[async_trait] -impl NodeNexusTrait for NodeWrapperVolume { - fn nexuses(&self) -> Vec { - self.nexuses.values().cloned().collect() - } - - /// Fetch all nexuses from the node via gRPC - async fn fetch_nexuses(&self) -> Result, SvcError> { - let mut ctx = self.grpc_client().await?; - let rpc_nexuses = ctx - .client - .list_nexus(Null {}) - .await - .context(GrpcListNexuses {})?; - let rpc_nexuses = &rpc_nexuses.get_ref().nexus_list; - let nexuses = rpc_nexuses - .iter() - .map(|n| rpc_nexus_to_bus(n, self.node.id.clone())) - .collect(); - Ok(nexuses) - } - - /// Create a nexus on the node via gRPC - async fn create_nexus( - &self, - request: &CreateNexus, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let rpc_nexus = ctx - .client - .create_nexus(request.to_rpc()) - .await - .context(GrpcCreateNexus {})?; - Ok(rpc_nexus_to_bus( - &rpc_nexus.into_inner(), - self.node.id.clone(), - )) - } - - /// Destroy a nexus on the node via gRPC - async fn destroy_nexus( - &self, - request: &DestroyNexus, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .destroy_nexus(request.to_rpc()) - .await - .context(GrpcDestroyNexus {})?; - Ok(()) - } - - /// Share a nexus on the node via gRPC - async fn share_nexus( - &self, - request: &ShareNexus, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let share = ctx - .client - .publish_nexus(request.to_rpc()) - .await - .context(GrpcShareNexus {})?; - Ok(share.into_inner().device_uri) - } - - /// Unshare a nexus on the node via gRPC - async fn unshare_nexus( - &self, - request: &UnshareNexus, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .unpublish_nexus(request.to_rpc()) - .await - .context(GrpcUnshareNexus {})?; - Ok(()) - } - - fn on_create_nexus(&mut self, nexus: &Nexus) { - self.nexuses.insert(nexus.uuid.clone(), nexus.clone()); - } - - fn on_update_nexus(&mut self, nexus: &NexusId, uri: &str) { - if let Some(nexus) = self.nexuses.get_mut(nexus) { - nexus.device_uri = uri.to_string(); - } - } - - fn on_destroy_nexus(&mut self, nexus: &NexusId) { - 
self.nexuses.remove(nexus); - } -} - -#[async_trait] -impl NodeNexusChildTrait for NodeWrapperVolume { - async fn fetch_children(&self) -> Result, SvcError> { - unimplemented!() - } - - /// Add a child to a nexus via gRPC - async fn add_child( - &self, - request: &AddNexusChild, - ) -> Result { - let mut ctx = self.grpc_client().await?; - let rpc_child = ctx - .client - .add_child_nexus(request.to_rpc()) - .await - .context(GrpcDestroyNexus {})?; - Ok(rpc_child.into_inner().to_mbus()) - } - - /// Remove a child from its parent nexus via gRPC - async fn remove_child( - &self, - request: &RemoveNexusChild, - ) -> Result<(), SvcError> { - let mut ctx = self.grpc_client().await?; - let _ = ctx - .client - .remove_child_nexus(request.to_rpc()) - .await - .context(GrpcDestroyNexus {})?; - Ok(()) - } - - fn on_add_child(&mut self, nexus: &NexusId, child: &Child) { - if let Some(nexus) = self.nexuses.get_mut(nexus) { - nexus.children.push(child.clone()); - } - } - - fn on_remove_child(&mut self, request: &RemoveNexusChild) { - if let Some(nexus) = self.nexuses.get_mut(&request.nexus) { - nexus.children.retain(|replica| replica.uri != request.uri) - } - } -} - -#[async_trait] -impl NodeWrapperTrait for NodeWrapperVolume { - async fn new(node: &NodeId) -> Result { - Ok(Box::new(Self::new_wrapper(node).await?)) - } - - fn id(&self) -> NodeId { - self.node.id.clone() - } - fn node(&self) -> Node { - self.node.clone() - } - fn pools(&self) -> Vec { - self.pools.values().map(|p| p.pool()).collect() - } - fn pools_wrapper(&self) -> Vec { - self.pools.values().cloned().collect() - } - fn replicas(&self) -> Vec { - self.pools - .values() - .map(|p| p.replicas()) - .flatten() - .collect() - } - fn is_online(&self) -> bool { - self.node.state == NodeState::Online - } - - async fn update(&mut self) { - match Self::new_wrapper(&self.node.id).await { - Ok(node) => { - let old_state = self.node.state.clone(); - *self = node; - if old_state != self.node.state { - tracing::error!( - "Node '{}' changed state from '{}' to '{}'", - self.node.id, - old_state.to_string(), - self.node.state.to_string() - ) - } - } - Err(error) => { - tracing::error!( - "Failed to update the node '{}', error: {}", - self.node.id, - error - ); - self.set_state(NodeState::Unknown); - } - } - } - fn set_state(&mut self, state: NodeState) { - if self.node.state != state { - tracing::info!( - "Node '{}' state is now {}", - self.node.id, - state.to_string() - ); - self.node.state = state; - for (_, pool) in self.pools.iter_mut() { - pool.set_unknown(); - } - } - } -} - -impl NodeWrapperVolume { - /// Fetch node via the message bus - async fn fetch_node(node: &NodeId) -> Result { - MessageBus::get_node(node).await.context(BusGetNode { - node, - }) - } - - /// New node wrapper for the pool service containing - /// a list of pools and replicas - async fn new_wrapper(node: &NodeId) -> Result { - let mut node = Self { - // if we can't even fetch the node, then no point in proceeding - node: NodeWrapperVolume::fetch_node(node).await?, - ..Default::default() - }; - - // if the node is not online, don't even bother trying to connect - if node.is_online() { - let pools = node.fetch_pools().await?; - let replicas = node.fetch_replicas().await?; - let nexuses = node.fetch_nexuses().await?; - - for pool in &pools { - let replicas = replicas - .iter() - .filter(|r| r.pool == pool.id) - .cloned() - .collect::>(); - node.on_create_pool(pool, &replicas).await; - } - - for nexus in &nexuses { - node.on_create_nexus(nexus); - } - } - // we've got a node, but we 
might not have the full picture if it's
-    // offline
-        Ok(node)
-    }
-}
-
-fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: NodeId) -> Nexus {
-    let mut nexus = rpc_nexus.to_mbus();
-    nexus.node = id;
-    nexus
-}
diff --git a/control-plane/agents/core/src/core/grpc.rs b/control-plane/agents/core/src/core/grpc.rs
new file mode 100644
index 000000000..cb3f088e3
--- /dev/null
+++ b/control-plane/agents/core/src/core/grpc.rs
@@ -0,0 +1,115 @@
+use common::errors::{GrpcConnect, GrpcConnectUri, SvcError};
+use mbus_api::v0::NodeId;
+use rpc::mayastor::mayastor_client::MayastorClient;
+use snafu::ResultExt;
+use std::{
+    ops::{Deref, DerefMut},
+    str::FromStr,
+    sync::Arc,
+};
+use tonic::transport::Channel;
+
+/// Context with a gRPC client and a lock to serialize mutating gRPC calls
+#[derive(Clone)]
+pub(crate) struct GrpcContext {
+    /// gRPC CRUD lock
+    lock: Arc<tokio::sync::Mutex<()>>,
+    /// node identifier
+    node: NodeId,
+    /// gRPC URI endpoint
+    endpoint: tonic::transport::Endpoint,
+}
+
+impl GrpcContext {
+    pub(crate) fn new(
+        lock: Arc<tokio::sync::Mutex<()>>,
+        node: &NodeId,
+        endpoint: &str,
+    ) -> Result<Self, SvcError> {
+        let uri = format!("http://{}", endpoint);
+        let uri = http::uri::Uri::from_str(&uri).context(GrpcConnectUri {
+            node_id: node.to_string(),
+            uri: uri.clone(),
+        })?;
+        let endpoint = tonic::transport::Endpoint::from(uri)
+            .timeout(std::time::Duration::from_secs(5));
+
+        Ok(Self {
+            node: node.clone(),
+            lock,
+            endpoint,
+        })
+    }
+    pub(crate) async fn lock(&self) -> tokio::sync::OwnedMutexGuard<()> {
+        self.lock.clone().lock_owned().await
+    }
+    pub(crate) async fn connect(&self) -> Result<GrpcClient, SvcError> {
+        GrpcClient::new(self).await
+    }
+    pub(crate) async fn connect_locked(
+        &self,
+    ) -> Result<GrpcClientLocked, SvcError> {
+        GrpcClientLocked::new(self).await
+    }
+}
+
+/// Wrapper over all gRPC Clients types
+#[derive(Clone)]
+pub(crate) struct GrpcClient {
+    context: GrpcContext,
+    /// gRPC Mayastor Client
+    pub(crate) client: MayaClient,
+}
+pub(crate) type MayaClient = MayastorClient<Channel>;
+impl GrpcClient {
+    pub(crate) async fn new(context: &GrpcContext) -> Result<Self, SvcError> {
+        let client = match tokio::time::timeout(
+            std::time::Duration::from_secs(1),
+            MayaClient::connect(context.endpoint.clone()),
+        )
+        .await
+        {
+            Err(_) => Err(SvcError::GrpcConnectTimeout {
+                node_id: context.node.to_string(),
+                endpoint: format!("{:?}", context.endpoint),
+                timeout: std::time::Duration::from_secs(1),
+            }),
+            Ok(client) => Ok(client.context(GrpcConnect)?),
+        }?;
+
+        Ok(Self {
+            context: context.clone(),
+            client,
+        })
+    }
+}
+
+/// Wrapper over all gRPC Clients types with implicit locking for serialization
+pub(crate) struct GrpcClientLocked {
+    /// gRPC auto CRUD guard lock
+    _lock: tokio::sync::OwnedMutexGuard<()>,
+    client: GrpcClient,
+}
+impl GrpcClientLocked {
+    pub(crate) async fn new(context: &GrpcContext) -> Result<Self, SvcError> {
+        let client = GrpcClient::new(context).await?;
+
+        Ok(Self {
+            _lock: context.lock().await,
+            client,
+        })
+    }
+}
+
+impl Deref for GrpcClientLocked {
+    type Target = GrpcClient;
+
+    fn deref(&self) -> &Self::Target {
+        &self.client
+    }
+}
+impl DerefMut for GrpcClientLocked {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.client
+    }
+}
diff --git a/control-plane/agents/core/src/core/mod.rs b/control-plane/agents/core/src/core/mod.rs
new file mode 100644
index 000000000..13095e0a6
--- /dev/null
+++ b/control-plane/agents/core/src/core/mod.rs
@@ -0,0 +1,8 @@
+//! Common modules used by the different core services
+
+/// gRPC helpers
+pub mod grpc;
+/// registry with node and all its resources
+pub mod registry;
+/// helper wrappers over the resources
+pub mod wrapper;
diff --git a/control-plane/agents/core/src/core/registry.rs b/control-plane/agents/core/src/core/registry.rs
new file mode 100644
index 000000000..664217890
--- /dev/null
+++ b/control-plane/agents/core/src/core/registry.rs
@@ -0,0 +1,65 @@
+use super::wrapper::NodeWrapper;
+use crate::core::wrapper::InternalOps;
+use mbus_api::v0::NodeId;
+use std::{collections::HashMap, sync::Arc};
+use tokio::sync::{Mutex, RwLock};
+
+/// Registry containing all mayastor instances which register themselves via the
+/// `Register` Message.
+/// Said instances may also send `Deregister` to unregister themselves during
+/// node/pod shutdown/restart. When this happens the node state is set as
+/// `Unknown`. It's TBD how to detect when a node is really going away for good.
+///
+/// A mayastor instance sends `Register` every N seconds as a sort of keep
+/// alive message.
+/// A watchdog is started for each node and it will change the state
+/// of the node to `Offline` if it is not petted before its `deadline`.
+#[derive(Clone, Debug)]
+pub struct Registry {
+    pub(crate) nodes: Arc<RwLock<HashMap<NodeId, Arc<Mutex<NodeWrapper>>>>>,
+    /// period to refresh the cache
+    period: std::time::Duration,
+}
+
+impl Registry {
+    /// Create a new registry with the `period` to reload the cache
+    pub fn new(period: std::time::Duration) -> Self {
+        let registry = Self {
+            nodes: Default::default(),
+            period,
+        };
+        registry.start();
+        registry
+    }
+
+    /// Start thread which updates the registry
+    pub fn start(&self) {
+        let registry = self.clone();
+        tokio::spawn(async move {
+            registry.poller().await;
+        });
+    }
+
+    /// Poll each node for resource updates
+    async fn poller(&self) {
+        loop {
+            let nodes = self.nodes.read().await.clone();
+            for (_, node) in nodes.iter() {
+                let lock = node.grpc_lock().await;
+                let _guard = lock.lock().await;
+
+                let mut node_clone = node.lock().await.clone();
+                if node_clone.reload().await.is_ok() {
+                    // update node in the registry
+                    *node.lock().await = node_clone;
+                }
+            }
+            self.trace_all().await;
+            tokio::time::delay_for(self.period).await;
+        }
+    }
+    async fn trace_all(&self) {
+        let registry = self.nodes.read().await;
+        tracing::debug!("Registry update: {:?}", registry);
+    }
+}
diff --git a/control-plane/agents/core/src/core/wrapper.rs b/control-plane/agents/core/src/core/wrapper.rs
new file mode 100644
index 000000000..a03383595
--- /dev/null
+++ b/control-plane/agents/core/src/core/wrapper.rs
@@ -0,0 +1,883 @@
+use super::{super::node::watchdog::Watchdog, grpc::GrpcContext};
+use common::{
+    errors::{GrpcRequestError, SvcError},
+    v0::msg_translation::{MessageBusToRpc, RpcToMessageBus},
+};
+use mbus_api::{
+    v0::{
+        CreatePool,
+        CreateReplica,
+        DestroyPool,
+        DestroyReplica,
+        Node,
+        NodeId,
+        NodeState,
+        Pool,
+        PoolId,
+        PoolState,
+        Protocol,
+        Replica,
+        ReplicaId,
+        ShareReplica,
+        UnshareReplica,
+    },
+    ResourceKind,
+};
+use rpc::mayastor::Null;
+use snafu::ResultExt;
+use std::{cmp::Ordering, collections::HashMap};
+
+/// Wrapper over a `Node` plus a few useful methods/properties. 
Includes: +/// all pools and replicas from the node +/// a watchdog to keep track of the node's liveness +/// a lock to serialize mutating gRPC calls +#[derive(Debug, Clone)] +pub(crate) struct NodeWrapper { + /// inner Node value + node: Node, + /// watchdog to track the node state + watchdog: Watchdog, + /// gRPC CRUD lock + lock: Arc>, + /// pools part of the node + pools: HashMap, + /// nexuses part of the node + nexuses: HashMap, +} + +impl NodeWrapper { + /// Create a new wrapper for a `Node` with a `deadline` for its watchdog + pub(crate) fn new(node: &Node, deadline: std::time::Duration) -> Self { + tracing::debug!("Creating new node {:?}", node); + Self { + node: node.clone(), + watchdog: Watchdog::new(&node.id, deadline), + pools: Default::default(), + nexuses: Default::default(), + lock: Default::default(), + } + } + + /// Get `GrpcClient` for this node + async fn grpc_client(&self) -> Result { + GrpcClient::new(&GrpcContext::new( + self.lock.clone(), + &self.id, + &self.node.grpc_endpoint, + )?) + .await + } + + /// Get `GrpcContext` for this node + pub(crate) fn grpc_context(&self) -> Result { + GrpcContext::new(self.lock.clone(), &self.id, &self.node.grpc_endpoint) + } + + /// Whether the watchdog deadline has expired + pub(crate) fn registration_expired(&self) -> bool { + self.watchdog.timestamp().elapsed() > self.watchdog.deadline() + } + + /// On_register callback when the node is registered with the registry + pub(crate) async fn on_register(&mut self) { + self.watchdog.pet().await.ok(); + self.set_state(NodeState::Online); + } + + /// Update the node state based on the watchdog + pub(crate) fn update(&mut self) { + if self.registration_expired() { + self.set_state(NodeState::Offline); + } + } + + /// Set the node state + pub(crate) fn set_state(&mut self, state: NodeState) { + if self.node.state != state { + tracing::info!( + "Node '{}' changing from {} to {}", + self.node.id, + self.node.state.to_string(), + state.to_string(), + ); + self.node.state = state; + if self.node.state == NodeState::Unknown { + self.watchdog.disarm() + } + for (_, pool) in self.pools.iter_mut() { + pool.set_unknown(); + } + } + } + + /// Get a mutable reference to the node's watchdog + pub(crate) fn watchdog_mut(&mut self) -> &mut Watchdog { + &mut self.watchdog + } + /// Get the inner node + pub(crate) fn node(&self) -> &Node { + &self.node + } + /// Get all pools + pub(crate) fn pools(&self) -> Vec { + self.pools.values().cloned().collect() + } + /// Get pool from `pool_id` or None + pub(crate) fn pool(&self, pool_id: &PoolId) -> Option<&PoolWrapper> { + self.pools.get(pool_id) + } + /// Get all replicas + pub(crate) fn replicas(&self) -> Vec { + let replicas = self.pools.iter().map(|p| p.1.replicas()); + replicas.flatten().collect() + } + /// Get all nexuses + fn nexuses(&self) -> Vec { + self.nexuses.values().cloned().collect() + } + /// Get nexus + fn nexus(&self, nexus_id: &NexusId) -> Option<&Nexus> { + self.nexuses.get(nexus_id) + } + /// Get replica from `replica_id` + pub(crate) fn replica(&self, replica_id: &ReplicaId) -> Option<&Replica> { + self.pools + .iter() + .find_map(|p| p.1.replicas.iter().find(|r| &r.uuid == replica_id)) + } + /// Is the node online + pub(crate) fn is_online(&self) -> bool { + self.node.state == NodeState::Online + } + + /// Reload the node by fetching information from mayastor + pub(super) async fn reload(&mut self) -> Result<(), SvcError> { + if self.is_online() { + tracing::trace!("Reloading node '{}'", self.id); + + let replicas = 
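+            // Fetch the node's replicas, pools and nexuses up front: the
+            // cached maps below are only rebuilt after every fetch succeeds,
+            // so a failed gRPC call leaves the previous view intact.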
self.fetch_replicas().await?;
+            let pools = self.fetch_pools().await?;
+            let nexuses = self.fetch_nexuses().await?;
+
+            self.pools.clear();
+            for pool in &pools {
+                let replicas = replicas
+                    .iter()
+                    .filter(|r| r.pool == pool.id)
+                    .cloned()
+                    .collect::<Vec<_>>();
+                self.add_pool_with_replicas(pool, &replicas);
+            }
+            self.nexuses.clear();
+            for nexus in &nexuses {
+                self.add_nexus(nexus);
+            }
+            Ok(())
+        } else {
+            tracing::trace!(
+                "Skipping reload of node '{}' since it's '{:?}'",
+                self.id,
+                self.state
+            );
+            Err(SvcError::NodeNotOnline {
+                node: self.id.to_owned(),
+            })
+        }
+    }
+
+    /// Add pool with replicas
+    fn add_pool_with_replicas(&mut self, pool: &Pool, replicas: &[Replica]) {
+        self.pools
+            .insert(pool.id.clone(), PoolWrapper::new(&pool, replicas));
+    }
+    /// Remove pool from node
+    fn remove_pool(&mut self, pool: &PoolId) {
+        self.pools.remove(&pool);
+    }
+    /// Add replica
+    fn add_replica(&mut self, replica: &Replica) {
+        match self.pools.iter_mut().find(|(id, _)| id == &&replica.pool) {
+            None => {
+                tracing::error!("Can't add replica '{}' to pool '{}' because the pool does not exist", replica.uuid, replica.pool);
+            }
+            Some((_, pool)) => {
+                pool.add_replica(replica);
+            }
+        };
+    }
+    /// Remove replica from pool
+    fn remove_replica(&mut self, pool: &PoolId, replica: &ReplicaId) {
+        match self.pools.iter_mut().find(|(id, _)| id == &pool) {
+            None => (),
+            Some((_, pool)) => {
+                pool.remove_replica(replica);
+            }
+        };
+    }
+    /// Update a replica's share uri and protocol
+    fn share_replica(
+        &mut self,
+        share: &Protocol,
+        uri: &str,
+        pool: &PoolId,
+        replica: &ReplicaId,
+    ) {
+        match self.pools.iter_mut().find(|(id, _)| id == &pool) {
+            None => (),
+            Some((_, pool)) => {
+                pool.update_replica(replica, share, uri);
+            }
+        };
+    }
+    /// Unshare a replica by removing its share protocol and uri
+    fn unshare_replica(&mut self, pool: &PoolId, replica: &ReplicaId) {
+        self.share_replica(&Protocol::Off, "", pool, replica);
+    }
+    /// Add a new nexus to the node
+    fn add_nexus(&mut self, nexus: &Nexus) {
+        self.nexuses.insert(nexus.uuid.clone(), nexus.clone());
+    }
+    /// Remove nexus from the node
+    fn remove_nexus(&mut self, nexus: &NexusId) {
+        self.nexuses.remove(nexus);
+    }
+    /// Update a nexus share uri
+    fn share_nexus(&mut self, uri: &str, nexus: &NexusId) {
+        match self.nexuses.get_mut(nexus) {
+            None => (),
+            Some(nexus) => {
+                nexus.device_uri = uri.to_string();
+            }
+        }
+    }
+    /// Unshare a nexus by removing its share uri
+    fn unshare_nexus(&mut self, nexus: &NexusId) {
+        self.share_nexus("", nexus);
+    }
+    /// Add a Child to the nexus
+    fn add_child(&mut self, nexus: &NexusId, child: &Child) {
+        match self.nexuses.get_mut(nexus) {
+            None => (),
+            Some(nexus) => {
+                nexus.children.push(child.clone());
+            }
+        }
+    }
+    /// Remove child from the nexus
+    fn remove_child(&mut self, nexus: &NexusId, child: &ChildUri) {
+        match self.nexuses.get_mut(nexus) {
+            None => (),
+            Some(nexus) => {
+                nexus.children.retain(|c| &c.uri == child);
+            }
+        }
+    }
+
+    /// Fetch all replicas from this node via gRPC
+    async fn fetch_replicas(&self) -> Result<Vec<Replica>, SvcError> {
+        let mut ctx = self.grpc_client().await?;
+        let rpc_replicas = ctx.client.list_replicas(Null {}).await.context(
+            GrpcRequestError {
+                resource: ResourceKind::Replica,
+                request: "list_replicas",
+            },
+        )?;
+        let rpc_replicas = &rpc_replicas.get_ref().replicas;
+        let replicas = rpc_replicas
+            .iter()
+            .map(|p| rpc_replica_to_bus(p, &self.id))
+            .collect();
+        Ok(replicas)
+    }
+    /// Fetch all pools from this node via gRPC
+    async fn fetch_pools(&self) 
-> Result, SvcError> { + let mut ctx = self.grpc_client().await?; + let rpc_pools = + ctx.client + .list_pools(Null {}) + .await + .context(GrpcRequestError { + resource: ResourceKind::Pool, + request: "list_pools", + })?; + let rpc_pools = &rpc_pools.get_ref().pools; + let pools = rpc_pools + .iter() + .map(|p| rpc_pool_to_bus(p, &self.id)) + .collect(); + Ok(pools) + } + /// Fetch all nexuses from the node via gRPC + async fn fetch_nexuses(&self) -> Result, SvcError> { + let mut ctx = self.grpc_client().await?; + let rpc_nexuses = + ctx.client + .list_nexus(Null {}) + .await + .context(GrpcRequestError { + resource: ResourceKind::Nexus, + request: "list_nexus", + })?; + let rpc_nexuses = &rpc_nexuses.get_ref().nexus_list; + let nexuses = rpc_nexuses + .iter() + .map(|n| rpc_nexus_to_bus(n, &self.id)) + .collect(); + Ok(nexuses) + } +} + +impl std::ops::Deref for NodeWrapper { + type Target = Node; + fn deref(&self) -> &Self::Target { + &self.node + } +} + +use crate::core::grpc::{GrpcClient, GrpcClientLocked}; +use async_trait::async_trait; +use mbus_api::v0::{ + AddNexusChild, + Child, + ChildUri, + CreateNexus, + DestroyNexus, + Nexus, + NexusId, + RemoveNexusChild, + ShareNexus, + UnshareNexus, +}; +use std::{ops::Deref, sync::Arc}; + +/// CRUD Operations on a locked mayastor `NodeWrapper` such as: +/// pools, replicas, nexuses and their children +#[async_trait] +pub trait ClientOps { + async fn create_pool(&self, request: &CreatePool) + -> Result; + /// Destroy a pool on the node via gRPC + async fn destroy_pool(&self, request: &DestroyPool) + -> Result<(), SvcError>; + /// Create a replica on the pool via gRPC + async fn create_replica( + &self, + request: &CreateReplica, + ) -> Result; + /// Share a replica on the pool via gRPC + async fn share_replica( + &self, + request: &ShareReplica, + ) -> Result; + /// Unshare a replica on the pool via gRPC + async fn unshare_replica( + &self, + request: &UnshareReplica, + ) -> Result<(), SvcError>; + /// Destroy a replica on the pool via gRPC + async fn destroy_replica( + &self, + request: &DestroyReplica, + ) -> Result<(), SvcError>; + + /// Create a nexus on a node via gRPC or MBUS + async fn create_nexus( + &self, + request: &CreateNexus, + ) -> Result; + /// Destroy a nexus on a node via gRPC or MBUS + async fn destroy_nexus( + &self, + request: &DestroyNexus, + ) -> Result<(), SvcError>; + /// Share a nexus on the node via gRPC + async fn share_nexus( + &self, + request: &ShareNexus, + ) -> Result; + /// Unshare a nexus on the node via gRPC + async fn unshare_nexus( + &self, + request: &UnshareNexus, + ) -> Result<(), SvcError>; + /// Add a child to a nexus via gRPC + async fn add_child( + &self, + request: &AddNexusChild, + ) -> Result; + /// Remove a child from its parent nexus via gRPC + async fn remove_child( + &self, + request: &RemoveNexusChild, + ) -> Result<(), SvcError>; +} + +/// Internal Operations on a mayastor locked `NodeWrapper` for the implementor +/// of the `ClientOps` trait and the `Registry` itself +#[async_trait] +pub(crate) trait InternalOps { + /// Get the grpc lock and client pair + async fn grpc_client_locked(&self) -> Result; + /// Get the inner lock, typically used to sync mutating gRPC operations + async fn grpc_lock(&self) -> Arc>; +} + +/// Getter operations on a mayastor locked `NodeWrapper` to get copies of its +/// resources, such as pools, replicas and nexuses +#[async_trait] +pub(crate) trait GetterOps { + async fn pools(&self) -> Vec; + async fn pool(&self, pool_id: &PoolId) -> Option; + + async fn 
replicas(&self) -> Vec; + async fn replica(&self, replica: &ReplicaId) -> Option; + + async fn nexuses(&self) -> Vec; + async fn nexus(&self, nexus_id: &NexusId) -> Option; +} + +#[async_trait] +impl GetterOps for Arc> { + async fn pools(&self) -> Vec { + let node = self.lock().await; + node.pools() + } + async fn pool(&self, pool_id: &PoolId) -> Option { + let node = self.lock().await; + node.pool(pool_id).cloned() + } + async fn replicas(&self) -> Vec { + let node = self.lock().await; + node.replicas() + } + async fn replica(&self, replica: &ReplicaId) -> Option { + let node = self.lock().await; + node.replica(replica).cloned() + } + async fn nexuses(&self) -> Vec { + let node = self.lock().await; + node.nexuses() + } + async fn nexus(&self, nexus_id: &NexusId) -> Option { + let node = self.lock().await; + node.nexus(nexus_id).cloned() + } +} + +#[async_trait] +impl InternalOps for Arc> { + async fn grpc_client_locked(&self) -> Result { + let ctx = self.lock().await.grpc_context()?; + let client = ctx.connect_locked().await?; + Ok(client) + } + async fn grpc_lock(&self) -> Arc> { + self.lock().await.lock.clone() + } +} + +#[async_trait] +impl ClientOps for Arc> { + async fn create_pool( + &self, + request: &CreatePool, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let rpc_pool = ctx.client.create_pool(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Pool, + request: "create_pool", + }, + )?; + let pool = rpc_pool_to_bus(&rpc_pool.into_inner(), &request.node); + + self.lock().await.add_pool_with_replicas(&pool, &[]); + Ok(pool) + } + /// Destroy a pool on the node via gRPC + async fn destroy_pool( + &self, + request: &DestroyPool, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx.client.destroy_pool(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Pool, + request: "destroy_pool", + }, + )?; + self.lock().await.remove_pool(&request.id); + Ok(()) + } + + /// Create a replica on the pool via gRPC + async fn create_replica( + &self, + request: &CreateReplica, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let rpc_replica = + ctx.client.create_replica(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Replica, + request: "create_replica", + }, + )?; + + let replica = + rpc_replica_to_bus(&rpc_replica.into_inner(), &request.node); + self.lock().await.add_replica(&replica); + Ok(replica) + } + + /// Share a replica on the pool via gRPC + async fn share_replica( + &self, + request: &ShareReplica, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let share = ctx + .client + .share_replica(request.to_rpc()) + .await + .context(GrpcRequestError { + resource: ResourceKind::Replica, + request: "share_replica", + })? 
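+            // the rpc reply carries the new share URI (e.g. an nvmf:// URL);
+            // it is extracted below and mirrored into the cached pool state so
+            // readers observe it without another gRPC round-trip. A hedged
+            // caller-side sketch (names per the traits above):
+            //   let uri = node.share_replica(&request).await?;
+            //   let cached = node.replica(&request.uuid).await; // uri now cached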
+ .into_inner() + .uri; + self.lock().await.share_replica( + &request.protocol, + &share, + &request.pool, + &request.uuid, + ); + Ok(share) + } + + /// Unshare a replica on the pool via gRPC + async fn unshare_replica( + &self, + request: &UnshareReplica, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx.client.share_replica(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Replica, + request: "unshare_replica", + }, + )?; + self.lock() + .await + .unshare_replica(&request.pool, &request.uuid); + Ok(()) + } + + /// Destroy a replica on the pool via gRPC + async fn destroy_replica( + &self, + request: &DestroyReplica, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx.client.destroy_replica(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Replica, + request: "destroy_replica", + }, + )?; + self.lock() + .await + .remove_replica(&request.pool, &request.uuid); + Ok(()) + } + + /// Create a nexus on the node via gRPC + async fn create_nexus( + &self, + request: &CreateNexus, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let rpc_nexus = + ctx.client.create_nexus(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Nexus, + request: "create_nexus", + }, + )?; + let nexus = rpc_nexus_to_bus(&rpc_nexus.into_inner(), &request.node); + self.lock().await.add_nexus(&nexus); + Ok(nexus) + } + + /// Destroy a nexus on the node via gRPC + async fn destroy_nexus( + &self, + request: &DestroyNexus, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx.client.destroy_nexus(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Nexus, + request: "destroy_nexus", + }, + )?; + self.lock().await.remove_nexus(&request.uuid); + Ok(()) + } + + /// Share a nexus on the node via gRPC + async fn share_nexus( + &self, + request: &ShareNexus, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let share = ctx.client.publish_nexus(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Nexus, + request: "publish_nexus", + }, + )?; + let share = share.into_inner().device_uri; + self.lock().await.share_nexus(&share, &request.uuid); + Ok(share) + } + + /// Unshare a nexus on the node via gRPC + async fn unshare_nexus( + &self, + request: &UnshareNexus, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx.client.unpublish_nexus(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Nexus, + request: "unpublish_nexus", + }, + )?; + self.lock().await.unshare_nexus(&request.uuid); + Ok(()) + } + + /// Add a child to a nexus via gRPC + async fn add_child( + &self, + request: &AddNexusChild, + ) -> Result { + let mut ctx = self.grpc_client_locked().await?; + let rpc_child = + ctx.client.add_child_nexus(request.to_rpc()).await.context( + GrpcRequestError { + resource: ResourceKind::Child, + request: "add_child_nexus", + }, + )?; + let child = rpc_child.into_inner().to_mbus(); + self.lock().await.add_child(&request.nexus, &child); + Ok(child) + } + + /// Remove a child from its parent nexus via gRPC + async fn remove_child( + &self, + request: &RemoveNexusChild, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client_locked().await?; + let _ = ctx + .client + .remove_child_nexus(request.to_rpc()) + .await + .context(GrpcRequestError { + resource: 
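+                    // every rpc failure is wrapped in a GrpcRequestError that
+                    // records the resource kind and the rpc name that failed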
ResourceKind::Child, + request: "remove_child_nexus", + })?; + self.lock().await.remove_child(&request.nexus, &request.uri); + Ok(()) + } +} + +/// convert rpc pool to a message bus pool +fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: &NodeId) -> Pool { + let mut pool = rpc_pool.to_mbus(); + pool.node = id.clone(); + pool +} + +/// convert rpc replica to a message bus replica +fn rpc_replica_to_bus( + rpc_replica: &rpc::mayastor::Replica, + id: &NodeId, +) -> Replica { + let mut replica = rpc_replica.to_mbus(); + replica.node = id.clone(); + replica +} + +fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: &NodeId) -> Nexus { + let mut nexus = rpc_nexus.to_mbus(); + nexus.node = id.clone(); + nexus +} + +/// Wrapper over the message bus `Pool` which includes all the replicas +/// and Ord traits to aid pool selection for volume replicas +#[derive(Clone, Debug, Default, Eq, PartialEq)] +pub struct PoolWrapper { + pool: Pool, + replicas: Vec, +} + +impl Deref for PoolWrapper { + type Target = Pool; + fn deref(&self) -> &Self::Target { + &self.pool + } +} + +impl PoolWrapper { + /// New Pool wrapper with the pool and replicas + pub fn new(pool: &Pool, replicas: &[Replica]) -> Self { + Self { + pool: pool.clone(), + replicas: replicas.into(), + } + } + + /// Get the pool's replicas + pub fn replicas(&self) -> Vec { + self.replicas.clone() + } + /// Get replica from the pool + pub fn replica(&self, replica: &ReplicaId) -> Option<&Replica> { + self.replicas.iter().find(|r| &r.uuid == replica) + } + + /// Get the free space + pub fn free_space(&self) -> u64 { + if self.pool.capacity >= self.pool.used { + self.pool.capacity - self.pool.used + } else { + // odd, let's report no free space available + tracing::error!( + "Pool '{}' has a capacity of '{} B' but is using '{} B'", + self.pool.id, + self.pool.capacity, + self.pool.used + ); + 0 + } + } + + /// Set pool state as unknown + pub fn set_unknown(&mut self) { + self.pool.state = PoolState::Unknown; + } + + /// Add replica to list + pub fn add_replica(&mut self, replica: &Replica) { + self.replicas.push(replica.clone()) + } + /// Remove replica from list + pub fn remove_replica(&mut self, uuid: &ReplicaId) { + self.replicas.retain(|replica| &replica.uuid != uuid) + } + /// update replica from list + pub fn update_replica( + &mut self, + uuid: &ReplicaId, + share: &Protocol, + uri: &str, + ) { + if let Some(replica) = self + .replicas + .iter_mut() + .find(|replica| &replica.uuid == uuid) + { + replica.share = share.clone(); + replica.uri = uri.to_string(); + } + } +} + +impl From<&NodeWrapper> for Node { + fn from(node: &NodeWrapper) -> Self { + node.node.clone() + } +} +impl From for Vec { + fn from(node: NodeWrapper) -> Self { + node.pools + .values() + .map(Vec::::from) + .flatten() + .collect() + } +} +impl From for Vec { + fn from(node: NodeWrapper) -> Self { + node.pools.values().cloned().collect() + } +} + +impl From for Pool { + fn from(pool: PoolWrapper) -> Self { + pool.pool + } +} +impl From<&PoolWrapper> for Pool { + fn from(pool: &PoolWrapper) -> Self { + pool.pool.clone() + } +} +impl From for Vec { + fn from(pool: PoolWrapper) -> Self { + pool.replicas + } +} +impl From<&PoolWrapper> for Vec { + fn from(pool: &PoolWrapper) -> Self { + pool.replicas.clone() + } +} + +// 1. state ( online > degraded ) +// 2. 
smaller n replicas +// (here we should have pool IO stats over time so we can pick less active +// pools rather than the number of replicas which is useless if the volumes +// are not active) +impl PartialOrd for PoolWrapper { + fn partial_cmp(&self, other: &Self) -> Option { + match self.pool.state.partial_cmp(&other.pool.state) { + Some(Ordering::Greater) => Some(Ordering::Greater), + Some(Ordering::Less) => Some(Ordering::Less), + Some(Ordering::Equal) => { + match self.replicas.len().cmp(&other.replicas.len()) { + Ordering::Greater => Some(Ordering::Greater), + Ordering::Less => Some(Ordering::Less), + Ordering::Equal => { + Some(self.free_space().cmp(&other.free_space())) + } + } + } + None => None, + } + } +} + +impl Ord for PoolWrapper { + fn cmp(&self, other: &Self) -> Ordering { + match self.pool.state.partial_cmp(&other.pool.state) { + Some(Ordering::Greater) => Ordering::Greater, + Some(Ordering::Less) => Ordering::Less, + Some(Ordering::Equal) => { + match self.replicas.len().cmp(&other.replicas.len()) { + Ordering::Greater => Ordering::Greater, + Ordering::Less => Ordering::Less, + Ordering::Equal => { + self.free_space().cmp(&other.free_space()) + } + } + } + None => Ordering::Equal, + } + } +} diff --git a/control-plane/agents/core/src/node/mod.rs b/control-plane/agents/core/src/node/mod.rs new file mode 100644 index 000000000..a43498719 --- /dev/null +++ b/control-plane/agents/core/src/node/mod.rs @@ -0,0 +1,129 @@ +pub(super) mod service; +/// node watchdog to keep track of a node's liveness +pub(crate) mod watchdog; + +use super::{ + core::registry, + handler, + handler_publish, + impl_publish_handler, + impl_request_handler, + CliArgs, +}; +use common::{errors::SvcError, Service}; +use mbus_api::{v0::*, *}; + +use async_trait::async_trait; +use std::{convert::TryInto, marker::PhantomData}; +use structopt::StructOpt; + +pub(crate) fn configure(builder: Service) -> Service { + let registry = builder.get_shared_state::().clone(); + let deadline = CliArgs::from_args().deadline.into(); + builder + .with_shared_state(service::Service::new(registry, deadline)) + .with_channel(ChannelVs::Registry) + .with_subscription(handler_publish!(Register)) + .with_subscription(handler_publish!(Deregister)) + .with_channel(ChannelVs::Node) + .with_subscription(handler!(GetNodes)) + .with_subscription(handler!(GetBlockDevices)) + .with_default_liveness() +} + +#[cfg(test)] +mod tests { + use super::*; + use composer::*; + use rpc::mayastor::Null; + + async fn bus_init() -> Result<(), Box> { + tokio::time::timeout(std::time::Duration::from_secs(2), async { + mbus_api::message_bus_init("10.1.0.2".into()).await + }) + .await?; + Ok(()) + } + async fn wait_for_node() -> Result<(), Box> { + let _ = GetNodes {}.request().await?; + Ok(()) + } + fn init_tracing() { + if let Ok(filter) = + tracing_subscriber::EnvFilter::try_from_default_env() + { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } + } + // to avoid waiting for timeouts + async fn orderly_start( + test: &ComposeTest, + ) -> Result<(), Box> { + test.start_containers(vec!["nats", "core"]).await?; + + bus_init().await?; + wait_for_node().await?; + + test.start("mayastor").await?; + + let mut hdl = test.grpc_handle("mayastor").await?; + hdl.mayastor.list_nexus(Null {}).await?; + Ok(()) + } + + #[tokio::test] + async fn node() { + init_tracing(); + let maya_name = NodeId::from("node-test-name"); + let test = Builder::new() + .name("node") + 
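+            // three containers make up the test cluster: `nats` is the message
+            // bus, `core` is this agent (its registration deadline is set to
+            // 2s via `-d 2sec` below), and `mayastor` registers itself over
+            // NATS; the final assertion relies on that 2s deadline expiring.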
diff --git a/control-plane/agents/core/src/node/mod.rs b/control-plane/agents/core/src/node/mod.rs
new file mode 100644
index 000000000..a43498719
--- /dev/null
+++ b/control-plane/agents/core/src/node/mod.rs
@@ -0,0 +1,129 @@
+pub(super) mod service;
+/// node watchdog to keep track of a node's liveness
+pub(crate) mod watchdog;
+
+use super::{
+    core::registry,
+    handler,
+    handler_publish,
+    impl_publish_handler,
+    impl_request_handler,
+    CliArgs,
+};
+use common::{errors::SvcError, Service};
+use mbus_api::{v0::*, *};
+
+use async_trait::async_trait;
+use std::{convert::TryInto, marker::PhantomData};
+use structopt::StructOpt;
+
+pub(crate) fn configure(builder: Service) -> Service {
+    let registry = builder.get_shared_state::<registry::Registry>().clone();
+    let deadline = CliArgs::from_args().deadline.into();
+    builder
+        .with_shared_state(service::Service::new(registry, deadline))
+        .with_channel(ChannelVs::Registry)
+        .with_subscription(handler_publish!(Register))
+        .with_subscription(handler_publish!(Deregister))
+        .with_channel(ChannelVs::Node)
+        .with_subscription(handler!(GetNodes))
+        .with_subscription(handler!(GetBlockDevices))
+        .with_default_liveness()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use composer::*;
+    use rpc::mayastor::Null;
+
+    async fn bus_init() -> Result<(), Box<dyn std::error::Error>> {
+        tokio::time::timeout(std::time::Duration::from_secs(2), async {
+            mbus_api::message_bus_init("10.1.0.2".into()).await
+        })
+        .await?;
+        Ok(())
+    }
+    async fn wait_for_node() -> Result<(), Box<dyn std::error::Error>> {
+        let _ = GetNodes {}.request().await?;
+        Ok(())
+    }
+    fn init_tracing() {
+        if let Ok(filter) =
+            tracing_subscriber::EnvFilter::try_from_default_env()
+        {
+            tracing_subscriber::fmt().with_env_filter(filter).init();
+        } else {
+            tracing_subscriber::fmt().with_env_filter("info").init();
+        }
+    }
+    // to avoid waiting for timeouts
+    async fn orderly_start(
+        test: &ComposeTest,
+    ) -> Result<(), Box<dyn std::error::Error>> {
+        test.start_containers(vec!["nats", "core"]).await?;
+
+        bus_init().await?;
+        wait_for_node().await?;
+
+        test.start("mayastor").await?;
+
+        let mut hdl = test.grpc_handle("mayastor").await?;
+        hdl.mayastor.list_nexus(Null {}).await?;
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn node() {
+        init_tracing();
+        let maya_name = NodeId::from("node-test-name");
+        let test = Builder::new()
+            .name("node")
+            .add_container_bin(
+                "nats",
+                Binary::from_nix("nats-server").with_arg("-DV"),
+            )
+            .add_container_bin(
+                "core",
+                Binary::from_dbg("core")
+                    .with_nats("-n")
+                    .with_args(vec!["-d", "2sec"]),
+            )
+            .add_container_bin(
+                "mayastor",
+                Binary::from_dbg("mayastor")
+                    .with_nats("-n")
+                    .with_args(vec!["-N", maya_name.as_str()]),
+            )
+            .autorun(false)
+            .build()
+            .await
+            .unwrap();
+
+        orderly_start(&test).await.unwrap();
+
+        let nodes = GetNodes {}.request().await.unwrap();
+        tracing::info!("Nodes: {:?}", nodes);
+        assert_eq!(nodes.0.len(), 1);
+        assert_eq!(
+            nodes.0.first().unwrap(),
+            &Node {
+                id: maya_name.clone(),
+                grpc_endpoint: "0.0.0.0:10124".to_string(),
+                state: NodeState::Online,
+            }
+        );
+        tokio::time::delay_for(std::time::Duration::from_secs(2)).await;
+        let nodes = GetNodes {}.request().await.unwrap();
+        tracing::info!("Nodes: {:?}", nodes);
+        assert_eq!(nodes.0.len(), 1);
+        assert_eq!(
+            nodes.0.first().unwrap(),
+            &Node {
+                id: maya_name.clone(),
+                grpc_endpoint: "0.0.0.0:10124".to_string(),
+                state: NodeState::Offline,
+            }
+        );
+    }
+}
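
Note: the test above relies on mayastor re-registering itself faster than the `-d 2sec` deadline; once keep-alives stop, the node flips to Offline. In outline, the data-plane side amounts to a loop like the following sketch; `publish()` on the `Register` message is an assumption about the mbus_api Message trait (the tests above only use `request()`), and the field names match the register handler below:

    // Sketch only: a periodic keep-alive a node would send on the registry channel.
    async fn keep_alive(id: NodeId, grpc_endpoint: String, period: std::time::Duration) {
        loop {
            let _ = Register {
                id: id.clone(),
                grpc_endpoint: grpc_endpoint.clone(),
            }
            .publish() // assumed fire-and-forget publish, rather than request/reply
            .await;
            tokio::time::delay_for(period).await; // tokio 0.2-era sleep, as in the test above
        }
    }
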
diff --git a/control-plane/agents/core/src/node/service.rs b/control-plane/agents/core/src/node/service.rs
new file mode 100644
index 000000000..94006812c
--- /dev/null
+++ b/control-plane/agents/core/src/node/service.rs
@@ -0,0 +1,157 @@
+use super::*;
+use crate::core::{registry::Registry, wrapper::NodeWrapper};
+use common::{
+    errors::{GrpcRequestError, NodeNotFound, SvcError},
+    v0::msg_translation::RpcToMessageBus,
+};
+use rpc::mayastor::ListBlockDevicesRequest;
+use snafu::{OptionExt, ResultExt};
+use std::sync::Arc;
+use tokio::sync::Mutex;
+
+/// Node's Service
+#[derive(Debug, Clone)]
+pub(crate) struct Service {
+    registry: Registry,
+    /// deadline for receiving keepalive/Register messages
+    deadline: std::time::Duration,
+}
+
+impl Service {
+    /// New Node Service which uses the `registry` as its node cache and sets
+    /// the `deadline` to each node's watchdog
+    pub(super) fn new(
+        registry: Registry,
+        deadline: std::time::Duration,
+    ) -> Self {
+        Self {
+            registry,
+            deadline,
+        }
+    }
+
+    /// Callback to be called when a node's watchdog times out
+    pub(super) async fn on_timeout(service: &Service, id: &NodeId) {
+        let registry = service.registry.clone();
+        let state = registry.nodes.read().await;
+        if let Some(node) = state.get(id) {
+            let mut node = node.lock().await;
+            if node.is_online() {
+                tracing::error!(
+                    "Node id '{}' missed the registration deadline of {:?}",
+                    id,
+                    service.deadline
+                );
+                node.update();
+            }
+        }
+    }
+
+    /// Register a new node through the register information
+    pub(super) async fn register(&self, registration: &Register) {
+        let node = Node {
+            id: registration.id.clone(),
+            grpc_endpoint: registration.grpc_endpoint.clone(),
+            state: NodeState::Online,
+        };
+        let mut nodes = self.registry.nodes.write().await;
+        match nodes.get_mut(&node.id) {
+            None => {
+                let mut node = NodeWrapper::new(&node, self.deadline);
+                node.watchdog_mut().arm(self.clone());
+                nodes.insert(node.id.clone(), Arc::new(Mutex::new(node)));
+            }
+            Some(node) => {
+                node.lock().await.on_register().await;
+            }
+        }
+    }
+
+    /// Deregister a node through the deregister information
+    pub(super) async fn deregister(&self, node: &Deregister) {
+        let nodes = self.registry.nodes.read().await;
+        match nodes.get(&node.id) {
+            None => {}
+            // ideally we want this node to disappear completely when it's not
+            // part of the daemonset, but we just don't have that kind of
+            // information at this level :(
+            // maybe nodes should also be registered/deregistered via REST?
+            Some(node) => {
+                node.lock().await.set_state(NodeState::Unknown);
+            }
+        }
+    }
+
+    /// Get all nodes
+    pub(crate) async fn get_nodes(
+        &self,
+        _: &GetNodes,
+    ) -> Result<Nodes, SvcError> {
+        let nodes = self.registry.get_nodes_wrapper().await;
+        let mut nodes_vec = vec![];
+        for node in nodes {
+            nodes_vec.push(node.lock().await.node().clone());
+        }
+        Ok(Nodes(nodes_vec))
+    }
+
+    /// Get block devices from a node
+    pub(crate) async fn get_block_devices(
+        &self,
+        request: &GetBlockDevices,
+    ) -> Result<BlockDevices, SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+
+        let grpc = node.lock().await.grpc_context()?;
+        let mut client = grpc.connect().await?;
+
+        let result = client
+            .client
+            .list_block_devices(ListBlockDevicesRequest {
+                all: request.all,
+            })
+            .await;
+
+        let response = result
+            .context(GrpcRequestError {
+                resource: ResourceKind::Block,
+                request: "list_block_devices",
+            })?
+            .into_inner();
+
+        let bdevs = response
+            .devices
+            .iter()
+            .map(|rpc_bdev| rpc_bdev.to_mbus())
+            .collect();
+        Ok(BlockDevices(bdevs))
+    }
+}
+
+impl Registry {
+    /// Get all node wrappers
+    pub(crate) async fn get_nodes_wrapper(
+        &self,
+    ) -> Vec<Arc<Mutex<NodeWrapper>>> {
+        let nodes = self.nodes.read().await;
+        nodes.values().cloned().collect()
+    }
+
+    /// Get node `node_id`
+    pub(crate) async fn get_node_wrapper(
+        &self,
+        node_id: &NodeId,
+    ) -> Option<Arc<Mutex<NodeWrapper>>> {
+        let nodes = self.nodes.read().await;
+        match nodes.iter().find(|n| n.0 == node_id) {
+            None => None,
+            Some((_, node)) => Some(node.clone()),
+        }
+    }
+}
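
Note: a caller-side sketch of the block-device query handled above (request fields as consumed by `get_block_devices`; error handling elided, unwrap used as in the tests):

    // Sketch only: ask the node service for every block device on one node.
    let devices = GetBlockDevices {
        node: NodeId::from("node-a"),
        all: true, // forwarded verbatim to ListBlockDevicesRequest above
    }
    .request()
    .await
    .unwrap();
    for device in devices.0 {
        tracing::info!("block device: {:?}", device);
    }
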
diff --git a/control-plane/agents/core/src/node/watchdog.rs b/control-plane/agents/core/src/node/watchdog.rs
new file mode 100644
index 000000000..f37823b8c
--- /dev/null
+++ b/control-plane/agents/core/src/node/watchdog.rs
@@ -0,0 +1,84 @@
+use crate::node::service::Service;
+use mbus_api::v0::NodeId;
+
+/// Watchdog which must be pet within the deadline, otherwise
+/// it triggers the `on_timeout` callback from the node `Service`
+#[derive(Debug, Clone)]
+pub(crate) struct Watchdog {
+    node_id: NodeId,
+    deadline: std::time::Duration,
+    timestamp: std::time::Instant,
+    pet_chan: Option<tokio::sync::mpsc::Sender<()>>,
+    service: Option<Service>,
+}
+
+impl Watchdog {
+    /// new empty watchdog with a deadline timeout for node `node_id`
+    pub(crate) fn new(node_id: &NodeId, deadline: std::time::Duration) -> Self {
+        Self {
+            deadline,
+            node_id: node_id.clone(),
+            timestamp: std::time::Instant::now(),
+            pet_chan: None,
+            service: None,
+        }
+    }
+
+    /// the set deadline
+    pub(crate) fn deadline(&self) -> std::time::Duration {
+        self.deadline
+    }
+
+    /// last time the node was seen
+    pub(crate) fn timestamp(&self) -> std::time::Instant {
+        self.timestamp
+    }
+
+    /// arm watchdog with self timeout and execute error callback if
+    /// the deadline is not met
+    pub(crate) fn arm(&mut self, service: Service) {
+        tracing::debug!("Arming the watchdog for node '{}'", self.node_id);
+        let (s, mut r) = tokio::sync::mpsc::channel(1);
+        self.pet_chan = Some(s);
+        self.service = Some(service.clone());
+        let deadline = self.deadline;
+        let id = self.node_id.clone();
+        tokio::spawn(async move {
+            loop {
+                let result = tokio::time::timeout(deadline, r.recv()).await;
+                match result {
+                    Err(_) => Service::on_timeout(&service, &id).await,
+                    Ok(None) => {
+                        tracing::warn!("Stopping Watchdog for node '{}'", id);
+                        break;
+                    }
+                    _ => (),
+                }
+            }
+        });
+    }
+
+    /// meet the deadline
+    pub(crate) async fn pet(
+        &mut self,
+    ) -> Result<(), tokio::sync::mpsc::error::SendError<()>> {
+        self.timestamp = std::time::Instant::now();
+        if let Some(chan) = &mut self.pet_chan {
+            chan.send(()).await
+        } else {
+            // if the watchdog was stopped, then rearm it
+            if let Some(service) = self.service.clone() {
+                self.arm(service);
+            }
+            Ok(())
+        }
+    }
+    /// stop the watchdog
+    pub(crate) fn disarm(&mut self) {
+        tracing::debug!("Disarming the watchdog for node '{}'", self.node_id);
+        if let Some(chan) = &mut self.pet_chan {
+            let _ = chan.disarm();
+        }
+        self.pet_chan = None;
+    }
+}
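
Note: condensed to its mechanic, the watchdog is a timeout that petting resets. A standalone sketch of the loop `arm()` spawns, without the `Service` callback the real type carries:

    // Sketch only: the pet-or-deadline loop underlying Watchdog::arm().
    let (pet, mut petted) = tokio::sync::mpsc::channel::<()>(1);
    let deadline = std::time::Duration::from_secs(10);
    tokio::spawn(async move {
        loop {
            match tokio::time::timeout(deadline, petted.recv()).await {
                Err(_) => tracing::error!("deadline missed"), // Service::on_timeout in the real code
                Ok(None) => break,   // sender dropped: watchdog disarmed
                Ok(Some(())) => (),  // petted in time; wait for the next pet
            }
        }
    });
    // meanwhile, every registration pets it:
    // pet.send(()).await?;
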
diff --git a/control-plane/agents/core/src/pool/mod.rs b/control-plane/agents/core/src/pool/mod.rs
new file mode 100644
index 000000000..b30616205
--- /dev/null
+++ b/control-plane/agents/core/src/pool/mod.rs
@@ -0,0 +1,173 @@
+mod registry;
+pub mod service;
+
+use std::{convert::TryInto, marker::PhantomData};
+
+use super::{core::registry::Registry, handler, impl_request_handler};
+use async_trait::async_trait;
+use common::{errors::SvcError, Service};
+use mbus_api::{
+    v0::{
+        ChannelVs,
+        CreatePool,
+        CreateReplica,
+        DestroyPool,
+        DestroyReplica,
+        GetPools,
+        GetReplicas,
+        ShareReplica,
+        UnshareReplica,
+    },
+    Message,
+    MessageId,
+    ReceivedMessage,
+};
+
+pub(crate) fn configure(builder: Service) -> Service {
+    let registry = builder.get_shared_state::<Registry>().clone();
+    builder
+        .with_channel(ChannelVs::Pool)
+        .with_default_liveness()
+        .with_shared_state(service::Service::new(registry))
+        .with_subscription(handler!(GetPools))
+        .with_subscription(handler!(CreatePool))
+        .with_subscription(handler!(DestroyPool))
+        .with_subscription(handler!(GetReplicas))
+        .with_subscription(handler!(CreateReplica))
+        .with_subscription(handler!(DestroyReplica))
+        .with_subscription(handler!(ShareReplica))
+        .with_subscription(handler!(UnshareReplica))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use composer::*;
+    use mbus_api::v0::{GetNodes, Liveness, Protocol, Replica};
+    use rpc::mayastor::Null;
+
+    async fn wait_for_services() {
+        let _ = GetNodes {}.request().await.unwrap();
+        Liveness {}.request_on(ChannelVs::Pool).await.unwrap();
+    }
+    // to avoid waiting for timeouts
+    async fn orderly_start(test: &ComposeTest) {
+        test.start_containers(vec!["nats", "core"]).await.unwrap();
+
+        test.connect_to_bus("nats").await;
+        wait_for_services().await;
+
+        test.start("mayastor").await.unwrap();
+
+        let mut hdl = test.grpc_handle("mayastor").await.unwrap();
+        hdl.mayastor.list_nexus(Null {}).await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn pool() {
+        let mayastor = "pool-test-name";
+        let test = Builder::new()
+            .name("pool")
+            .add_container_bin(
+                "nats",
+                Binary::from_nix("nats-server").with_arg("-DV"),
+            )
+            .add_container_bin("core", Binary::from_dbg("core").with_nats("-n"))
+            .add_container_bin(
+                "mayastor",
+                Binary::from_dbg("mayastor")
+                    .with_nats("-n")
+                    .with_args(vec!["-N", mayastor])
+                    .with_args(vec!["-g", "10.1.0.4:10124"]),
+            )
+            .with_default_tracing()
+            .autorun(false)
+            .build()
+            .await
+            .unwrap();
+
+        orderly_start(&test).await;
+
+        let nodes = GetNodes {}.request().await.unwrap();
+        tracing::info!("Nodes: {:?}", nodes);
+
+        CreatePool {
+            node: mayastor.into(),
+            id: "pooloop".into(),
+            disks: vec!["malloc:///disk0?size_mb=100".into()],
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let pools = GetPools::default().request().await.unwrap();
+        tracing::info!("Pools: {:?}", pools);
+
+        let replica = CreateReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+            size: 12582912, /* actual size will be a multiple of 4MB so just
+                             * create it like so */
+            thin: true,
+            share: Protocol::Off,
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let replicas = GetReplicas::default().request().await.unwrap();
+        tracing::info!("Replicas: {:?}", replicas);
+
+        assert_eq!(
+            replica,
+            Replica {
+                node: mayastor.into(),
+                uuid: "replica1".into(),
+                pool: "pooloop".into(),
+                thin: false,
+                size: 12582912,
+                share: Protocol::Off,
+                uri: "bdev:///replica1".into()
+            }
+        );
+
+        let uri = ShareReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+            protocol: Protocol::Nvmf,
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let mut replica_updated = replica;
+        replica_updated.uri = uri;
+        replica_updated.share = Protocol::Nvmf;
+        let replica = GetReplicas::default().request().await.unwrap();
+        let replica = replica.0.first().unwrap();
+        assert_eq!(replica, &replica_updated);
+
+        DestroyReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+        }
+        .request()
+        .await
+        .unwrap();
+
+        assert!(GetReplicas::default().request().await.unwrap().0.is_empty());
+
+        DestroyPool {
+            node: mayastor.into(),
+            id: "pooloop".into(),
+        }
+        .request()
+        .await
+        .unwrap();
+
+        assert!(GetPools::default().request().await.unwrap().0.is_empty());
+    }
+}
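
Note: the registry helpers in the next file back these handlers. Request-side, the same queries can be narrowed with a filter, e.g. this sketch (reusing the request types subscribed above; the `filter` field name follows the service code further down):

    // Sketch only: list the pools of a single node rather than all of them.
    let pools = GetPools {
        filter: Filter::Node(NodeId::from("pool-test-name")),
    }
    .request()
    .await
    .unwrap();
    assert!(pools.0.iter().all(|pool| pool.node == NodeId::from("pool-test-name")));
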
diff --git a/control-plane/agents/core/src/pool/registry.rs b/control-plane/agents/core/src/pool/registry.rs
new file mode 100644
index 000000000..d51c7a662
--- /dev/null
+++ b/control-plane/agents/core/src/pool/registry.rs
@@ -0,0 +1,176 @@
+use crate::core::{registry::Registry, wrapper::*};
+use common::errors::{NodeNotFound, PoolNotFound, ReplicaNotFound, SvcError};
+use mbus_api::v0::{NodeId, Pool, PoolId, Replica, ReplicaId};
+use snafu::OptionExt;
+
+/// Pool helpers
+impl Registry {
+    /// Get all pools from node `node_id` or from all nodes
+    pub(crate) async fn get_node_opt_pools(
+        &self,
+        node_id: Option<NodeId>,
+    ) -> Result<Vec<Pool>, SvcError> {
+        match node_id {
+            None => self.get_pools_inner().await,
+            Some(node_id) => self.get_node_pools(&node_id).await,
+        }
+    }
+
+    /// Get wrapper pool `pool_id` from node `node_id`
+    pub(crate) async fn get_node_pool_wrapper(
+        &self,
+        node_id: &NodeId,
+        pool_id: &PoolId,
+    ) -> Result<PoolWrapper, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        let pool = node.pool(pool_id).await.context(PoolNotFound {
+            pool_id: pool_id.clone(),
+        })?;
+        Ok(pool)
+    }
+
+    /// Get pool wrapper for `pool_id`
+    pub(crate) async fn get_pool_wrapper(
+        &self,
+        pool_id: &PoolId,
+    ) -> Result<PoolWrapper, SvcError> {
+        let nodes = self.get_nodes_wrapper().await;
+        for node in nodes {
+            if let Some(pool) = node.pool(pool_id).await {
+                return Ok(pool);
+            }
+        }
+        Err(common::errors::SvcError::PoolNotFound {
+            pool_id: pool_id.clone(),
+        })
+    }
+
+    /// Get all pool wrappers
+    pub(crate) async fn get_pools_wrapper(
+        &self,
+    ) -> Result<Vec<PoolWrapper>, SvcError> {
+        let nodes = self.get_nodes_wrapper().await;
+        let mut pools = vec![];
+        for node in nodes {
+            pools.extend(node.pools().await);
+        }
+        Ok(pools)
+    }
+
+    /// Get all pools
+    pub(crate) async fn get_pools_inner(&self) -> Result<Vec<Pool>, SvcError> {
+        let nodes = self.get_pools_wrapper().await?;
+        Ok(nodes.iter().map(Pool::from).collect())
+    }
+
+    /// Get all pools from node `node_id`
+    pub(crate) async fn get_node_pools(
+        &self,
+        node_id: &NodeId,
+    ) -> Result<Vec<Pool>, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        Ok(node.pools().await.iter().map(Pool::from).collect())
+    }
+}
+
+/// Replica helpers
+impl Registry {
+    /// Get all replicas from node `node_id` or from all nodes
+    pub(crate) async fn get_node_opt_replicas(
+        &self,
+        node_id: Option<NodeId>,
+    ) -> Result<Vec<Replica>, SvcError> {
+        match node_id {
+            None => self.get_replicas().await,
+            Some(node_id) => self.get_node_replicas(&node_id).await,
+        }
+    }
+
+    /// Get all replicas
+    pub(crate) async fn get_replicas(&self) -> Result<Vec<Replica>, SvcError> {
+        let nodes = self.get_pools_wrapper().await?;
+        Ok(nodes.iter().map(|pool| pool.replicas()).flatten().collect())
+    }
+
+    /// Get replica `replica_id`
+    pub(crate) async fn get_replica(
+        &self,
+        replica_id: &ReplicaId,
+    ) -> Result<Replica, SvcError> {
+        let replicas = self.get_replicas().await?;
+        let replica = replicas.iter().find(|r| &r.uuid == replica_id).context(
+            ReplicaNotFound {
+                replica_id: replica_id.clone(),
+            },
+        )?;
+        Ok(replica.clone())
+    }
+
+    /// Get all replicas from node `node_id`
+    pub(crate) async fn get_node_replicas(
+        &self,
+        node_id: &NodeId,
+    ) -> Result<Vec<Replica>, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        Ok(node.replicas().await)
+    }
+
+    /// Get replica `replica_id` from node `node_id`
+    pub(crate) async fn get_node_replica(
+        &self,
+        node_id: &NodeId,
+        replica_id: &ReplicaId,
+    ) -> Result<Replica, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        let replica =
+            node.replica(replica_id).await.context(ReplicaNotFound {
+                replica_id: replica_id.clone(),
+            })?;
+        Ok(replica)
+    }
+
+    /// Get replica `replica_id` from pool `pool_id`
+    pub(crate) async fn get_pool_replica(
+        &self,
+        pool_id: &PoolId,
+        replica_id: &ReplicaId,
+    ) -> Result<Replica, SvcError> {
+        let pool = self.get_pool_wrapper(pool_id).await?;
+        let replica = pool.replica(replica_id).context(ReplicaNotFound {
+            replica_id: replica_id.clone(),
+        })?;
+        Ok(replica.clone())
+    }
+
+    /// Get replica `replica_id` from pool `pool_id` on node `node_id`
+    pub(crate) async fn get_node_pool_replica(
+        &self,
+        node_id: &NodeId,
+        pool_id: &PoolId,
+        replica_id: &ReplicaId,
+    ) -> Result<Replica, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        let pool = node.pool(pool_id).await.context(PoolNotFound {
+            pool_id: pool_id.clone(),
+        })?;
+        let replica = pool.replica(replica_id).context(ReplicaNotFound {
+            replica_id: replica_id.clone(),
+        })?;
+        Ok(replica.clone())
+    }
+}
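
Note: every lookup above turns a missing entry into a typed error through snafu's `OptionExt::context`. The pattern in miniature, with a hypothetical error type and snafu 0.6-style context selectors as used in this patch:

    use snafu::{OptionExt, Snafu};

    #[derive(Debug, Snafu)]
    enum LookupError {
        #[snafu(display("Pool '{}' not found", pool_id))]
        PoolMissing { pool_id: String },
    }

    fn find_pool(pools: &[String], pool_id: &str) -> Result<String, LookupError> {
        pools
            .iter()
            .find(|candidate| candidate.as_str() == pool_id)
            .cloned()
            // Option -> Result, attaching the id to the error, just like
            // `.context(PoolNotFound { pool_id: ... })` in the hunk above
            .context(PoolMissing { pool_id })
    }
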
diff --git a/control-plane/agents/core/src/pool/service.rs b/control-plane/agents/core/src/pool/service.rs
new file mode 100644
index 000000000..c7bdbb62e
--- /dev/null
+++ b/control-plane/agents/core/src/pool/service.rs
@@ -0,0 +1,213 @@
+use crate::core::{registry::Registry, wrapper::ClientOps};
+use common::errors::{NodeNotFound, SvcError};
+use mbus_api::v0::{
+    CreatePool,
+    CreateReplica,
+    DestroyPool,
+    DestroyReplica,
+    Filter,
+    GetPools,
+    GetReplicas,
+    Pool,
+    Pools,
+    Replica,
+    Replicas,
+    ShareReplica,
+    UnshareReplica,
+};
+use snafu::OptionExt;
+
+#[derive(Debug, Clone)]
+pub(super) struct Service {
+    registry: Registry,
+}
+
+impl Service {
+    pub(super) fn new(registry: Registry) -> Self {
+        Self {
+            registry,
+        }
+    }
+
+    /// Get pools according to the filter
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_pools(
+        &self,
+        request: &GetPools,
+    ) -> Result<Pools, SvcError> {
+        let filter = request.filter.clone();
+        Ok(Pools(match filter {
+            Filter::None => self.registry.get_node_opt_pools(None).await?,
+            Filter::Node(node_id) => {
+                self.registry.get_node_pools(&node_id).await?
+            }
+            Filter::NodePool(node_id, pool_id) => {
+                let pool = self
+                    .registry
+                    .get_node_pool_wrapper(&node_id, &pool_id)
+                    .await?;
+                vec![pool.into()]
+            }
+            Filter::Pool(pool_id) => {
+                let pool = self.registry.get_pool_wrapper(&pool_id).await?;
+                vec![pool.into()]
+            }
+            _ => {
+                return Err(SvcError::InvalidFilter {
+                    filter,
+                })
+            }
+        }))
+    }
+
+    /// Get replicas according to the filter
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_replicas(
+        &self,
+        request: &GetReplicas,
+    ) -> Result<Replicas, SvcError> {
+        let filter = request.filter.clone();
+        Ok(Replicas(match filter {
+            Filter::None => self.registry.get_node_opt_replicas(None).await?,
+            Filter::Node(node_id) => {
+                self.registry.get_node_opt_replicas(Some(node_id)).await?
+            }
+            Filter::NodePool(node_id, pool_id) => {
+                let pool = self
+                    .registry
+                    .get_node_pool_wrapper(&node_id, &pool_id)
+                    .await?;
+                pool.into()
+            }
+            Filter::Pool(pool_id) => {
+                let pool = self.registry.get_pool_wrapper(&pool_id).await?;
+                pool.into()
+            }
+            Filter::NodePoolReplica(node_id, pool_id, replica_id) => {
+                vec![
+                    self.registry
+                        .get_node_pool_replica(&node_id, &pool_id, &replica_id)
+                        .await?,
+                ]
+            }
+            Filter::NodeReplica(node_id, replica_id) => {
+                vec![
+                    self.registry
+                        .get_node_replica(&node_id, &replica_id)
+                        .await?,
+                ]
+            }
+            Filter::PoolReplica(pool_id, replica_id) => {
+                vec![
+                    self.registry
+                        .get_pool_replica(&pool_id, &replica_id)
+                        .await?,
+                ]
+            }
+            Filter::Replica(replica_id) => {
+                vec![self.registry.get_replica(&replica_id).await?]
+            }
+            _ => {
+                return Err(SvcError::InvalidFilter {
+                    filter,
+                })
+            }
+        }))
+    }
+
+    /// Create pool
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_pool(
+        &self,
+        request: &CreatePool,
+    ) -> Result<Pool, SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.create_pool(request).await
+    }
+
+    /// Destroy pool
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_pool(
+        &self,
+        request: &DestroyPool,
+    ) -> Result<(), SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.destroy_pool(request).await
+    }
+
+    /// Create replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_replica(
+        &self,
+        request: &CreateReplica,
+    ) -> Result<Replica, SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.create_replica(request).await
+    }
+
+    /// Destroy replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_replica(
+        &self,
+        request: &DestroyReplica,
+    ) -> Result<(), SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.destroy_replica(&request).await
+    }
+
+    /// Share replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn share_replica(
+        &self,
+        request: &ShareReplica,
+    ) -> Result<String, SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.share_replica(&request).await
+    }
+
+    /// Unshare replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn unshare_replica(
+        &self,
+        request: &UnshareReplica,
+    ) -> Result<(), SvcError> {
+        let node = self
+            .registry
+            .get_node_wrapper(&request.node)
+            .await
+            .context(NodeNotFound {
+                node_id: request.node.clone(),
+            })?;
+        node.unshare_replica(&request).await
+    }
+}
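
Note: all of these handlers carry `#[tracing::instrument(level = "debug", err)]`: the arguments are recorded as span fields at debug level and any returned Err is logged through the span. In isolation, with a hypothetical function carrying the same attribute:

    // Sketch only: the instrument(err) behaviour used on the service methods above.
    #[tracing::instrument(level = "debug", err)]
    async fn demo(request_id: u32) -> Result<(), std::io::Error> {
        // this Err is emitted as an error event inside the `demo` span,
        // together with the `request_id` field captured on entry
        Err(std::io::Error::new(std::io::ErrorKind::Other, "boom"))
    }
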
diff --git a/control-plane/agents/core/src/server.rs b/control-plane/agents/core/src/server.rs
new file mode 100644
index 000000000..3d57cd4f2
--- /dev/null
+++ b/control-plane/agents/core/src/server.rs
@@ -0,0 +1,176 @@
+pub mod core;
+pub mod node;
+pub mod pool;
+pub mod volume;
+
+use crate::core::registry;
+use common::*;
+use mbus_api::v0::ChannelVs;
+use structopt::StructOpt;
+use tracing::info;
+
+#[derive(Debug, StructOpt)]
+pub(crate) struct CliArgs {
+    /// The Nats Server URL to connect to
+    /// (supports the nats schema)
+    /// Default: nats://127.0.0.1:4222
+    #[structopt(long, short, default_value = "nats://127.0.0.1:4222")]
+    pub(crate) nats: String,
+
+    /// The period at which the registry updates its cache of all
+    /// resources from all nodes
+    #[structopt(long, short, default_value = "20s")]
+    pub(crate) cache_period: humantime::Duration,
+
+    /// Deadline for the mayastor instance keep alive registration
+    /// Default: 10s
+    #[structopt(long, short, default_value = "10s")]
+    pub(crate) deadline: humantime::Duration,
+}
+
+fn init_tracing() {
+    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
+        tracing_subscriber::fmt().with_env_filter(filter).init();
+    } else {
+        tracing_subscriber::fmt().with_env_filter("info").init();
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    init_tracing();
+
+    let cli_args = CliArgs::from_args();
+    info!("Using options: {:?}", &cli_args);
+
+    server(cli_args).await;
+}
+
+async fn server(cli_args: CliArgs) {
+    Service::builder(cli_args.nats, ChannelVs::Core)
+        .with_default_liveness()
+        .connect_message_bus()
+        .await
+        .with_shared_state(registry::Registry::new(
+            CliArgs::from_args().cache_period.into(),
+        ))
+        .configure(node::configure)
+        .configure(pool::configure)
+        .configure(volume::configure)
+        .run()
+        .await;
+}
+
+/// Constructs a service handler for `RequestType` which gets redirected to a
+/// Service Handler named `ServiceFnName`
+#[macro_export]
+macro_rules! impl_request_handler {
+    ($RequestType:ident, $ServiceFnName:ident) => {
+        /// Needed so we can implement the ServiceSubscriber trait for
+        /// the message types external to the crate
+        #[derive(Clone, Default)]
+        struct ServiceHandler<T> {
+            data: PhantomData<T>,
+        }
+        #[async_trait]
+        impl common::ServiceSubscriber for ServiceHandler<$RequestType> {
+            async fn handler(
+                &self,
+                args: common::Arguments<'_>,
+            ) -> Result<(), SvcError> {
+                let request: ReceivedMessage<$RequestType> =
+                    args.request.try_into()?;
+
+                let service: &service::Service = args.context.get_state()?;
+                let reply = service.$ServiceFnName(&request.inner()).await?;
+                Ok(request.reply(reply).await?)
+            }
+            fn filter(&self) -> Vec<MessageId> {
+                vec![$RequestType::default().id()]
+            }
+        }
+    };
+}
+
+/// Constructs a service handler for `PublishType` which gets redirected to a
+/// Service Handler named `ServiceFnName`
+#[macro_export]
+macro_rules! impl_publish_handler {
+    ($PublishType:ident, $ServiceFnName:ident) => {
+        /// Needed so we can implement the ServiceSubscriber trait for
+        /// the message types external to the crate
+        #[derive(Clone, Default)]
+        struct ServiceHandler<T> {
+            data: PhantomData<T>,
+        }
+        #[async_trait]
+        impl common::ServiceSubscriber for ServiceHandler<$PublishType> {
+            async fn handler(
+                &self,
+                args: common::Arguments<'_>,
+            ) -> Result<(), SvcError> {
+                let request: ReceivedMessage<$PublishType> =
+                    args.request.try_into()?;
+
+                let service: &service::Service = args.context.get_state()?;
+                service.$ServiceFnName(&request.inner()).await;
+                Ok(())
+            }
+            fn filter(&self) -> Vec<MessageId> {
+                vec![$PublishType::default().id()]
+            }
+        }
+    };
+}
+
+/// Constructs and calls out to a service handler for `RequestType` which gets
+/// redirected to a Service Handler where its name is either:
+/// `RequestType` as a snake lowercase (default) or
+/// `ServiceFn` parameter (if provided)
+#[macro_export]
+macro_rules! handler {
+    ($RequestType:ident) => {{
+        paste::paste! {
+            impl_request_handler!(
+                $RequestType,
+                [<$RequestType:snake:lower>]
+            );
+        }
+        ServiceHandler::<$RequestType>::default()
+    }};
+    ($RequestType:ident, $ServiceFn:ident) => {{
+        paste::paste! {
+            impl_request_handler!(
+                $RequestType,
+                $ServiceFn
+            );
+        }
+        ServiceHandler::<$RequestType>::default()
+    }};
+}
+
+/// Constructs and calls out to a service handler for `PublishType` which gets
+/// redirected to a Service Handler where its name is either:
+/// `PublishType` as a snake lowercase (default) or
+/// `ServiceFn` parameter (if provided)
+#[macro_export]
+macro_rules! handler_publish {
+    ($RequestType:ident) => {{
+        paste::paste! {
+            impl_publish_handler!(
+                $RequestType,
+                [<$RequestType:snake:lower>]
+            );
+        }
+        ServiceHandler::<$RequestType>::default()
+    }};
+    ($RequestType:ident, $ServiceFn:ident) => {{
+        paste::paste! {
+            impl_publish_handler!(
+                $RequestType,
+                $ServiceFn
+            );
+        }
+        ServiceHandler::<$RequestType>::default()
+    }};
+}
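
Note: for reference, `handler!(GetNodes)` derives `get_nodes` from the request type name via `paste` and expands to approximately the following (a hand-expanded sketch of the macros above):

    // Approximate expansion of handler!(GetNodes); [<GetNodes:snake:lower>] == get_nodes.
    #[derive(Clone, Default)]
    struct ServiceHandler<T> {
        data: PhantomData<T>,
    }
    #[async_trait]
    impl common::ServiceSubscriber for ServiceHandler<GetNodes> {
        async fn handler(
            &self,
            args: common::Arguments<'_>,
        ) -> Result<(), SvcError> {
            let request: ReceivedMessage<GetNodes> = args.request.try_into()?;
            let service: &service::Service = args.context.get_state()?;
            let reply = service.get_nodes(&request.inner()).await?;
            Ok(request.reply(reply).await?)
        }
        fn filter(&self) -> Vec<MessageId> {
            vec![GetNodes::default().id()]
        }
    }
    // ...and the handler!() block evaluates to ServiceHandler::<GetNodes>::default().
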
diff --git a/control-plane/agents/volume/src/server.rs b/control-plane/agents/core/src/volume/mod.rs
similarity index 55%
rename from control-plane/agents/volume/src/server.rs
rename to control-plane/agents/core/src/volume/mod.rs
index ced17233e..4f08b1ef7 100644
--- a/control-plane/agents/volume/src/server.rs
+++ b/control-plane/agents/core/src/volume/mod.rs
@@ -1,114 +1,30 @@
-pub mod service;
+pub(crate) mod registry;
+mod service;
 
-use async_trait::async_trait;
-use common::*;
-use mbus_api::{v0::*, *};
-use service::*;
 use std::{convert::TryInto, marker::PhantomData};
-use structopt::StructOpt;
-use tracing::info;
-
-#[derive(Debug, StructOpt)]
-struct CliArgs {
-    /// The Nats Server URL to connect to
-    /// (supports the nats schema)
-    /// Default: nats://127.0.0.1:4222
-    #[structopt(long, short, default_value = "nats://127.0.0.1:4222")]
-    nats: String,
-
-    /// The period at which the registry updates its cache of all
-    /// resources from all nodes
-    #[structopt(long, short, default_value = "20s")]
-    period: humantime::Duration,
-}
-
-/// Needed so we can implement the ServiceSubscriber trait for
-/// the message types external to the crate
-#[derive(Clone, Default)]
-struct ServiceHandler<T> {
-    data: PhantomData<T>,
-}
-
-macro_rules! impl_service_handler {
-    // RequestType is the message bus request type
-    // ServiceFnName is the name of the service function to route the request
-    // into
-    ($RequestType:ident, $ServiceFnName:ident) => {
-        #[async_trait]
-        impl ServiceSubscriber for ServiceHandler<$RequestType> {
-            async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> {
-                let request: ReceivedMessage<$RequestType> =
-                    args.request.try_into()?;
-
-                let service: &VolumeSvc = args.context.get_state()?;
-                let reply = service
-                    .$ServiceFnName(&request.inner())
-                    .await
-                    .map_err(|error| Error::ServiceError {
-                        message: error.full_string(),
-                    })?;
-                request.reply(reply).await
-            }
-            fn filter(&self) -> Vec<MessageId> {
-                vec![$RequestType::default().id()]
-            }
-        }
-    };
-}
-
-// todo:
-// a service handler can actually specify a vector of message filters so could
-// indeed do the filtering at our service specific code and have a single
-// entrypoint here
-// nexus
-impl_service_handler!(GetNexuses, get_nexuses);
-impl_service_handler!(CreateNexus, create_nexus);
-impl_service_handler!(DestroyNexus, destroy_nexus);
-impl_service_handler!(ShareNexus, share_nexus);
-impl_service_handler!(UnshareNexus, unshare_nexus);
-impl_service_handler!(AddNexusChild, add_nexus_child);
-impl_service_handler!(RemoveNexusChild, remove_nexus_child);
-// volumes
-impl_service_handler!(GetVolumes, get_volumes);
-impl_service_handler!(CreateVolume, create_volume);
-impl_service_handler!(DestroyVolume, destroy_volume);
-
-fn init_tracing() {
-    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
-        tracing_subscriber::fmt().with_env_filter(filter).init();
-    } else {
-        tracing_subscriber::fmt().with_env_filter("info").init();
-    }
-}
-
-#[tokio::main]
-async fn main() {
-    init_tracing();
-
-    let cli_args = CliArgs::from_args();
-    info!("Using options: {:?}", &cli_args);
-
-    server(cli_args).await;
-}
+use super::{core::registry::Registry, handler, impl_request_handler};
+use async_trait::async_trait;
+use common::errors::SvcError;
+use mbus_api::{v0::*, *};
 
-async fn server(cli_args: CliArgs) {
-    Service::builder(cli_args.nats, ChannelVs::Volume)
-        .connect()
-        .await
-        .with_shared_state(VolumeSvc::new(cli_args.period.into()))
+pub(crate) fn configure(builder: common::Service) -> common::Service {
+    let registry = builder.get_shared_state::<Registry>().clone();
+    builder
+        .with_channel(ChannelVs::Volume)
         .with_default_liveness()
-        .with_subscription(ServiceHandler::<GetVolumes>::default())
-        .with_subscription(ServiceHandler::<CreateVolume>::default())
-        .with_subscription(ServiceHandler::<DestroyVolume>::default())
+        .with_shared_state(service::Service::new(registry))
+        .with_subscription(handler!(GetVolumes))
+        .with_subscription(handler!(CreateVolume))
+        .with_subscription(handler!(DestroyVolume))
         .with_channel(ChannelVs::Nexus)
-        .with_subscription(ServiceHandler::<GetNexuses>::default())
-        .with_subscription(ServiceHandler::<CreateNexus>::default())
-        .with_subscription(ServiceHandler::<DestroyNexus>::default())
-        .with_subscription(ServiceHandler::<ShareNexus>::default())
-        .with_subscription(ServiceHandler::<UnshareNexus>::default())
-        .with_subscription(ServiceHandler::<AddNexusChild>::default())
-        .with_subscription(ServiceHandler::<RemoveNexusChild>::default())
-        .run()
-        .await;
+        .with_subscription(handler!(GetNexuses))
+        .with_subscription(handler!(CreateNexus))
+        .with_subscription(handler!(DestroyNexus))
+        .with_subscription(handler!(ShareNexus))
+        .with_subscription(handler!(UnshareNexus))
+        .with_subscription(handler!(AddNexusChild))
+        .with_subscription(handler!(RemoveNexusChild))
 }
 
 #[cfg(test)]
@@ -124,9 +40,7 @@ mod tests {
     }
    // to avoid waiting for timeouts
     async fn orderly_start(test: &ComposeTest) {
-        test.start_containers(vec!["nats", "node", "pool", "volume"])
-            .await
-            .unwrap();
+        test.start_containers(vec!["nats", "core"]).await.unwrap();
 
         test.connect_to_bus("nats").await;
         wait_for_services().await;
@@ -147,25 +61,20 @@ mod tests {
         let test = Builder::new()
             .name("volume")
             .add_container_bin("nats", Binary::from_nix("nats-server"))
-            .add_container_bin("node", Binary::from_dbg("node").with_nats("-n"))
-            .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n"))
-            .add_container_bin(
-                "volume",
-                Binary::from_dbg("volume").with_nats("-n"),
-            )
+            .add_container_bin("core", Binary::from_dbg("core").with_nats("-n"))
             .add_container_bin(
                 "mayastor",
                 Binary::from_dbg("mayastor")
                     .with_nats("-n")
                     .with_args(vec!["-N", mayastor])
-                    .with_args(vec!["-g", "10.1.0.6:10124"]),
+                    .with_args(vec!["-g", "10.1.0.4:10124"]),
             )
             .add_container_bin(
                 "mayastor2",
                 Binary::from_dbg("mayastor")
                     .with_nats("-n")
                     .with_args(vec!["-N", mayastor2])
-                    .with_args(vec!["-g", "10.1.0.7:10124"]),
+                    .with_args(vec!["-g", "10.1.0.5:10124"]),
             )
             .with_default_tracing()
             .autorun(false)
diff --git a/control-plane/agents/core/src/volume/registry.rs b/control-plane/agents/core/src/volume/registry.rs
new file mode 100644
index 000000000..66020d15e
--- /dev/null
+++ b/control-plane/agents/core/src/volume/registry.rs
@@ -0,0 +1,72 @@
+use crate::core::{registry::Registry, wrapper::*};
+use common::errors::{NexusNotFound, NodeNotFound, SvcError};
+use mbus_api::v0::{Nexus, NexusId, NodeId};
+use snafu::OptionExt;
+
+/// Nexus helpers
+impl Registry {
+    /// Get all nexuses from node `node_id` or from all nodes
+    pub(crate) async fn get_node_opt_nexuses(
+        &self,
+        node_id: Option<NodeId>,
+    ) -> Result<Vec<Nexus>, SvcError> {
+        Ok(match node_id {
+            None => self.get_nexuses().await,
+            Some(node_id) => self.get_node_nexuses(&node_id).await?,
+        })
+    }
+
+    /// Get all nexuses from node `node_id`
+    pub(crate) async fn get_node_nexuses(
+        &self,
+        node_id: &NodeId,
+    ) -> Result<Vec<Nexus>, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        Ok(node.nexuses().await)
+    }
+
+    /// Get nexus `nexus_id` from node `node_id`
+    pub(crate) async fn get_node_nexus(
+        &self,
+        node_id: &NodeId,
+        nexus_id: &NexusId,
+    ) -> Result<Nexus, SvcError> {
+        let node =
+            self.get_node_wrapper(node_id).await.context(NodeNotFound {
+                node_id: node_id.clone(),
+            })?;
+        let nexus = node.nexus(nexus_id).await.context(NexusNotFound {
+            nexus_id: nexus_id.clone(),
+        })?;
+        Ok(nexus)
+    }
+
+    /// Get nexus `nexus_id`
+    pub(crate) async fn get_nexus(
+        &self,
+        nexus_id: &NexusId,
+    ) -> Result<Nexus, SvcError> {
+        let nodes = self.get_nodes_wrapper().await;
+        for node in nodes {
+            if let Some(nexus) = node.nexus(nexus_id).await {
+                return Ok(nexus);
+            }
+        }
+        Err(common::errors::SvcError::NexusNotFound {
+            nexus_id: nexus_id.to_string(),
+        })
+    }
+
+    /// Get all nexuses
+    pub(crate) async fn get_nexuses(&self) -> Vec<Nexus> {
+        let nodes = self.get_nodes_wrapper().await;
+        let mut nexuses = vec![];
+        for node in nodes {
+            nexuses.extend(node.nexuses().await);
+        }
+        nexuses
+    }
+}
diff --git a/control-plane/agents/volume/src/service.rs b/control-plane/agents/core/src/volume/service.rs
similarity index 65%
rename from control-plane/agents/volume/src/service.rs
rename to control-plane/agents/core/src/volume/service.rs
index 5c455e3c7..ef4b3f9d5 100644
--- a/control-plane/agents/volume/src/service.rs
+++ b/control-plane/agents/core/src/volume/service.rs
@@ -1,37 +1,46 @@
-#![allow(clippy::unit_arg)] +use crate::core::{registry::Registry, wrapper::ClientOps}; +use common::errors::{NodeNotFound, NotEnough, SvcError}; +use mbus_api::{ + v0::{ + AddNexusChild, + Child, + CreateNexus, + CreateReplica, + CreateVolume, + DestroyNexus, + DestroyReplica, + DestroyVolume, + Filter, + GetNexuses, + GetVolumes, + Nexus, + NexusId, + NexusState, + Nexuses, + PoolState, + Protocol, + RemoveNexusChild, + ReplicaId, + ShareNexus, + UnshareNexus, + Volume, + VolumeId, + Volumes, + }, + ErrorChain, +}; +use snafu::OptionExt; -use super::*; -use common::wrapper::v0::*; - -/// Volume service implementation methods -#[derive(Clone, Debug, Default)] -pub(super) struct VolumeSvc { - registry: Registry, +#[derive(Debug, Clone)] +pub(super) struct Service { + registry: Registry, } -impl VolumeSvc { - /// New Service with the update `period` - pub fn new(period: std::time::Duration) -> Self { - let obj = Self { - registry: Registry::new(period), - }; - obj.start(); - obj - } - /// Start registry poller - fn start(&self) { - self.registry.start(); - } - - /// Get all nexuses from node or from all nodes - async fn get_node_nexuses( - &self, - node_id: Option, - ) -> Result, SvcError> { - Ok(match node_id { - None => self.registry.list_nexuses().await, - Some(node_id) => self.registry.list_node_nexuses(&node_id).await, - }) +impl Service { + pub(super) fn new(registry: Registry) -> Self { + Self { + registry, + } } /// Get nexuses according to the filter @@ -42,25 +51,18 @@ impl VolumeSvc { ) -> Result { let filter = request.filter.clone(); let nexuses = match filter { - Filter::None => self.get_node_nexuses(None).await?, + Filter::None => self.registry.get_node_opt_nexuses(None).await?, Filter::Node(node_id) => { - self.get_node_nexuses(Some(node_id)).await? + self.registry.get_node_nexuses(&node_id).await? 
} Filter::NodeNexus(node_id, nexus_id) => { - let nexuses = self.get_node_nexuses(Some(node_id)).await?; - nexuses - .iter() - .filter(|&n| n.uuid == nexus_id) - .cloned() - .collect() + let nexus = + self.registry.get_node_nexus(&node_id, &nexus_id).await?; + vec![nexus] } Filter::Nexus(nexus_id) => { - let nexuses = self.get_node_nexuses(None).await?; - nexuses - .iter() - .filter(|&n| n.uuid == nexus_id) - .cloned() - .collect() + let nexus = self.registry.get_nexus(&nexus_id).await?; + vec![nexus] } _ => { return Err(SvcError::InvalidFilter { @@ -77,7 +79,14 @@ impl VolumeSvc { &self, request: &CreateNexus, ) -> Result { - self.registry.create_nexus(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.create_nexus(request).await } /// Destroy nexus @@ -86,7 +95,14 @@ impl VolumeSvc { &self, request: &DestroyNexus, ) -> Result<(), SvcError> { - self.registry.destroy_nexus(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.destroy_nexus(request).await } /// Share nexus @@ -95,7 +111,14 @@ impl VolumeSvc { &self, request: &ShareNexus, ) -> Result { - self.registry.share_nexus(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.share_nexus(request).await } /// Unshare nexus @@ -104,7 +127,14 @@ impl VolumeSvc { &self, request: &UnshareNexus, ) -> Result<(), SvcError> { - self.registry.unshare_nexus(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.unshare_nexus(request).await } /// Add nexus child @@ -113,7 +143,14 @@ impl VolumeSvc { &self, request: &AddNexusChild, ) -> Result { - self.registry.add_nexus_child(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.add_child(request).await } /// Remove nexus child @@ -122,7 +159,14 @@ impl VolumeSvc { &self, request: &RemoveNexusChild, ) -> Result<(), SvcError> { - self.registry.remove_nexus_child(request).await + let node = self + .registry + .get_node_wrapper(&request.node) + .await + .context(NodeNotFound { + node_id: request.node.clone(), + })?; + node.remove_child(request).await } /// Get volumes @@ -131,7 +175,7 @@ impl VolumeSvc { &self, request: &GetVolumes, ) -> Result { - let nexus = self.registry.list_nexuses().await; + let nexus = self.registry.get_nexuses().await; Ok(Volumes( nexus .iter() @@ -152,7 +196,7 @@ impl VolumeSvc { request: &CreateVolume, ) -> Result { // should we just use the cache here? 
- let pools = self.registry.fetch_pools_wrapper().await; + let pools = self.registry.get_pools_wrapper().await?; let size = request.size; let replicas = request.replicas; @@ -164,9 +208,7 @@ impl VolumeSvc { } if request.nexuses > 1 { - tracing::warn!( - "Multiple nexus per volume is not currently working" - ); + panic!("ANA volumes is not currently supported"); } // filter pools according to the following criteria (any order): @@ -180,7 +222,7 @@ impl VolumeSvc { .iter() .filter(|&p| { // required nodes, if any - allowed_nodes.is_empty() || allowed_nodes.contains(&p.node()) + allowed_nodes.is_empty() || allowed_nodes.contains(&p.node) }) .filter(|&p| { // enough free space @@ -188,8 +230,7 @@ impl VolumeSvc { }) .filter(|&p| { // but preferably (the sort will sort this out for us) - p.state() != PoolState::Faulted - && p.state() != PoolState::Unknown + p.state != PoolState::Faulted && p.state != PoolState::Unknown }) .collect::>(); @@ -209,9 +250,9 @@ impl VolumeSvc { let mut replicas = vec![]; while let Some(pool) = pools.pop() { let create_replica = CreateReplica { - node: pool.node(), + node: pool.node.clone(), uuid: ReplicaId::from(request.uuid.as_str()), - pool: pool.uuid(), + pool: pool.id.clone(), size: request.size, thin: true, share: if replicas.is_empty() { @@ -224,7 +265,14 @@ impl VolumeSvc { Protocol::Nvmf }, }; - let replica = self.registry.create_replica(&create_replica).await; + let node = self + .registry + .get_node_wrapper(&create_replica.node) + .await + .context(NodeNotFound { + node_id: create_replica.node.clone(), + })?; + let replica = node.create_replica(&create_replica).await; if let Ok(replica) = replica { replicas.push(replica); } else { @@ -257,7 +305,7 @@ impl VolumeSvc { .collect(), }; - match self.registry.create_nexus(&create_nexus).await { + match self.create_nexus(&create_nexus).await { Ok(nexus) => { nexuses.push(nexus); } @@ -305,31 +353,37 @@ impl VolumeSvc { &self, request: &DestroyVolume, ) -> Result<(), SvcError> { - let nexuses = self.registry.list_nexuses().await; + let nexuses = self.registry.get_nexuses().await; let nexuses = nexuses .iter() .filter(|n| n.uuid.as_str() == request.uuid.as_str()) .collect::>(); + for nexus in nexuses { - self.registry - .destroy_nexus(&DestroyNexus { - node: nexus.node.clone(), - uuid: NexusId::from(request.uuid.as_str()), - }) - .await?; + self.destroy_nexus(&DestroyNexus { + node: nexus.node.clone(), + uuid: NexusId::from(request.uuid.as_str()), + }) + .await?; for child in &nexus.children { - let replicas = self.registry.list_replicas().await; + let replicas = self.registry.get_replicas().await?; let replica = replicas .iter() .find(|r| r.uri.as_str() == child.uri.as_str()); if let Some(replica) = replica { - self.registry - .destroy_replica(&DestroyReplica { - node: replica.node.clone(), - pool: replica.pool.clone(), - uuid: replica.uuid.clone(), - }) - .await?; + let node = self + .registry + .get_node_wrapper(&replica.node) + .await + .context(NodeNotFound { + node_id: replica.node.clone(), + })?; + node.destroy_replica(&DestroyReplica { + node: replica.node.clone(), + pool: replica.pool.clone(), + uuid: replica.uuid.clone(), + }) + .await?; } } } diff --git a/control-plane/agents/examples/service/main.rs b/control-plane/agents/examples/service/main.rs index 4417743a5..c23746430 100644 --- a/control-plane/agents/examples/service/main.rs +++ b/control-plane/agents/examples/service/main.rs @@ -1,6 +1,6 @@ use async_trait::async_trait; -use common::*; -use mbus_api::*; +use common::{errors::SvcError, *}; +use 
mbus_api::{v0, *}; use serde::{Deserialize, Serialize}; use std::{convert::TryInto, marker::PhantomData}; use structopt::StructOpt; @@ -35,14 +35,14 @@ bus_impl_message_all!(GetSvcName, Default, SvcName, Default); #[async_trait] impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), SvcError> { let msg: ReceivedMessage = args.request.try_into()?; let reply = SvcName("example".into()); println!("Received {:?} and replying {:?}", msg.inner(), reply); - msg.reply(reply).await + Ok(msg.reply(reply).await?) } fn filter(&self) -> Vec { vec![GetSvcName::default().id()] diff --git a/control-plane/agents/jsongrpc/src/server.rs b/control-plane/agents/jsongrpc/src/server.rs index cddd36039..5158c2c40 100644 --- a/control-plane/agents/jsongrpc/src/server.rs +++ b/control-plane/agents/jsongrpc/src/server.rs @@ -1,7 +1,7 @@ pub mod service; use async_trait::async_trait; -use common::*; +use common::{errors::SvcError, *}; use mbus_api::{v0::*, *}; use service::*; use std::{convert::TryInto, marker::PhantomData}; @@ -31,16 +31,16 @@ macro_rules! impl_service_handler { ($RequestType:ident, $ServiceFnName:ident) => { #[async_trait] impl ServiceSubscriber for ServiceHandler<$RequestType> { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + async fn handler( + &self, + args: Arguments<'_>, + ) -> Result<(), SvcError> { let request: ReceivedMessage<$RequestType> = args.request.try_into()?; - let reply = JsonGrpcSvc::$ServiceFnName(&request.inner()) - .await - .map_err(|error| Error::ServiceError { - message: error.full_string(), - })?; - request.reply(reply).await + let reply = + JsonGrpcSvc::$ServiceFnName(&request.inner()).await?; + Ok(request.reply(reply).await?) 
} fn filter(&self) -> Vec { vec![$RequestType::default().id()] @@ -71,7 +71,7 @@ async fn main() { async fn server(cli_args: CliArgs) { Service::builder(cli_args.nats, ChannelVs::JsonGrpc) - .connect() + .connect_message_bus() .await .with_subscription(ServiceHandler::::default()) .with_default_liveness() diff --git a/control-plane/agents/jsongrpc/src/service.rs b/control-plane/agents/jsongrpc/src/service.rs index 1235058de..cbcff2ea6 100644 --- a/control-plane/agents/jsongrpc/src/service.rs +++ b/control-plane/agents/jsongrpc/src/service.rs @@ -2,7 +2,7 @@ #![allow(clippy::unit_arg)] use ::rpc::mayastor::{JsonRpcReply, JsonRpcRequest}; -use common::wrapper::v0::{BusGetNode, JsonRpcDeserialise, SvcError}; +use common::errors::{BusGetNode, JsonRpcDeserialise, SvcError}; use mbus_api::message_bus::v0::{MessageBus, *}; use rpc::mayastor::json_rpc_client::JsonRpcClient; use snafu::ResultExt; diff --git a/control-plane/agents/node/src/server.rs b/control-plane/agents/node/src/server.rs deleted file mode 100644 index 254af7e81..000000000 --- a/control-plane/agents/node/src/server.rs +++ /dev/null @@ -1,367 +0,0 @@ -use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; - -use async_trait::async_trait; -use structopt::StructOpt; -use tokio::sync::Mutex; -use tracing::{error, info}; - -use ::rpc::mayastor::{ - mayastor_client::MayastorClient, - ListBlockDevicesRequest, -}; -use common::{wrapper::v0::msg_translation::RpcToMessageBus, *}; -use mbus_api::{v0::*, *}; - -#[derive(Debug, StructOpt)] -struct CliArgs { - /// The Nats Server URL to connect to - /// (supports the nats schema) - /// Default: nats://127.0.0.1:4222 - #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] - nats: String, - /// Deadline for the mayastor instance keep alive registration - /// Default: 10s - #[structopt(long, short, default_value = "10s")] - deadline: humantime::Duration, -} - -/// Needed so we can implement the ServiceSubscriber trait for -/// the message types external to the crate -#[derive(Clone, Default)] -struct ServiceHandler { - data: PhantomData, -} - -/// Watchdog with which must be pet within the deadline, otherwise -/// it triggers the `on_timeout` future -#[derive(Clone)] -struct Watchdog { - deadline: std::time::Duration, - pet_chan: tokio::sync::mpsc::Sender<()>, -} - -impl Watchdog { - /// new empty watchdog with a timeout - pub fn new(deadline: std::time::Duration) -> Self { - Self { - deadline, - pet_chan: tokio::sync::mpsc::channel(1).0, - } - } - - /// arm watchdog with self timeout and execute error callback if - /// the deadline is not met - pub fn arm(&mut self, on_timeout: T) - where - T: std::future::Future + Send + 'static, - T::Output: Send + 'static, - { - let deadline = self.deadline; - let (s, mut r) = tokio::sync::mpsc::channel(1); - self.pet_chan = s; - tokio::spawn(async move { - let result = tokio::time::timeout(deadline, r.recv()).await; - if result.is_err() { - on_timeout.await; - } - }); - } - - /// meet the deadline - #[allow(dead_code)] - pub async fn pet( - &mut self, - ) -> Result<(), tokio::sync::mpsc::error::SendError<()>> { - self.pet_chan.send(()).await - } -} - -/// In memory database of all nodes which we know of and their state -#[derive(Default, Clone)] -struct NodeStore { - inner: std::sync::Arc, -} -struct NodeStoreInner { - state: Mutex>, - deadline: std::time::Duration, -} -impl Default for NodeStoreInner { - fn default() -> Self { - Self { - deadline: CliArgs::from_args().deadline.into(), - state: Default::default(), - } - } -} - 
-impl NodeStore { - /// Register a new node through the register information - async fn register(&self, registration: Register) { - let mut state = self.inner.state.lock().await; - - let mut watchdog = Watchdog::new(self.inner.deadline); - let id = registration.id.clone(); - let store = self.clone(); - let deadline = self.inner.deadline; - watchdog.arm(async move { - error!( - "Node id {} missed the registration deadline of {:?}!", - id, deadline - ); - store.offline(id).await; - }); - - let id = registration.id.clone(); - let node = Node { - id: registration.id, - grpc_endpoint: registration.grpc_endpoint, - state: NodeState::Online, - }; - state.insert(id, (node, watchdog)); - } - /// Deregister a node through the deregister information - async fn deregister(&self, node: Deregister) { - let mut state = self.inner.state.lock().await; - state.remove(&node.id); - } - /// Offline node through its id - async fn offline(&self, id: NodeId) { - let mut state = self.inner.state.lock().await; - if let Some(n) = state.get_mut(&id) { - n.0.state = NodeState::Offline; - } - } - /// Get the list of nodes which we know of - async fn get_nodes(&self) -> Vec { - let nodes = self.inner.state.lock().await; - nodes - .values() - .cloned() - .collect::>() - .into_iter() - .map(|(n, _)| n) - .collect() - } -} - -#[async_trait] -impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let store: &NodeStore = args.context.get_state()?; - store.register(args.request.inner()?).await; - Ok(()) - } - fn filter(&self) -> Vec { - vec![Register::default().id()] - } -} - -#[async_trait] -impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let store: &NodeStore = args.context.get_state()?; - store.deregister(args.request.inner()?).await; - Ok(()) - } - fn filter(&self) -> Vec { - vec![Deregister::default().id()] - } -} - -#[async_trait] -impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let request: ReceivedMessage = args.request.try_into()?; - - let store: &NodeStore = args.context.get_state()?; - let nodes = store.get_nodes().await; - request.reply(Nodes(nodes)).await - } - fn filter(&self) -> Vec { - vec![GetNodes::default().id()] - } -} - -#[async_trait] -impl ServiceSubscriber for ServiceHandler { - async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let request: ReceivedMessage = - args.request.try_into()?; - let store: &NodeStore = args.context.get_state()?; - let nodes = store - .get_nodes() - .await - .into_iter() - .filter(|n| n.id == request.inner().node) - .collect::>(); - - if nodes.is_empty() { - return Err(Error::ServiceError { - message: format!( - "Node with id {} not found", - request.inner().node - ), - }); - } - - // Only expect one node to match the given ID. - assert_eq!(nodes.len(), 1); - - let mut client = MayastorClient::connect(format!( - "http://{}", - nodes[0].grpc_endpoint - )) - .await - .unwrap(); - - // Issue the gRPC request - let response = client - .list_block_devices(ListBlockDevicesRequest { - all: request.inner().all, - }) - .await - .unwrap() - .into_inner(); - - // Convert the rpc types into message bus types before sending the - // reply. 
-        let bdevs = response
-            .devices
-            .iter()
-            .map(|rpc_bdev| rpc_bdev.to_mbus())
-            .collect::<Vec<BlockDevice>>();
-        request.reply(BlockDevices(bdevs)).await
-    }
-
-    fn filter(&self) -> Vec<MessageId> {
-        vec![GetBlockDevices::default().id()]
-    }
-}
-
-fn init_tracing() {
-    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
-        tracing_subscriber::fmt().with_env_filter(filter).init();
-    } else {
-        tracing_subscriber::fmt().with_env_filter("info").init();
-    }
-}
-
-#[tokio::main]
-async fn main() {
-    init_tracing();
-
-    let cli_args = CliArgs::from_args();
-    info!("Using options: {:?}", &cli_args);
-
-    server(cli_args).await;
-}
-
-async fn server(cli_args: CliArgs) {
-    Service::builder(cli_args.nats, ChannelVs::Registry)
-        .with_shared_state(NodeStore::default())
-        .with_subscription(ServiceHandler::<Register>::default())
-        .with_subscription(ServiceHandler::<Deregister>::default())
-        .with_channel(ChannelVs::Node)
-        .with_default_liveness()
-        .with_subscription(ServiceHandler::<GetNodes>::default())
-        .with_subscription(ServiceHandler::<GetBlockDevices>::default())
-        .run()
-        .await;
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use composer::*;
-    use rpc::mayastor::Null;
-
-    async fn bus_init() -> Result<(), Box<dyn std::error::Error>> {
-        tokio::time::timeout(std::time::Duration::from_secs(2), async {
-            mbus_api::message_bus_init("10.1.0.2".into()).await
-        })
-        .await?;
-        Ok(())
-    }
-    async fn wait_for_node() -> Result<(), Box<dyn std::error::Error>> {
-        let _ = GetNodes {}.request().await?;
-        Ok(())
-    }
-    fn init_tracing() {
-        if let Ok(filter) =
-            tracing_subscriber::EnvFilter::try_from_default_env()
-        {
-            tracing_subscriber::fmt().with_env_filter(filter).init();
-        } else {
-            tracing_subscriber::fmt().with_env_filter("info").init();
-        }
-    }
-    // to avoid waiting for timeouts
-    async fn orderly_start(
-        test: &ComposeTest,
-    ) -> Result<(), Box<dyn std::error::Error>> {
-        test.start_containers(vec!["nats", "node"]).await?;
-
-        bus_init().await?;
-        wait_for_node().await?;
-
-        test.start("mayastor").await?;
-
-        let mut hdl = test.grpc_handle("mayastor").await?;
-        hdl.mayastor.list_nexus(Null {}).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn node() {
-        init_tracing();
-        let maya_name = NodeId::from("node-test-name");
-        let test = Builder::new()
-            .name("node")
-            .add_container_bin(
-                "nats",
-                Binary::from_nix("nats-server").with_arg("-DV"),
-            )
-            .add_container_bin(
-                "node",
-                Binary::from_dbg("node")
-                    .with_nats("-n")
-                    .with_args(vec!["-d", "2sec"]),
-            )
-            .add_container_bin(
-                "mayastor",
-                Binary::from_dbg("mayastor")
-                    .with_nats("-n")
-                    .with_args(vec!["-N", maya_name.as_str()]),
-            )
-            .autorun(false)
-            .build()
-            .await
-            .unwrap();
-
-        orderly_start(&test).await.unwrap();
-
-        let nodes = GetNodes {}.request().await.unwrap();
-        tracing::info!("Nodes: {:?}", nodes);
-        assert_eq!(nodes.0.len(), 1);
-        assert_eq!(
-            nodes.0.first().unwrap(),
-            &Node {
-                id: maya_name.clone(),
-                grpc_endpoint: "0.0.0.0:10124".to_string(),
-                state: NodeState::Online,
-            }
-        );
-        tokio::time::delay_for(std::time::Duration::from_secs(2)).await;
-        let nodes = GetNodes {}.request().await.unwrap();
-        tracing::info!("Nodes: {:?}", nodes);
-        assert_eq!(nodes.0.len(), 1);
-        assert_eq!(
-            nodes.0.first().unwrap(),
-            &Node {
-                id: maya_name.clone(),
-                grpc_endpoint: "0.0.0.0:10124".to_string(),
-                state: NodeState::Offline,
-            }
-        );
-    }
-}
diff --git a/control-plane/agents/pool/src/server.rs b/control-plane/agents/pool/src/server.rs
deleted file mode 100644
index c50c3d141..000000000
--- a/control-plane/agents/pool/src/server.rs
+++ /dev/null
@@ -1,241 +0,0 @@
-pub mod service;
-
-use async_trait::async_trait;
-use common::*;
-use mbus_api::{v0::*, *};
-use service::*;
-use std::{convert::TryInto, marker::PhantomData};
-use structopt::StructOpt;
-use tracing::info;
-
-#[derive(Debug, StructOpt)]
-struct CliArgs {
-    /// The Nats Server URL to connect to
-    /// (supports the nats schema)
-    /// Default: nats://127.0.0.1:4222
-    #[structopt(long, short, default_value = "nats://127.0.0.1:4222")]
-    nats: String,
-
-    /// The period at which the registry updates its cache of all
-    /// resources from all nodes
-    #[structopt(long, short, default_value = "20s")]
-    period: humantime::Duration,
-}
-
-/// Needed so we can implement the ServiceSubscriber trait for
-/// the message types external to the crate
-#[derive(Clone, Default)]
-struct ServiceHandler<T> {
-    data: PhantomData<T>,
-}
-
-macro_rules! impl_service_handler {
-    // RequestType is the message bus request type
-    // ServiceFnName is the name of the service function to route the request
-    // into
-    ($RequestType:ident, $ServiceFnName:ident) => {
-        #[async_trait]
-        impl ServiceSubscriber for ServiceHandler<$RequestType> {
-            async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> {
-                let request: ReceivedMessage<$RequestType> =
-                    args.request.try_into()?;
-
-                let service: &PoolSvc = args.context.get_state()?;
-                let reply = service
-                    .$ServiceFnName(&request.inner())
-                    .await
-                    .map_err(|error| Error::ServiceError {
-                        message: error.full_string(),
-                    })?;
-                request.reply(reply).await
-            }
-            fn filter(&self) -> Vec<MessageId> {
-                vec![$RequestType::default().id()]
-            }
-        }
-    };
-}
-
-// todo:
-// a service handler can actually specify a vector of message filters so could
-// indeed do the filtering at our service specific code and have a single
-// entrypoint here nexus
-impl_service_handler!(GetPools, get_pools);
-impl_service_handler!(GetReplicas, get_replicas);
-impl_service_handler!(CreatePool, create_pool);
-impl_service_handler!(DestroyPool, destroy_pool);
-impl_service_handler!(CreateReplica, create_replica);
-impl_service_handler!(DestroyReplica, destroy_replica);
-impl_service_handler!(ShareReplica, share_replica);
-impl_service_handler!(UnshareReplica, unshare_replica);
-
-fn init_tracing() {
-    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
-        tracing_subscriber::fmt().with_env_filter(filter).init();
-    } else {
-        tracing_subscriber::fmt().with_env_filter("info").init();
-    }
-}
-
-#[tokio::main]
-async fn main() {
-    init_tracing();
-
-    let cli_args = CliArgs::from_args();
-    info!("Using options: {:?}", &cli_args);
-
-    server(cli_args).await;
-}
-
-async fn server(cli_args: CliArgs) {
-    Service::builder(cli_args.nats, ChannelVs::Pool)
-        .connect()
-        .await
-        .with_shared_state(PoolSvc::new(cli_args.period.into()))
-        .with_default_liveness()
-        .with_subscription(ServiceHandler::<GetPools>::default())
-        .with_subscription(ServiceHandler::<GetReplicas>::default())
-        .with_subscription(ServiceHandler::<CreatePool>::default())
-        .with_subscription(ServiceHandler::<DestroyPool>::default())
-        .with_subscription(ServiceHandler::<CreateReplica>::default())
-        .with_subscription(ServiceHandler::<DestroyReplica>::default())
-        .with_subscription(ServiceHandler::<ShareReplica>::default())
-        .with_subscription(ServiceHandler::<UnshareReplica>::default())
-        .run()
-        .await;
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use composer::*;
-    use rpc::mayastor::Null;
-
-    async fn wait_for_services() {
-        let _ = GetNodes {}.request().await.unwrap();
-        Liveness {}.request_on(ChannelVs::Pool).await.unwrap();
-    }
-    // to avoid waiting for timeouts
-    async fn orderly_start(test: &ComposeTest) {
-        test.start_containers(vec!["nats", "node", "pool"])
-            .await
-            .unwrap();
-
-        test.connect_to_bus("nats").await;
-        wait_for_services().await;
-
-        test.start("mayastor").await.unwrap();
-
-        let mut hdl = test.grpc_handle("mayastor").await.unwrap();
-        hdl.mayastor.list_nexus(Null {}).await.unwrap();
-    }
-
-    #[tokio::test]
-    async fn pool() {
-        let mayastor = "pool-test-name";
-        let test = Builder::new()
-            .name("pool")
-            .add_container_bin(
-                "nats",
-                Binary::from_nix("nats-server").with_arg("-DV"),
-            )
-            .add_container_bin("node", Binary::from_dbg("node").with_nats("-n"))
-            .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n"))
-            .add_container_bin(
-                "mayastor",
-                Binary::from_dbg("mayastor")
-                    .with_nats("-n")
-                    .with_args(vec!["-N", mayastor])
-                    .with_args(vec!["-g", "10.1.0.5:10124"]),
-            )
-            .with_default_tracing()
-            .autorun(false)
-            .build()
-            .await
-            .unwrap();
-
-        orderly_start(&test).await;
-
-        let nodes = GetNodes {}.request().await.unwrap();
-        tracing::info!("Nodes: {:?}", nodes);
-
-        CreatePool {
-            node: mayastor.into(),
-            id: "pooloop".into(),
-            disks: vec!["malloc:///disk0?size_mb=100".into()],
-        }
-        .request()
-        .await
-        .unwrap();
-
-        let pools = GetPools::default().request().await.unwrap();
-        tracing::info!("Pools: {:?}", pools);
-
-        let replica = CreateReplica {
-            node: mayastor.into(),
-            uuid: "replica1".into(),
-            pool: "pooloop".into(),
-            size: 12582912, /* actual size will be a multiple of 4MB so just
-                             * create it like so */
-            thin: true,
-            share: Protocol::Off,
-        }
-        .request()
-        .await
-        .unwrap();
-
-        let replicas = GetReplicas::default().request().await.unwrap();
-        tracing::info!("Replicas: {:?}", replicas);
-
-        assert_eq!(
-            replica,
-            Replica {
-                node: mayastor.into(),
-                uuid: "replica1".into(),
-                pool: "pooloop".into(),
-                thin: false,
-                size: 12582912,
-                share: Protocol::Off,
-                uri: "bdev:///replica1".into()
-            }
-        );
-
-        let uri = ShareReplica {
-            node: mayastor.into(),
-            uuid: "replica1".into(),
-            pool: "pooloop".into(),
-            protocol: Protocol::Nvmf,
-        }
-        .request()
-        .await
-        .unwrap();
-
-        let mut replica_updated = replica;
-        replica_updated.uri = uri;
-        replica_updated.share = Protocol::Nvmf;
-        let replica = GetReplicas::default().request().await.unwrap();
-        let replica = replica.0.first().unwrap();
-        assert_eq!(replica, &replica_updated);
-
-        DestroyReplica {
-            node: mayastor.into(),
-            uuid: "replica1".into(),
-            pool: "pooloop".into(),
-        }
-        .request()
-        .await
-        .unwrap();
-
-        assert!(GetReplicas::default().request().await.unwrap().0.is_empty());
-
-        DestroyPool {
-            node: mayastor.into(),
-            id: "pooloop".into(),
-        }
-        .request()
-        .await
-        .unwrap();
-
-        assert!(GetPools::default().request().await.unwrap().0.is_empty());
-    }
-}
diff --git a/control-plane/agents/pool/src/service.rs b/control-plane/agents/pool/src/service.rs
deleted file mode 100644
index 6164a6646..000000000
--- a/control-plane/agents/pool/src/service.rs
+++ /dev/null
@@ -1,196 +0,0 @@
-// clippy warning caused by the instrument macro
-#![allow(clippy::unit_arg)]
-
-use super::*;
-use common::wrapper::v0::*;
-
-/// Pool service implementation methods
-#[derive(Clone, Debug, Default)]
-pub(super) struct PoolSvc {
-    registry: Registry,
-}
-
-impl PoolSvc {
-    /// New Service with the update `period`
-    pub fn new(period: std::time::Duration) -> Self {
-        let obj = Self {
-            registry: Registry::new(period),
-        };
-        obj.start();
-        obj
-    }
-    /// Start registry poller
-    fn start(&self) {
-        self.registry.start();
-    }
-
-    /// Get all pools from node or from all nodes
-    async fn get_node_pools(
-        &self,
-        node_id: Option<NodeId>,
-    ) -> Result<Vec<Pool>, SvcError> {
-        Ok(match node_id {
-            None => self.registry.list_pools().await,
-            Some(node_id) => self.registry.list_node_pools(&node_id).await,
-        })
-    }
-
-    /// Get all replicas from node or from all nodes
-    async fn get_node_replicas(
-        &self,
-        node_id: Option<NodeId>,
-    ) -> Result<Vec<Replica>, SvcError> {
-        Ok(match node_id {
-            None => self.registry.list_replicas().await,
-            Some(node_id) => self.registry.list_node_replicas(&node_id).await,
-        })
-    }
-
-    /// Get pools according to the filter
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn get_pools(
-        &self,
-        request: &GetPools,
-    ) -> Result<Pools, SvcError> {
-        let filter = request.filter.clone();
-        Ok(Pools(match filter {
-            Filter::None => self.get_node_pools(None).await?,
-            Filter::Node(node_id) => self.get_node_pools(Some(node_id)).await?,
-            Filter::NodePool(node_id, pool_id) => {
-                let pools = self.get_node_pools(Some(node_id)).await?;
-                pools.iter().filter(|&p| p.id == pool_id).cloned().collect()
-            }
-            Filter::Pool(pool_id) => {
-                let pools = self.get_node_pools(None).await?;
-                pools.iter().filter(|&p| p.id == pool_id).cloned().collect()
-            }
-            _ => {
-                return Err(SvcError::InvalidFilter {
-                    filter,
-                })
-            }
-        }))
-    }
-
-    /// Get replicas according to the filter
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn get_replicas(
-        &self,
-        request: &GetReplicas,
-    ) -> Result<Replicas, SvcError> {
-        let filter = request.filter.clone();
-        Ok(Replicas(match filter {
-            Filter::None => self.get_node_replicas(None).await?,
-            Filter::Node(node_id) => {
-                self.get_node_replicas(Some(node_id)).await?
-            }
-            Filter::NodePool(node_id, pool_id) => {
-                let replicas = self.get_node_replicas(Some(node_id)).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.pool == pool_id)
-                    .cloned()
-                    .collect()
-            }
-            Filter::Pool(pool_id) => {
-                let replicas = self.get_node_replicas(None).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.pool == pool_id)
-                    .cloned()
-                    .collect()
-            }
-            Filter::NodePoolReplica(node_id, pool_id, replica_id) => {
-                let replicas = self.get_node_replicas(Some(node_id)).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.pool == pool_id && p.uuid == replica_id)
-                    .cloned()
-                    .collect()
-            }
-            Filter::NodeReplica(node_id, replica_id) => {
-                let replicas = self.get_node_replicas(Some(node_id)).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.uuid == replica_id)
-                    .cloned()
-                    .collect()
-            }
-            Filter::PoolReplica(pool_id, replica_id) => {
-                let replicas = self.get_node_replicas(None).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.pool == pool_id && p.uuid == replica_id)
-                    .cloned()
-                    .collect()
-            }
-            Filter::Replica(replica_id) => {
-                let replicas = self.get_node_replicas(None).await?;
-                replicas
-                    .iter()
-                    .filter(|&p| p.uuid == replica_id)
-                    .cloned()
-                    .collect()
-            }
-            _ => {
-                return Err(SvcError::InvalidFilter {
-                    filter,
-                })
-            }
-        }))
-    }
-
-    /// Create replica
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn create_replica(
-        &self,
-        request: &CreateReplica,
-    ) -> Result<Replica, SvcError> {
-        self.registry.create_replica(&request).await
-    }
-
-    /// Destroy replica
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn destroy_replica(
-        &self,
-        request: &DestroyReplica,
-    ) -> Result<(), SvcError> {
-        self.registry.destroy_replica(&request).await
-    }
-
-    /// Share replica
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn share_replica(
-        &self,
-        request: &ShareReplica,
-    ) -> Result<String, SvcError> {
-        self.registry.share_replica(&request).await
-    }
-
-    /// Unshare replica
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn unshare_replica(
-        &self,
-        request: &UnshareReplica,
-    ) -> Result<(), SvcError> {
-        self.registry.unshare_replica(&request).await
-    }
-
-    /// Create pool
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn create_pool(
-        &self,
-        request: &CreatePool,
-    ) -> Result<Pool, SvcError> {
-        self.registry.create_pool(request).await
-    }
-
-    /// Destroy pool
-    #[tracing::instrument(level = "debug", err)]
-    pub(super) async fn destroy_pool(
-        &self,
-        request: &DestroyPool,
-    ) -> Result<(), SvcError> {
-        self.registry.destroy_pool(request).await
-    }
-}
diff --git a/control-plane/deployer/src/infra/mod.rs b/control-plane/deployer/src/infra/mod.rs
index 385bfb10f..93fc8b05a 100644
--- a/control-plane/deployer/src/infra/mod.rs
+++ b/control-plane/deployer/src/infra/mod.rs
@@ -327,12 +327,10 @@ impl_component! {
     Dns, 1,
     Jaeger, 1,
     Rest, 2,
-    Node, 3,
-    Pool, 4,
-    Volume, 4,
+    Core, 3,
     JsonGrpc, 4,
     Mayastor, 5,
 }
 // Message Bus Control Plane Agents
-impl_ctrlp_agents!(Node, Pool, Volume, JsonGrpc);
+impl_ctrlp_agents!(Core, JsonGrpc);
diff --git a/control-plane/deployer/src/lib.rs b/control-plane/deployer/src/lib.rs
index 50586aa45..cb75aa8d0 100644
--- a/control-plane/deployer/src/lib.rs
+++ b/control-plane/deployer/src/lib.rs
@@ -47,7 +47,7 @@ pub struct ListOptions {
 }
 
 pub fn default_agents() -> &'static str {
-    "Node, Pool, Volume"
+    "Core"
 }
 
 #[derive(Debug, Default, Clone, StructOpt)]
diff --git a/control-plane/mbus-api/examples/server/main.rs b/control-plane/mbus-api/examples/server/main.rs
index ab77758b9..f24e0cb7b 100644
--- a/control-plane/mbus-api/examples/server/main.rs
+++ b/control-plane/mbus-api/examples/server/main.rs
@@ -120,8 +120,11 @@ async fn receive_v3(sub: &mut nats::asynk::Subscription, count: u64) {
         message.try_into().unwrap();
     message
         // same function can receive an error
-        .reply(Err(ReplyError::WithMessage {
-            message: format!("Fake Error {}", count),
+        .reply(Err(ReplyError {
+            kind: ReplyErrorKind::WithMessage,
+            resource: ResourceKind::Unknown,
+            source: "".to_string(),
+            extra: format!("Fake Error {}", count),
         }))
         .await
         .unwrap();
diff --git a/control-plane/mbus-api/src/lib.rs b/control-plane/mbus-api/src/lib.rs
index 8e2f4208a..9c3df523d 100644
--- a/control-plane/mbus-api/src/lib.rs
+++ b/control-plane/mbus-api/src/lib.rs
@@ -23,10 +23,11 @@ pub use mbus_nats::{
 };
 pub use receive::*;
 pub use send::*;
-use serde::{Deserialize, Serialize};
+use serde::{de::StdError, Deserialize, Serialize};
 use smol::io;
 use snafu::{ResultExt, Snafu};
 use std::{fmt::Debug, marker::PhantomData, str::FromStr, time::Duration};
+use strum_macros::{AsRefStr, ToString};
 
 /// Result wrapper for send/receive
 pub type BusResult<T> = Result<T, Error>;
@@ -107,8 +108,6 @@ pub enum Error {
     Subscribe { channel: String, source: io::Error },
     #[snafu(display("Reply message came back with an error"))]
     ReplyWithError { source: ReplyError },
-    #[snafu(display("Service error whilst handling request: {}", message))]
-    ServiceError { message: String },
 }
 
 /// Report error chain
@@ -284,17 +283,113 @@ struct SendPayload<T> {
     pub(crate) data: T,
 }
 
+/// All the different variants of Resources
+#[derive(Serialize, Deserialize, Debug, Clone, AsRefStr, ToString)]
+pub enum ResourceKind {
+    /// Unknown or unspecified resource
+    Unknown,
+    /// Node resource
+    Node,
+    /// Pool resource
+    Pool,
+    /// Replica resource
+    Replica,
+    /// Nexus resource
+    Nexus,
+    /// Child resource
+    Child,
+    /// Volume resource
+    Volume,
+    /// Json Grpc methods
+    JsonGrpc,
+    /// Block devices
+    Block,
+}
+
 /// Error type which is returned over the bus
 /// for any other operation
-#[derive(Serialize, Deserialize, Debug, Snafu, strum_macros::AsRefStr)]
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct ReplyError {
+    /// error kind
+    pub kind: ReplyErrorKind,
+    /// resource kind
+    pub resource: ResourceKind,
+    /// last source of this error
+    pub source: String,
+    /// extra information
+    pub extra: String,
+}
+
+impl StdError for ReplyError {}
+impl ReplyError {
+    /// extend error with source
+    /// useful when another error wraps around a `ReplyError` and we want to
+    /// convert back to `ReplyError` so we can send it over the wire
+    pub fn extend(&mut self, source: &str, extra: &str) {
+        self.source = format!("{}::{}", source, self.source);
+        self.extra = format!("{}::{}", extra, self.extra);
+    }
+}
+
+impl std::fmt::Display for ReplyError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "'{}' Error on '{}' resources, from Error '{}', extra: '{}'",
+            self.kind.as_ref(),
+            self.resource.as_ref(),
+            self.source,
+            self.extra
+        )
+    }
+}
+
+/// All the different variants of `ReplyError`
+#[derive(Serialize, Deserialize, Debug, Clone, strum_macros::AsRefStr)]
 #[allow(missing_docs)]
-pub enum ReplyError {
-    #[snafu(display("Generic Failure, message={}", message))]
-    WithMessage { message: String },
-    #[snafu(display("Failed to deserialize the request: '{}'", message))]
-    DeserializeReq { message: String },
-    #[snafu(display("Failed to process the request: '{}'", message))]
-    Process { message: String },
+pub enum ReplyErrorKind {
+    WithMessage,
+    DeserializeReq,
+    Internal,
+    Timeout,
+    InvalidArgument,
+    DeadlineExceeded,
+    NotFound,
+    AlreadyExists,
+    PermissionDenied,
+    ResourceExhausted,
+    FailedPrecondition,
+    Aborted,
+    OutOfRange,
+    Unimplemented,
+    Unavailable,
+    Unauthenticated,
+}
+
+impl From<Error> for ReplyError {
+    fn from(error: Error) -> Self {
+        #[allow(deprecated)]
+        let source_name = error.description().to_string();
+        match error {
+            Error::RequestTimeout {
+                ..
+            } => Self {
+                kind: ReplyErrorKind::Timeout,
+                resource: ResourceKind::Unknown,
+                source: source_name,
+                extra: error.to_string(),
+            },
+            Error::ReplyWithError {
+                source,
+            } => source,
+            _ => Self {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Unknown,
+                extra: error.to_string(),
+                source: source_name,
+            },
+        }
+    }
 }
 
 /// Payload returned to the sender
diff --git a/control-plane/mbus-api/src/message_bus/v0.rs b/control-plane/mbus-api/src/message_bus/v0.rs
index a418681e3..0995c7ab0 100644
--- a/control-plane/mbus-api/src/message_bus/v0.rs
+++ b/control-plane/mbus-api/src/message_bus/v0.rs
@@ -6,38 +6,31 @@ use async_trait::async_trait;
 
 /// Error sending/receiving
 /// Common error type for send/receive
-#[derive(Debug, Snafu, strum_macros::AsRefStr)]
-#[allow(missing_docs)]
-pub enum BusError {
-    #[snafu(display("Bus Internal error"))]
-    MessageBusError { source: Error },
-    #[snafu(display("Resource not unique"))]
-    NotUnique,
-    #[snafu(display("Resource not found"))]
-    NotFound,
-}
-
-impl From<Error> for BusError {
-    fn from(source: Error) -> Self {
-        BusError::MessageBusError {
-            source,
-        }
-    }
-}
+pub type BusError = ReplyError;
 
 /// Result for sending/receiving
 pub type BusResult<T> = Result<T, BusError>;
 
 macro_rules! only_one {
-    ($list:ident) => {
+    ($list:ident, $resource:expr, $filter:expr) => {
         if let Some(obj) = $list.first() {
            if $list.len() > 1 {
-                Err(BusError::NotUnique)
+                Err(ReplyError {
+                    kind: ReplyErrorKind::FailedPrecondition,
+                    resource: $resource,
+                    source: "".to_string(),
+                    extra: $filter.to_string(),
+                })
             } else {
                 Ok(obj.clone())
             }
         } else {
-            Err(BusError::NotFound)
+            Err(ReplyError {
+                kind: ReplyErrorKind::NotFound,
+                resource: $resource,
+                source: "".to_string(),
+                extra: $filter.to_string(),
+            })
         }
     };
}
@@ -60,14 +53,14 @@ pub trait MessageBusTrait: Sized {
             .into_iter()
             .filter(|n| &n.id == id)
             .collect::<Vec<Node>>();
-        only_one!(nodes)
+        only_one!(nodes, ResourceKind::Node, Filter::Node(id.clone()))
     }
 
     /// Get pool with filter
     #[tracing::instrument(level = "debug", err)]
     async fn get_pool(filter: Filter) -> BusResult<Pool> {
-        let pools = Self::get_pools(filter).await?;
-        only_one!(pools)
+        let pools = Self::get_pools(filter.clone()).await?;
+        only_one!(pools, ResourceKind::Pool, filter)
     }
 
     /// Get pools with filter
@@ -97,8 +90,8 @@ pub trait MessageBusTrait: Sized {
     /// Get replica with filter
     #[tracing::instrument(level = "debug", err)]
     async fn get_replica(filter: Filter) -> BusResult<Replica> {
-        let replicas = Self::get_replicas(filter).await?;
-        only_one!(replicas)
+        let replicas = Self::get_replicas(filter.clone()).await?;
+        only_one!(replicas, ResourceKind::Replica, filter)
     }
 
     /// Get replicas with filter
@@ -152,8 +145,8 @@ pub trait MessageBusTrait: Sized {
     /// Get nexus with filter
     #[tracing::instrument(level = "debug", err)]
     async fn get_nexus(filter: Filter) -> BusResult<Nexus> {
-        let nexuses = Self::get_nexuses(filter).await?;
-        only_one!(nexuses)
+        let nexuses = Self::get_nexuses(filter.clone()).await?;
+        only_one!(nexuses, ResourceKind::Nexus, filter)
     }
 
     /// create nexus
@@ -211,8 +204,8 @@ pub trait MessageBusTrait: Sized {
     /// Get volume with filter
     #[tracing::instrument(level = "debug", err)]
     async fn get_volume(filter: Filter) -> BusResult<Volume> {
-        let volumes = Self::get_volumes(filter).await?;
-        only_one!(volumes)
+        let volumes = Self::get_volumes(filter.clone()).await?;
+        only_one!(volumes, ResourceKind::Volume, filter)
     }
 
     /// create volume
diff --git a/control-plane/mbus-api/src/v0.rs b/control-plane/mbus-api/src/v0.rs
index 8f7b8f7e7..b59ecc9ea 100644
--- a/control-plane/mbus-api/src/v0.rs
+++ b/control-plane/mbus-api/src/v0.rs
@@ -29,6 +29,8 @@ pub enum ChannelVs {
     Kiiss,
     /// Json gRPC Service
     JsonGrpc,
+    /// Core Service combines Node, Pool and Volume services
+    Core,
 }
 impl Default for ChannelVs {
     fn default() -> Self {
@@ -258,7 +260,7 @@ bus_impl_message_all!(GetNodes, GetNodes, Nodes, Node);
 ///    // Get all nexuses from the node `node_id`
 ///    let nexuses =
 ///        MessageBus::get_nexuses(Filter::Node(node_id)).await.unwrap();
-#[derive(Serialize, Deserialize, Debug, Clone)]
+#[derive(Serialize, Deserialize, Debug, Clone, strum_macros::ToString)] // likely this ToString does not do the right thing...
 pub enum Filter {
     /// All objects
     None,
diff --git a/control-plane/rest/service/src/v0/children.rs b/control-plane/rest/service/src/v0/children.rs
index 66527f424..fdc9313b5 100644
--- a/control-plane/rest/service/src/v0/children.rs
+++ b/control-plane/rest/service/src/v0/children.rs
@@ -121,7 +121,12 @@ fn find_nexus_child(
     if let Some(child) = nexus.children.iter().find(|&c| &c.uri == child_uri) {
         Ok(child.clone())
     } else {
-        Err(BusError::NotFound)
+        Err(BusError {
+            kind: ReplyErrorKind::NotFound,
+            resource: ResourceKind::Child,
+            source: "find_nexus_child".to_string(),
+            extra: "".to_string(),
+        })
     }
 }
 
diff --git a/control-plane/rest/service/src/v0/nexuses.rs b/control-plane/rest/service/src/v0/nexuses.rs
index b2b3f4959..b39a05062 100644
--- a/control-plane/rest/service/src/v0/nexuses.rs
+++ b/control-plane/rest/service/src/v0/nexuses.rs
@@ -108,7 +108,14 @@ async fn destroy_nexus(filter: Filter) -> Result<Json<()>, RestError> {
                 uuid: nexus_id,
             }
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Nexus,
+                source: "destroy_nexus".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::destroy_nexus(destroy).await)
diff --git a/control-plane/rest/service/src/v0/pools.rs b/control-plane/rest/service/src/v0/pools.rs
index 631a34640..b9d9e9a3f 100644
--- a/control-plane/rest/service/src/v0/pools.rs
+++ b/control-plane/rest/service/src/v0/pools.rs
@@ -75,7 +75,14 @@ async fn destroy_pool(filter: Filter) -> Result<Json<()>, RestError> {
                 id: pool_id,
             }
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Pool,
+                source: "destroy_pool".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::destroy_pool(destroy).await)
diff --git a/control-plane/rest/service/src/v0/replicas.rs b/control-plane/rest/service/src/v0/replicas.rs
index 8d88003fc..88e7cf917 100644
--- a/control-plane/rest/service/src/v0/replicas.rs
+++ b/control-plane/rest/service/src/v0/replicas.rs
@@ -180,13 +180,22 @@ async fn put_replica(
             body.bus_request(node_id, pool_id, replica_id)
         }
         Filter::PoolReplica(pool_id, replica_id) => {
-            let node_id = match MessageBus::get_replica(filter).await {
-                Ok(replica) => replica.node,
-                Err(error) => return Err(RestError::from(error)),
-            };
+            let node_id =
+                match MessageBus::get_pool(Filter::Pool(pool_id.clone())).await
+                {
+                    Ok(replica) => replica.node,
+                    Err(error) => return Err(RestError::from(error)),
+                };
 
             body.bus_request(node_id, pool_id, replica_id)
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Replica,
+                source: "put_replica".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::create_replica(create).await)
@@ -213,7 +222,14 @@ async fn destroy_replica(filter: Filter) -> Result<Json<()>, RestError> {
                 uuid: replica_id,
             }
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Replica,
+                source: "destroy_replica".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::destroy_replica(destroy).await)
@@ -243,7 +259,14 @@ async fn share_replica(
                 protocol,
             }
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Replica,
+                source: "share_replica".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::share_replica(share).await)
@@ -270,7 +293,14 @@ async fn unshare_replica(filter: Filter) -> Result<Json<()>, RestError> {
                 uuid: replica_id,
             }
         }
-        _ => return Err(RestError::from(BusError::NotFound)),
+        _ => {
+            return Err(RestError::from(BusError {
+                kind: ReplyErrorKind::Internal,
+                resource: ResourceKind::Replica,
+                source: "unshare_replica".to_string(),
+                extra: "invalid filter for resource".to_string(),
+            }))
+        }
     };
 
     RestRespond::result(MessageBus::unshare_replica(unshare).await)
diff --git a/control-plane/rest/src/versions/v0.rs b/control-plane/rest/src/versions/v0.rs
index c569aac3c..ca928047e 100644
--- a/control-plane/rest/src/versions/v0.rs
+++ b/control-plane/rest/src/versions/v0.rs
@@ -543,8 +543,46 @@ impl ActixRestClient {
 /// Rest Error
 #[derive(Debug)]
 pub struct RestError {
-    kind: BusError,
-    message: String,
+    inner: BusError,
+}
+
+/// Rest Json Error format
+#[derive(Serialize, Deserialize, Debug)]
+pub struct RestJsonError {
+    /// error kind
+    kind: RestJsonErrorKind,
+    /// detailed error information
+    details: String,
+}
+
+/// RestJson error kind
+#[derive(Serialize, Deserialize, Debug)]
+#[allow(missing_docs)]
+pub enum RestJsonErrorKind {
+    Deserialize,
+    Internal,
+    Timeout,
+    InvalidArgument,
+    DeadlineExceeded,
+    NotFound,
+    AlreadyExists,
+    PermissionDenied,
+    ResourceExhausted,
+    FailedPrecondition,
+    Aborted,
+    OutOfRange,
+    Unimplemented,
+    Unavailable,
+    Unauthenticated,
+}
+
+impl RestJsonError {
+    fn new(kind: RestJsonErrorKind, details: &str) -> Self {
+        Self {
+            kind,
+            details: details.to_string(),
+        }
+    }
 }
 
 #[cfg(not(feature = "nightly"))]
@@ -553,20 +591,108 @@ impl paperclip::v2::schema::Apiv2Errors for RestError {}
 impl RestError {
     // todo: response type convention
     fn get_resp_error(&self) -> HttpResponse {
-        match &self.kind {
-            BusError::NotFound => HttpResponse::NotFound().json(()),
-            BusError::NotUnique => {
-                let error = serde_json::json!({"error": self.kind.as_ref(), "message": self.message });
-                tracing::error!("Got error: {}", error);
+        let details = self.inner.extra.clone();
+        match &self.inner.kind {
+            ReplyErrorKind::WithMessage => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::Internal, &details);
+                HttpResponse::InternalServerError().json(error)
+            }
+            ReplyErrorKind::DeserializeReq => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::Deserialize,
+                    &details,
+                );
                 HttpResponse::InternalServerError().json(error)
             }
-            BusError::MessageBusError {
-                source,
-            } => {
-                let error = serde_json::json!({"error": source.as_ref(), "message": source.full_string() });
-                tracing::error!("Got error: {}", error);
+            ReplyErrorKind::Internal => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::Internal, &details);
                 HttpResponse::InternalServerError().json(error)
             }
+            ReplyErrorKind::Timeout => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::Timeout, &details);
+                HttpResponse::RequestTimeout().json(error)
+            }
+            ReplyErrorKind::InvalidArgument => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::InvalidArgument,
+                    &details,
+                );
+                HttpResponse::BadRequest().json(error)
+            }
+            ReplyErrorKind::DeadlineExceeded => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::DeadlineExceeded,
+                    &details,
+                );
+                HttpResponse::GatewayTimeout().json(error)
+            }
+            ReplyErrorKind::NotFound => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::NotFound, &details);
+                HttpResponse::NotFound().json(error)
+            }
+            ReplyErrorKind::AlreadyExists => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::AlreadyExists,
+                    &details,
+                );
+                HttpResponse::UnprocessableEntity().json(error)
+            }
+            ReplyErrorKind::PermissionDenied => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::PermissionDenied,
+                    &details,
+                );
+                HttpResponse::Unauthorized().json(error)
+            }
+            ReplyErrorKind::ResourceExhausted => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::ResourceExhausted,
+                    &details,
+                );
+                HttpResponse::InsufficientStorage().json(error)
+            }
+            ReplyErrorKind::FailedPrecondition => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::FailedPrecondition,
+                    &details,
+                );
+                HttpResponse::PreconditionFailed().json(error)
+            }
+            ReplyErrorKind::Aborted => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::Aborted, &details);
+                HttpResponse::ServiceUnavailable().json(error)
+            }
+            ReplyErrorKind::OutOfRange => {
+                let error =
+                    RestJsonError::new(RestJsonErrorKind::OutOfRange, &details);
+                HttpResponse::RangeNotSatisfiable().json(error)
+            }
+            ReplyErrorKind::Unimplemented => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::Unimplemented,
+                    &details,
+                );
+                HttpResponse::NotImplemented().json(error)
+            }
+            ReplyErrorKind::Unavailable => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::Unavailable,
+                    &details,
+                );
+                HttpResponse::ServiceUnavailable().json(error)
+            }
+            ReplyErrorKind::Unauthenticated => {
+                let error = RestJsonError::new(
+                    RestJsonErrorKind::Unauthenticated,
+                    &details,
+                );
+                HttpResponse::Unauthorized().json(error)
+            }
         }
     }
 }
@@ -586,10 +712,9 @@ impl ResponseError for RestError {
     }
 }
 impl From<BusError> for RestError {
-    fn from(kind: BusError) -> Self {
+    fn from(inner: BusError) -> Self {
         Self {
-            message: kind.to_string(),
-            kind,
+            inner,
         }
     }
 }
diff --git a/control-plane/rest/tests/v0_test.rs b/control-plane/rest/tests/v0_test.rs
index 8d27a14cb..9e111d625 100644
--- a/control-plane/rest/tests/v0_test.rs
+++ b/control-plane/rest/tests/v0_test.rs
@@ -17,11 +17,9 @@ async fn wait_for_services() {
 
 // to avoid waiting for timeouts
 async fn orderly_start(test: &ComposeTest) {
-    test.start_containers(vec![
-        "nats", "node", "pool", "volume", "jsongrpc", "rest", "jaeger",
-    ])
-    .await
-    .unwrap();
+    test.start_containers(vec!["nats", "core", "jsongrpc", "rest", "jaeger"])
+        .await
+        .unwrap();
 
     test.connect_to_bus("nats").await;
     wait_for_services().await;
@@ -50,15 +48,13 @@ async fn client() {
             )
             .with_portmap("4222", "4222"),
         )
-        .add_container_bin("node", Binary::from_dbg("node").with_nats("-n"))
-        .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n"))
-        .add_container_bin("volume", Binary::from_dbg("volume").with_nats("-n"))
+        .add_container_bin("core", Binary::from_dbg("core").with_nats("-n"))
        .add_container_spec(
             ContainerSpec::from_binary(
                 "rest",
                 Binary::from_dbg("rest").with_nats("-n").with_args(vec![
                     "-j",
-                    "10.1.0.8:6831",
+                    "10.1.0.6:6831",
                     "--dummy-certificates",
                 ]),
             )
@@ -70,7 +66,7 @@ async fn client() {
                 Binary::from_dbg("mayastor")
                     .with_nats("-n")
                     .with_args(vec!["-N", mayastor])
-                    .with_args(vec!["-g", "10.1.0.7:10124"]),
+                    .with_args(vec!["-g", "10.1.0.5:10124"]),
             )
         .add_container_spec(
             ContainerSpec::from_image(
                 "jaeger",
                 "jaegertracing/all-in-one:latest",
             )
             .with_portmap("16686", "16686")
-            .with_portmap("6831/udp", "6831/udp")
-            .with_portmap("6832/udp", "6832/udp"),
+            .with_portmap("6831/udp", "6831/udp"),
         )
         .add_container_bin(
             "jsongrpc",
@@ -101,15 +96,13 @@ async fn client_test(mayastor: &NodeId, test: &ComposeTest) {
         .unwrap()
         .v0();
     let nodes = client.get_nodes().await.unwrap();
+    let mut node = Node {
+        id: mayastor.clone(),
+        grpc_endpoint: "10.1.0.5:10124".to_string(),
+        state: NodeState::Online,
+    };
     assert_eq!(nodes.len(), 1);
-    assert_eq!(
-        nodes.first().unwrap(),
-        &Node {
-            id: mayastor.clone(),
-            grpc_endpoint: "10.1.0.7:10124".to_string(),
-            state: NodeState::Online,
-        }
-    );
+    assert_eq!(nodes.first().unwrap(), &node);
     info!("Nodes: {:#?}", nodes);
     let _ = client.get_pools(Filter::None).await.unwrap();
     let pool = client.create_pool(CreatePool {
@@ -156,7 +149,7 @@ async fn client_test(mayastor: &NodeId, test: &ComposeTest) {
             thin: false,
             size: 12582912,
             share: Protocol::Nvmf,
-            uri: "nvmf://10.1.0.7:8420/nqn.2019-05.io.openebs:replica1"
+            uri: "nvmf://10.1.0.5:8420/nqn.2019-05.io.openebs:replica1"
                 .to_string(),
         }
     );
@@ -289,5 +282,6 @@ async fn client_test(mayastor: &NodeId, test: &ComposeTest) {
 
     test.stop("mayastor").await.unwrap();
     tokio::time::delay_for(std::time::Duration::from_millis(250)).await;
-    assert!(client.get_nodes().await.unwrap().is_empty());
+    node.state = NodeState::Unknown;
+    assert_eq!(client.get_nodes().await.unwrap(), vec![node]);
 }
diff --git a/mayastor/src/grpc/pool_grpc.rs b/mayastor/src/grpc/pool_grpc.rs
index 2d9982712..757c3176e 100644
--- a/mayastor/src/grpc/pool_grpc.rs
+++ b/mayastor/src/grpc/pool_grpc.rs
@@ -41,9 +41,15 @@ impl From<Error> for Status {
         Error::Create {
             ..
         } => Status::invalid_argument(e.to_string()),
+        Error::Destroy {
+            source, ..
+        } => source.into(),
         Error::Invalid {
             ..
         } => Status::invalid_argument(e.to_string()),
+        Error::InvalidBdev {
+            source, ..
+        } => source.into(),
         _ => Status::internal(e.to_string()),
     }
 }
diff --git a/nix/pkgs/control-plane/cargo-project.nix b/nix/pkgs/control-plane/cargo-project.nix
index ad862767b..5048c10eb 100644
--- a/nix/pkgs/control-plane/cargo-project.nix
+++ b/nix/pkgs/control-plane/cargo-project.nix
@@ -29,7 +29,7 @@ let
   buildProps = rec {
     name = "control-plane";
     #cargoSha256 = "0000000000000000000000000000000000000000000000000000";
-    cargoSha256 = "1r2g0ni8cxkphazbbkvzwdlcvkgk076llp18wqnkirj5d3xhbx4x";
+    cargoSha256 = "05j0bcvisnz4j8v2aiqsdv5lawl0i4kg9wvrnigc9l7gz45i2750";
     inherit version;
     src = whitelistSource ../../../.
(pkgs.callPackage ../mayastor { }).src_list; cargoBuildFlags = [ "-p mbus_api" "-p agents" "-p rest" ]; diff --git a/nix/pkgs/control-plane/default.nix b/nix/pkgs/control-plane/default.nix index 6550b8633..5e63f3f9d 100644 --- a/nix/pkgs/control-plane/default.nix +++ b/nix/pkgs/control-plane/default.nix @@ -17,9 +17,7 @@ let }; components = { src }: { kiiss = agent { inherit src; name = "kiiss"; }; - node = agent { inherit src; name = "node"; }; - pool = agent { inherit src; name = "pool"; }; - volume = agent { inherit src; name = "volume"; }; + core = agent { inherit src; name = "core"; }; rest = agent { inherit src; name = "rest"; }; }; in diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index 198ca7453..ff2b1a050 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -84,9 +84,7 @@ let }; agent-images = { build }: { kiiss = build-agent-image { inherit build; name = "kiiss"; }; - node = build-agent-image { inherit build; name = "node"; }; - pool = build-agent-image { inherit build; name = "pool"; }; - volume = build-agent-image { inherit build; name = "volume"; }; + core = build-agent-image { inherit build; name = "core"; }; rest = build-agent-image { inherit build; name = "rest"; config = { ExposedPorts = { "8080/tcp" = { }; "8081/tcp" = { }; }; }; diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index d12615e3b..60b4cf969 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -56,7 +56,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "q2Dp9IuwxHSwZEkEiIc49M4d/BI/GyS+lnTrs4TKRs8="; + cargoSha256 = "05zsfs6vwm6q8fhlmqv2s6568y1jqafkq83zi3zr9l3lcswnbgqv"; inherit version; src = whitelistSource ../../../. src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; From 30c792d8088ec2aeed652a5b43dd5a6e88a36869 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Tue, 2 Mar 2021 14:19:53 +0000 Subject: [PATCH 60/78] feat(rest): authenticate clients Provide the ability to authenticate client requests through the use of a JSON Web Token (JWT). This requires a JSON Web Key (JWK) to be available for use by the REST service so that it can validate the JWT. The REST service can be started with the '--no-auth' argument to disable authentication. This is useful for test cases where authentication is unnecessary. The REST service can be started with the '--jwk' argument to enable authentication. The path to the relevant JWK file must also be provided. Example authentication files are provided in ../Mayastor/control-plane/rest/authentication for test purposes and should not be used in production. 
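The validation flow this patch wires in can be sketched with the `jsonwebtoken` crate that the Cargo.lock hunk below pins at 7.2.0. This is a minimal illustration under stated assumptions, not the service's own code: the `Claims` struct and the function shape are hypothetical, and the real service loads the RSA modulus and exponent from the JWK file passed via '--jwk' rather than taking them as arguments.

```rust
// Minimal sketch of validating a JWT against a JWK with jsonwebtoken 7.x.
// `Claims` and the argument shape are assumptions for illustration only.
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Claims {
    sub: String, // subject claim, as carried by the example token
    exp: usize,  // expiry in seconds since the epoch; checked by default
}

fn validate(
    token: &str,
    jwk_n: &str, // base64url-encoded RSA modulus ("n") from the JWK file
    jwk_e: &str, // base64url-encoded RSA exponent ("e") from the JWK file
) -> Result<Claims, jsonwebtoken::errors::Error> {
    let key = DecodingKey::from_rsa_components(jwk_n, jwk_e);
    let token_data =
        decode::<Claims>(token, &key, &Validation::new(Algorithm::RS256))?;
    Ok(token_data.claims)
}
```

The token itself reaches the service in the `Authorization: Bearer` header, as the curl example in the README added below demonstrates.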
--- Cargo.lock | 52 ++++++- control-plane/deployer/src/infra/rest.rs | 2 + control-plane/macros/actix/src/lib.rs | 13 +- control-plane/rest/Cargo.toml | 1 + control-plane/rest/authentication/README.md | 23 +++ control-plane/rest/authentication/id_rsa | 39 +++++ control-plane/rest/authentication/id_rsa.pub | 11 ++ control-plane/rest/authentication/jwk | 7 + control-plane/rest/authentication/token | 1 + .../rest/openapi-specs/v0_api_spec.json | 2 +- .../rest/service/src/authentication.rs | 145 ++++++++++++++++++ control-plane/rest/service/src/main.rs | 21 +++ control-plane/rest/service/src/v0/mod.rs | 30 ++++ control-plane/rest/src/lib.rs | 19 ++- control-plane/rest/tests/v0_test.rs | 107 ++++++++++--- control-plane/tests/tests/common/mod.rs | 5 + nix/pkgs/control-plane/cargo-project.nix | 2 +- nix/pkgs/control-plane/default.nix | 1 - nix/pkgs/mayastor/default.nix | 2 +- scripts/openapi-check.sh | 2 +- 20 files changed, 449 insertions(+), 36 deletions(-) create mode 100644 control-plane/rest/authentication/README.md create mode 100644 control-plane/rest/authentication/id_rsa create mode 100644 control-plane/rest/authentication/id_rsa.pub create mode 100644 control-plane/rest/authentication/jwk create mode 100644 control-plane/rest/authentication/token create mode 100644 control-plane/rest/service/src/authentication.rs diff --git a/Cargo.lock b/Cargo.lock index 6792b3ad1..efd69404e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2201,6 +2201,20 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -2636,6 +2650,17 @@ dependencies = [ "rand 0.7.3", ] +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg 1.0.1", + "num-integer", + "num-traits 0.2.14", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -2700,9 +2725,9 @@ checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "once_cell" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "4ad167a2f54e832b82dbe003a046280dceffe5227b5f79e08e363a29638cfddd" [[package]] name = "opaque-debug" @@ -2894,6 +2919,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +dependencies = [ + "base64 0.13.0", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "1.0.1" @@ -3445,6 +3481,7 @@ dependencies = [ "composer", "futures", "http 0.2.3", + "jsonwebtoken", "macros", "mbus_api", "opentelemetry", @@ -3842,6 +3879,17 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" +[[package]] +name = "simple_asn1" +version = "0.4.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +dependencies = [ + "chrono", + "num-bigint", + "num-traits 0.2.14", +] + [[package]] name = "slab" version = "0.4.2" diff --git a/control-plane/deployer/src/infra/rest.rs b/control-plane/deployer/src/infra/rest.rs index 07bf78ada..47ba9e6cc 100644 --- a/control-plane/deployer/src/infra/rest.rs +++ b/control-plane/deployer/src/infra/rest.rs @@ -22,6 +22,7 @@ impl ComponentAction for Rest { Binary::from_dbg("rest") .with_nats("-n") .with_arg("--dummy-certificates") + .with_arg("--no-auth") .with_args(vec!["--https", "rest:8080"]) .with_args(vec!["--http", "rest:8081"]), ) @@ -36,6 +37,7 @@ impl ComponentAction for Rest { Binary::from_dbg("rest") .with_nats("-n") .with_arg("--dummy-certificates") + .with_arg("--no-auth") .with_args(vec!["-j", &jaeger_config]) .with_args(vec!["--https", "rest:8080"]) .with_args(vec!["--http", "rest:8081"]), diff --git a/control-plane/macros/actix/src/lib.rs b/control-plane/macros/actix/src/lib.rs index ddcdbdccc..07b683f29 100644 --- a/control-plane/macros/actix/src/lib.rs +++ b/control-plane/macros/actix/src/lib.rs @@ -57,6 +57,16 @@ impl Method { let handler: ItemFn = syn::parse(item)?; Ok(handler.sig.ident) } + /// Add authorisation to handler functions by adding a BearerToken as an + /// additional function argument. + /// The BearerToken is defined in + /// ../Mayastor/control-plane/rest/service/src/v0/mod.rs + fn handler_fn_with_auth(item: TokenStream) -> syn::Result { + let mut func: ItemFn = syn::parse(item)?; + let new_input = syn::parse_str("_token: BearerToken")?; + func.sig.inputs.push(new_input); + Ok(func) + } fn generate( &self, attr: TokenStream, @@ -65,7 +75,8 @@ impl Method { let full_uri: TokenStream2 = Self::handler_uri(attr.clone()).into(); let relative_uri: TokenStream2 = Self::openapi_uri(attr.clone()).into(); let handler_name = Self::handler_name(item.clone())?; - let handler_fn: TokenStream2 = item.into(); + let handler_fn: TokenStream2 = + Self::handler_fn_with_auth(item)?.to_token_stream(); let method: TokenStream2 = self.method().parse()?; let variant: TokenStream2 = self.variant().parse()?; let handler_name_str = handler_name.to_string(); diff --git a/control-plane/rest/Cargo.toml b/control-plane/rest/Cargo.toml index 147eb6c84..ed6dc0d50 100644 --- a/control-plane/rest/Cargo.toml +++ b/control-plane/rest/Cargo.toml @@ -39,6 +39,7 @@ paperclip = { version = "0.5.0", default-features = false, optional = true } macros = { path = "../macros" } http = "0.2.3" tinytemplate = { version = "1.2" } +jsonwebtoken = "7.2.0" [dev-dependencies] composer = { path = "../../composer" } diff --git a/control-plane/rest/authentication/README.md b/control-plane/rest/authentication/README.md new file mode 100644 index 000000000..ef3cc88c9 --- /dev/null +++ b/control-plane/rest/authentication/README.md @@ -0,0 +1,23 @@ +**WARNING**: These are dummy example RSA keys and should not be used in production. + +There are various websites (such as https://russelldavies.github.io/jwk-creator/) which provide the capability of generating the JSON Web Key from the public RSA key. +For convenience the 'jwk' file has already been generated from the provided public key. + +# Usage +To try out the dummy JSON Web Key (JWK), execute the following steps from within the nix-shell: +1. Run the deployer without launching the rest service +```bash +./target/debug/deployer start -a "Node, Pool, Volume" --no-rest +``` +2. 
+2. Start the REST service within the nix-shell
+```bash
+./target/debug/rest --dummy-certificates --jwk "../Mayastor/control-plane/rest/authentication/jwk"
+```
+3. Set the token value (located in ../Mayastor/control-plane/rest/authentication/token)
+```bash
+export TOKEN=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyYW5kb20gc3ViamVjdCIsImNvbXBhbnkiOiJteSBjb21wYW55IiwiZXhwIjoxMDAwMDAwMDAwMH0.GkcWHAJ4-qXihaR2j8ZvJgFB1OPpo9P5PkauTmb4PHvlDTYpDQy_nfTHmZCKHS1WEBtsH-HOXApKf32oJEU0K_2SAO76PVZrqvfMewccny-aB9gyu6WMlgSWK8wvGq4h_t_Ma4KIBlPv5PCQO1fyv9bWM3Y3Lu2rPxvNg0O_V_mfnq_Ynwcy4qhnZmse8pZ9zJJaM5OPv2ucWRPKWNzSX8OOz11MGBcdV5QBM-eBpjeSvejEwQ1xOxfiwZwZosFKjPnwMWn8dirMhMNqyRwWgjmOFU2hpc13Ik2VcSWEKTF4ndoUmMLXmCmQ2pSrn9MihEfkpO_VHx_sRVtmYVe2R4iy7ocul3eG7ZAvRq-_GIqBpwbcdUPANIyEFWUWgiPB5_kFvf4-iIBip7NhZ0_4DVoqukYBM2XodejXY863p2frglljt23EimNoKlrtqyxw1wXcbsYtiqCsd3cFTMUkrVesu9xNQPfpM8so37SmTsrC1nOssGEiADAGowqu5SsS
+```
+4. Use curl to make a REST request using the above token
+```bash
+curl -X GET "https://localhost:8080/v0/nodes" -H "accept: application/json" -H "Authorization: Bearer ${TOKEN}" -k
+```
\ No newline at end of file
diff --git a/control-plane/rest/authentication/id_rsa b/control-plane/rest/authentication/id_rsa
new file mode 100644
index 000000000..8326e1b77
--- /dev/null
+++ b/control-plane/rest/authentication/id_rsa
@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4wIBAAKCAYEAtTtUE2YgN2te7Hd29BZxeGjmagg0Ch9zvDIlHRjl7Y6Y9Gan
+kign24dOXFC0t/3XzylySG0w56YkAgZPbu+7NRUbjE8ev5gFEBVfHgXmPvFKwPSk
+CtZG94Kx+lK/BZ4oOieLSoqSSsCdm6Mr5q57odkWghnXXohmRgKVgrg2OS1fUcw5
+l2AYljierf2vsFDGU6DU1PqeKiDrflsu8CFxDBAkVdUJCZH5BJcUMhjK41FCyYIm
+tEb13eXRIr46rwxOGjwj6Szthd+sZIDDP/VVBJ3bGNk80buaWYQnojtllseNBg9p
+GCTBtYHB+kd+NNm2rwPWQLjmcY1ym9LtJmrQCXvA4EUgsG7qBNj1dl2NHcG03eEo
+JBejQ5xwTNgQZ6311lXuKByP5gkiLctCtwn1wGTJpjbLKo8xReNdKgFqrIOT1mC7
+6oZpT3AsWlVH60H4aVTthuYEBCJgBQh5Bh6y44ANGcybj+q7sOOtuWi96sXNOCLc
+zEbqKYpeuckYp1LPAgMBAAECggGAUglTG5zlBHEj/OJvBDqMjrbdZi3kcJigKRaB
+2lQE8K3V6vv06qImuKbc/8jApXDQmcPnKYXT12hLcGcu2cbG9VZiq/a8snm8APXL
+oqmE+gT7k7Cp+QXaBfwxWGDQe1iGWRzBXrKvWgsqzOLl4nwlFrRQDgBojzArK5HL
+3+pHEUbKmRpbD3y+ZHGo0pW9S5Ck1gI9lVME+YkBUKcx7h0VMSK1b+0JND3RfRRu
+Xeb/IDsOgmzZ3E0qypFXQ+TcZ5SnmxEltRdgo3lSidglt4hDD9yApJmxtLaVmUfN
+3ZKnmAKU+oODXIZvl9s71nRvs89SgAgABn23M270jEiSD/gcSP6nVcaSY/IVrJTy
+elKkJebjYcuP3TL8dgPCbdjPbWk3SNbHv5/Pdv5/JrSOJgP43RbQD5ea5Mvft5/x
+obpPtfijhYWg9UQHhOeivO0QvzcRCue+kOoqYUVYqjBe7nE7Gc2yePKlGRDXRQ4j
+LKod37n98EXZ1O0xCZfdIPci7hxxAoHBANlgm9KCSIXeFgmk4npHXI3KofmGOnzw
+k4nmzINTEPIgo9VweasYfyJ7huOAJqeGUvFs9bVy1dW61EbyawRp5lhlA+CZrUIt
+it7Ow+lu7wW5gOOUE9M7oR/g2g1RiHOk/div2E0fk3KiN5Sh3ADAcfrOFHPUmMGh
+fR3CgW3TsrXFeCA2kYIXj8Ae0l+zF4C/t1fyDSBOVd4UF76Ir+BWJN0zIrhhYe1j
+Ctgsq+VaCrzLnS1jFJvpDr+1QPoON1eBqwKBwQDVbqOY1yhHzZ70/dAIrbB1lhIg
+C9TDshoox2N2TTucth/offRp1ul7Vo5Ut9wHUcgobzLY2rNQm4LhHMpWy/pWQ8xW
+jSkGAg1ntEmL6+L4GCOcSMAWFfdlXbrJ2B6nw0WIwRchC6n7MdgjY93hFoZMul8x
+UQDAt63g/Y2f3+dI+lQhFyRtvX3SOGGNAduI5zdMtxqpK7Ks4k6zHgE/vUETbXIv
+QU2WrHvUD2M2ggBcxGCoZJpO7FQzegkDknX8V20CgcBl3GVoMXC2eiktf7w4vHPc
+ZZWdDY8euMUKG8K9zxDjxPPAsqHw0NvSVrwQox555fG7++jvi840BwYt8K7BNLah
+uUQl3R1ZI2otmgonurn6nsCM4/ieRRTtkTncf9ZHCouBHHVpPmCjmOwek/I5z/QZ
+KLRgysCCC6BLb7eitU7K6qutvKRWp5/O0SKXgZ6D0FKjvWL1Pn/yPswZloeDwhoo
+JSwh5lAzIvQT9GrgYF8jtO4ENKeVn5Ivt0mpYzv/n10CgcEAtJyK3pT8Zj7PzBxZ
+Bm8NC4RyVCIO64f08RtBxOO4lXXdbJ3hzgrqy8/EZFauYJdJXUY0biQsaAMhbyQw
+6eB1OLjo2zlbRNVJyL9dGYYFLNMol2FNA6OVFneJ0LMNxgPN/NsBmppHPuXANLqX
+EZpBDf8M/SvCClOlVebbCTatfykvNk1iK2eWaOYDTxMKV0DqoAW3Dv+GlRxxYsv6
+XJjnz+vnG6wUX3QY2awn1gGPEvGvpfB0UGNXIbScmiQ/qcnFAoHAVGCKsHVVjIQC +TA2TThk0olH7wJpF4jYgskPxGLS7Hl3H+eyNSffLOlxzK6M1dbB4pr9hTX91QdNg +KTyW5j+pCK5V1n19OjfbmcCcWIFUlApB6w3Ka2J6JYku+ngjjYbhZzlz/Z778SpZ +fSEiunz/xePu7hvJBytblLAyln+gbule1vXYhlXBpE+752+f8rQmknGBFVFRGXUb +ID6qZUQGwFlbcfHjvV2bMecPIUFFC9YzgDxkVkPRs3P5TifNE0Bs +-----END RSA PRIVATE KEY----- diff --git a/control-plane/rest/authentication/id_rsa.pub b/control-plane/rest/authentication/id_rsa.pub new file mode 100644 index 000000000..6322ee5a9 --- /dev/null +++ b/control-plane/rest/authentication/id_rsa.pub @@ -0,0 +1,11 @@ +-----BEGIN PUBLIC KEY----- +MIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAtTtUE2YgN2te7Hd29BZx +eGjmagg0Ch9zvDIlHRjl7Y6Y9Gankign24dOXFC0t/3XzylySG0w56YkAgZPbu+7 +NRUbjE8ev5gFEBVfHgXmPvFKwPSkCtZG94Kx+lK/BZ4oOieLSoqSSsCdm6Mr5q57 +odkWghnXXohmRgKVgrg2OS1fUcw5l2AYljierf2vsFDGU6DU1PqeKiDrflsu8CFx +DBAkVdUJCZH5BJcUMhjK41FCyYImtEb13eXRIr46rwxOGjwj6Szthd+sZIDDP/VV +BJ3bGNk80buaWYQnojtllseNBg9pGCTBtYHB+kd+NNm2rwPWQLjmcY1ym9LtJmrQ +CXvA4EUgsG7qBNj1dl2NHcG03eEoJBejQ5xwTNgQZ6311lXuKByP5gkiLctCtwn1 +wGTJpjbLKo8xReNdKgFqrIOT1mC76oZpT3AsWlVH60H4aVTthuYEBCJgBQh5Bh6y +44ANGcybj+q7sOOtuWi96sXNOCLczEbqKYpeuckYp1LPAgMBAAE= +-----END PUBLIC KEY----- diff --git a/control-plane/rest/authentication/jwk b/control-plane/rest/authentication/jwk new file mode 100644 index 000000000..7ee2faf48 --- /dev/null +++ b/control-plane/rest/authentication/jwk @@ -0,0 +1,7 @@ +{ + "kty": "RSA", + "n": "tTtUE2YgN2te7Hd29BZxeGjmagg0Ch9zvDIlHRjl7Y6Y9Gankign24dOXFC0t_3XzylySG0w56YkAgZPbu-7NRUbjE8ev5gFEBVfHgXmPvFKwPSkCtZG94Kx-lK_BZ4oOieLSoqSSsCdm6Mr5q57odkWghnXXohmRgKVgrg2OS1fUcw5l2AYljierf2vsFDGU6DU1PqeKiDrflsu8CFxDBAkVdUJCZH5BJcUMhjK41FCyYImtEb13eXRIr46rwxOGjwj6Szthd-sZIDDP_VVBJ3bGNk80buaWYQnojtllseNBg9pGCTBtYHB-kd-NNm2rwPWQLjmcY1ym9LtJmrQCXvA4EUgsG7qBNj1dl2NHcG03eEoJBejQ5xwTNgQZ6311lXuKByP5gkiLctCtwn1wGTJpjbLKo8xReNdKgFqrIOT1mC76oZpT3AsWlVH60H4aVTthuYEBCJgBQh5Bh6y44ANGcybj-q7sOOtuWi96sXNOCLczEbqKYpeuckYp1LP", + "e": "AQAB", + "alg": "RS256", + "use": "sig" +} diff --git a/control-plane/rest/authentication/token b/control-plane/rest/authentication/token new file mode 100644 index 000000000..daff94755 --- /dev/null +++ b/control-plane/rest/authentication/token @@ -0,0 +1 @@ +eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyYW5kb20gc3ViamVjdCIsImNvbXBhbnkiOiJteSBjb21wYW55IiwiZXhwIjoxMDAwMDAwMDAwMH0.GkcWHAJ4-qXihaR2j8ZvJgFB1OPpo9P5PkauTmb4PHvlDTYpDQy_nfTHmZCKHS1WEBtsH-HOXApKf32oJEU0K_2SAO76PVZrqvfMewccny-aB9gyu6WMlgSWK8wvGq4h_t_Ma4KIBlPv5PCQO1fyv9bWM3Y3Lu2rPxvNg0O_V_mfnq_Ynwcy4qhnZmse8pZ9zJJaM5OPv2ucWRPKWNzSX8OOz11MGBcdV5QBM-eBpjeSvejEwQ1xOxfiwZwZosFKjPnwMWn8dirMhMNqyRwWgjmOFU2hpc13Ik2VcSWEKTF4ndoUmMLXmCmQ2pSrn9MihEfkpO_VHx_sRVtmYVe2R4iy7ocul3eG7ZAvRq-_GIqBpwbcdUPANIyEFWUWgiPB5_kFvf4-iIBip7NhZ0_4DVoqukYBM2XodejXY863p2frglljt23EimNoKlrtqyxw1wXcbsYtiqCsd3cFTMUkrVesu9xNQPfpM8so37SmTsrC1nOssGEiADAGowqu5SsS \ No newline at end of file diff --git a/control-plane/rest/openapi-specs/v0_api_spec.json b/control-plane/rest/openapi-specs/v0_api_spec.json index 441f4118c..41b9c18c5 100644 --- a/control-plane/rest/openapi-specs/v0_api_spec.json +++ b/control-plane/rest/openapi-specs/v0_api_spec.json @@ -1 +1 @@ -{"swagger":"2.0","definitions":{"BlockDevice":{"description":"Block device information","type":"object","properties":{"available":{"description":"identifies if device is available for use (ie. 
is not \"currently\" in\n use)","type":"boolean"},"devlinks":{"description":"list of udev generated symlinks by which device may be identified","type":"array","items":{"type":"string"}},"devmajor":{"description":"major device number","type":"integer","format":"int32"},"devminor":{"description":"minor device number","type":"integer","format":"int32"},"devname":{"description":"entry in /dev associated with device","type":"string"},"devpath":{"description":"official device path","type":"string"},"devtype":{"description":"currently \"disk\" or \"partition\"","type":"string"},"filesystem":{"description":"filesystem information in case where a filesystem is present","type":"object","properties":{"fstype":{"description":"filesystem type: ext3, ntfs, ...","type":"string"},"label":{"description":"volume label","type":"string"},"mountpoint":{"description":"path where filesystem is currently mounted","type":"string"},"uuid":{"description":"UUID identifying the volume (filesystem)","type":"string"}},"required":["fstype","label","mountpoint","uuid"]},"model":{"description":"device model - useful for identifying mayastor devices","type":"string"},"partition":{"description":"partition information in case where device represents a partition","type":"object","properties":{"name":{"description":"partition name","type":"string"},"number":{"description":"partition number","type":"integer","format":"int32"},"parent":{"description":"devname of parent device to which this partition belongs","type":"string"},"scheme":{"description":"partition scheme: gpt, dos, ...","type":"string"},"typeid":{"description":"partition type identifier","type":"string"},"uuid":{"description":"UUID identifying partition","type":"string"}},"required":["name","number","parent","scheme","typeid","uuid"]},"size":{"description":"size of device in (512 byte) blocks","type":"integer","format":"int64"}},"required":["available","devlinks","devmajor","devminor","devname","devpath","devtype","filesystem","model","partition","size"]},"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/
children":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/d
efinitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/block_devices":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/BlockDevice"}}}},"parameters":[{"description":"specifies whether to list all devices or only usable 
ones","in":"query","name":"all","type":"boolean"},{"in":"path","name":"node","required":true,"type":"string"}],"tags":["BlockDevices"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{pool_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","info":{"version":"v0","title":"Mayastor RESTful API"}} \ 
No newline at end of file +{"swagger":"2.0","definitions":{"BlockDevice":{"description":"Block device information","type":"object","properties":{"available":{"description":"identifies if device is available for use (ie. is not \"currently\" in\n use)","type":"boolean"},"devlinks":{"description":"list of udev generated symlinks by which device may be identified","type":"array","items":{"type":"string"}},"devmajor":{"description":"major device number","type":"integer","format":"int32"},"devminor":{"description":"minor device number","type":"integer","format":"int32"},"devname":{"description":"entry in /dev associated with device","type":"string"},"devpath":{"description":"official device path","type":"string"},"devtype":{"description":"currently \"disk\" or \"partition\"","type":"string"},"filesystem":{"description":"filesystem information in case where a filesystem is present","type":"object","properties":{"fstype":{"description":"filesystem type: ext3, ntfs, ...","type":"string"},"label":{"description":"volume label","type":"string"},"mountpoint":{"description":"path where filesystem is currently mounted","type":"string"},"uuid":{"description":"UUID identifying the volume (filesystem)","type":"string"}},"required":["fstype","label","mountpoint","uuid"]},"model":{"description":"device model - useful for identifying mayastor devices","type":"string"},"partition":{"description":"partition information in case where device represents a partition","type":"object","properties":{"name":{"description":"partition name","type":"string"},"number":{"description":"partition number","type":"integer","format":"int32"},"parent":{"description":"devname of parent device to which this partition belongs","type":"string"},"scheme":{"description":"partition scheme: gpt, dos, ...","type":"string"},"typeid":{"description":"partition type identifier","type":"string"},"uuid":{"description":"UUID identifying partition","type":"string"}},"required":["name","number","parent","scheme","typeid","uuid"]},"size":{"description":"size of device in (512 byte) blocks","type":"integer","format":"int64"}},"required":["available","devlinks","devmajor","devminor","devname","devpath","devtype","filesystem","model","partition","size"]},"Child":{"type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]},"CreateNexusBody":{"type":"object","properties":{"children":{"description":"replica can be iscsi and nvmf remote targets or a local spdk bdev\n (i.e. 
bdev:///name-of-the-bdev).\n\n uris to the targets we connect to","type":"array","items":{"description":"URI of a mayastor nexus child","type":"string"}},"size":{"description":"size of the device in bytes","type":"integer","format":"int64"}},"required":["children","size"]},"CreatePoolBody":{"type":"object","properties":{"disks":{"description":"disk device paths or URIs to be claimed by the pool","type":"array","items":{"type":"string"}}},"required":["disks"]},"CreateReplicaBody":{"type":"object","properties":{"share":{"description":"protocol to expose the replica over","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"}},"required":["share","size","thin"]},"CreateVolumeBody":{"type":"object","properties":{"allowed_nodes":{"description":"only these nodes can be used for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"nexuses":{"description":"number of children nexuses (ANA)","type":"integer","format":"int64"},"preferred_nexus_nodes":{"description":"preferred nodes for the nexuses","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"preferred_nodes":{"description":"preferred nodes for the replicas","type":"array","items":{"description":"ID of a mayastor node","type":"string"}},"replicas":{"description":"number of replicas per nexus","type":"integer","format":"int64"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"}},"required":["nexuses","replicas","size"]},"JsonGeneric":{"type":"object","properties":{"inner":{}},"required":["inner"]},"Nexus":{"type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]},"Node":{"type":"object","properties":{"grpcEndpoint":{"description":"grpc_endpoint of the mayastor instance","type":"string"},"id":{"description":"id of the mayastor instance","type":"string"},"state":{"description":"deemed state of the node","type":"string","enum":["Unknown","Online","Offline"]}},"required":["grpcEndpoint","id","state"]},"Pool":{"type":"object","properties":{"capacity":{"description":"size of the pool in bytes","type":"integer","format":"int64"},"disks":{"description":"absolute disk paths claimed by the pool","type":"array","items":{"type":"string"}},"id":{"description":"id of the pool","type":"string"},"node":{"description":"id of the mayastor 
instance","type":"string"},"state":{"description":"current state of the pool","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"used":{"description":"used bytes from the pool","type":"integer","format":"int64"}},"required":["capacity","disks","id","node","state","used"]},"Replica":{"type":"object","properties":{"node":{"description":"id of the mayastor instance","type":"string"},"pool":{"description":"id of the pool","type":"string"},"share":{"description":"protocol used for exposing the replica","type":"string","enum":["off","nvmf","iscsi","nbd"]},"size":{"description":"size of the replica in bytes","type":"integer","format":"int64"},"thin":{"description":"thin provisioning","type":"boolean"},"uri":{"description":"uri usable by nexus to access it","type":"string"},"uuid":{"description":"uuid of the replica","type":"string"}},"required":["node","pool","share","size","thin","uri","uuid"]},"Volume":{"type":"object","properties":{"children":{"description":"array of children nexuses","type":"array","items":{"description":"Nexus information","type":"object","properties":{"children":{"description":"array of children","type":"array","items":{"description":"Child information","type":"object","properties":{"rebuildProgress":{"description":"current rebuild progress (%)","type":"integer","format":"int32"},"state":{"description":"state of the child","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uri":{"description":"uri of the child device","type":"string"}},"required":["state","uri"]}},"deviceUri":{"description":"URI of the device for the volume (missing if not published).\n Missing property and empty string are treated the same.","type":"string"},"node":{"description":"id of the mayastor instance","type":"string"},"rebuilds":{"description":"total number of rebuild tasks","type":"integer","format":"int32"},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the nexus","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"uuid of the nexus","type":"string"}},"required":["children","deviceUri","node","rebuilds","size","state","uuid"]}},"size":{"description":"size of the volume in bytes","type":"integer","format":"int64"},"state":{"description":"current state of the volume","type":"string","enum":["Unknown","Online","Degraded","Faulted"]},"uuid":{"description":"name of the 
volume","type":"string"}},"required":["children","size","state","uuid"]}},"paths":{"/nexuses":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"tags":["Nexuses"]}},"/nexuses/{nexus_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nexuses/{nexus_id}/children":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}}},"tags":["Nodes"]}},"/nodes/{id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Node"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nodes"]}},"/nodes/{id}/nexuses":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Nexus"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{id}/pools":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{id}/replicas":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/nexuses/{nexus_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Nexus"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"body","name":"body","required":true
,"schema":{"$ref":"#/definitions/CreateNexusBody"}}],"tags":["Nexuses"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Child"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Child"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"child_id:.*","required":true,"type":"string"}],"tags":["Children"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share":{"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"}],"tags":["Nexuses"]}},"/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}":{"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"nexus_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Nexuses"]}},"/nodes/{node_id}/pools/{pool_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreatePoolBody"}}],"tags":["Pools"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/nodes/{node_id}/pools/{pool_id}/replicas":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"param
eters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/nodes/{node_id}/volumes":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node_id}/volumes/{volume_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"node_id","required":true,"type":"string"},{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}},"/nodes/{node}/block_devices":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/BlockDevice"}}}},"parameters":[{"description":"specifies whether to list all devices or only usable 
ones","in":"query","name":"all","type":"boolean"},{"in":"path","name":"node","required":true,"type":"string"}],"tags":["BlockDevices"]}},"/nodes/{node}/jsongrpc/{method}":{"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/JsonGeneric"}}},"parameters":[{"in":"path","name":"node","required":true,"type":"string"},{"in":"path","name":"method","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/JsonGeneric"}}],"tags":["JsonGrpc"]}},"/pools":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Pool"}}}},"tags":["Pools"]}},"/pools/{pool_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Pool"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"}],"tags":["Pools"]}},"/pools/{pool_id}/replicas/{replica_id}":{"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"schema":{"$ref":"#/definitions/CreateReplicaBody"}}],"tags":["Replicas"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share":{"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"}],"tags":["Replicas"]}},"/pools/{pool_id}/replicas/{replica_id}/share/{protocol}":{"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"string"}}},"parameters":[{"in":"path","name":"pool_id","required":true,"type":"string"},{"in":"path","name":"replica_id","required":true,"type":"string"},{"in":"path","name":"protocol","required":true,"type":"string","enum":["off","nvmf","iscsi","nbd"]}],"tags":["Replicas"]}},"/replicas":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Replica"}}}},"tags":["Replicas"]}},"/replicas/{id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Replica"}}},"parameters":[{"in":"path","name":"id","required":true,"type":"string"}],"tags":["Replicas"]}},"/volumes":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"type":"array","items":{"$ref":"#/definitions/Volume"}}}},"tags":["Volumes"]}},"/volumes/{volume_id}":{"get":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]},"put":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{"$ref":"#/definitions/Volume"}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"},{"in":"body","name":"body","required":true,"
schema":{"$ref":"#/definitions/CreateVolumeBody"}}],"tags":["Volumes"]},"delete":{"security":[{"JWT":[]}],"responses":{"200":{"description":"OK","schema":{}}},"parameters":[{"in":"path","name":"volume_id","required":true,"type":"string"}],"tags":["Volumes"]}}},"basePath":"/v0","securityDefinitions":{"JWT":{"name":"Authorization","type":"apiKey","in":"header","description":"Use format 'Bearer TOKEN'"}},"info":{"version":"v0","title":"Mayastor RESTful API"}} \ No newline at end of file diff --git a/control-plane/rest/service/src/authentication.rs b/control-plane/rest/service/src/authentication.rs new file mode 100644 index 000000000..ef839da9f --- /dev/null +++ b/control-plane/rest/service/src/authentication.rs @@ -0,0 +1,145 @@ +use actix_web::{Error, HttpRequest}; +use jsonwebtoken::{crypto, Algorithm, DecodingKey}; +use std::str::FromStr; + +use http::HeaderValue; +use std::fs::File; + +/// Initialise JWK with the contents of the file at 'jwk_path'. +/// If jwk_path is 'None', authentication is disabled. +pub fn init(jwk_path: Option) -> JsonWebKey { + match jwk_path { + Some(path) => { + let jwk_file = File::open(path).expect("Failed to open JWK file"); + let jwk = serde_json::from_reader(jwk_file) + .expect("Failed to deserialise JWK"); + JsonWebKey { + jwk, + } + } + None => JsonWebKey { + ..Default::default() + }, + } +} + +#[derive(Default, Debug)] +pub struct JsonWebKey { + jwk: serde_json::Value, +} + +impl JsonWebKey { + // Returns true if REST calls should be authenticated. + fn auth_enabled(&self) -> bool { + !self.jwk.is_null() + } + + // Return the algorithm. + fn algorithm(&self) -> Algorithm { + Algorithm::from_str(self.jwk["alg"].as_str().unwrap()).unwrap() + } + + // Return the modulus. + fn modulus(&self) -> &str { + self.jwk["n"].as_str().unwrap() + } + + // Return the exponent. + fn exponent(&self) -> &str { + self.jwk["e"].as_str().unwrap() + } + + // Return the decoding key + fn decoding_key(&self) -> DecodingKey { + DecodingKey::from_rsa_components(self.modulus(), self.exponent()) + } +} + +/// Authenticate the HTTP request by checking the authorisation token to ensure +/// the sender is who they claim to be. +pub fn authenticate(req: &HttpRequest) -> Result<(), Error> { + let jwk: &JsonWebKey = req.app_data().unwrap(); + + // If authentication is disabled there is nothing to do. + if !jwk.auth_enabled() { + return Ok(()); + } + + match req.headers().get(http::header::AUTHORIZATION) { + Some(token) => validate(&format_token(token), jwk), + None => { + tracing::error!("Missing bearer token in HTTP request."); + Err(Error::from(actix_web::HttpResponse::Unauthorized())) + } + } +} + +// Ensure the token is formatted correctly by removing the "Bearer" prefix if +// present. +fn format_token(token: &HeaderValue) -> String { + let token = token + .to_str() + .expect("Failed to convert token to string") + .replace("Bearer", ""); + token.trim().into() +} + +/// Validate a bearer token. 
+pub fn validate(token: &str, jwk: &JsonWebKey) -> Result<(), Error> {
+    let (message, signature) = split_token(&token);
+    return match crypto::verify(
+        &signature,
+        &message,
+        &jwk.decoding_key(),
+        jwk.algorithm(),
+    ) {
+        Ok(true) => Ok(()),
+        Ok(false) => {
+            tracing::error!("Signature verification failed.");
+            Err(Error::from(actix_web::HttpResponse::Unauthorized()))
+        }
+        Err(e) => {
+            tracing::error!(
+                "Failed to complete signature verification with error {}",
+                e
+            );
+            Err(Error::from(actix_web::HttpResponse::Unauthorized()))
+        }
+    };
+}
+
+// Split the JSON Web Token (JWT) into 2 parts, message and signature.
+// The message comprises the header and payload.
+//
+// JWT format:
+//      <header>.<payload>.<signature>
+//      \______ ________/
+//             \/
+//           message
+fn split_token(token: &str) -> (String, String) {
+    let elems = token.split('.').collect::<Vec<&str>>();
+    let message = format!("{}.{}", elems[0], elems[1]);
+    let signature = elems[2];
+    (message, signature.into())
+}
+
+#[test]
+fn validate_test() {
+    let token_file = std::env::current_dir()
+        .expect("Failed to get current directory")
+        .join("authentication")
+        .join("token");
+    let mut token = std::fs::read_to_string(token_file)
+        .expect("Failed to get bearer token");
+    let jwk_file = std::env::current_dir()
+        .expect("Failed to get current directory")
+        .join("authentication")
+        .join("jwk");
+    let jwk = init(Some(jwk_file.to_str().unwrap().into()));
+
+    validate(&token, &jwk).expect("Validation should pass");
+    // create invalid token
+    token.push_str("invalid");
+    validate(&token, &jwk)
+        .expect_err("Validation should fail with an invalid token");
+}
diff --git a/control-plane/rest/service/src/main.rs b/control-plane/rest/service/src/main.rs
index 6dc18be44..6104d3f4e 100644
--- a/control-plane/rest/service/src/main.rs
+++ b/control-plane/rest/service/src/main.rs
@@ -1,3 +1,4 @@
+mod authentication;
 mod v0;
 
 use actix_service::ServiceFactory;
@@ -7,6 +8,7 @@ use actix_web::{
     App,
     HttpServer,
 };
+
 use rustls::{
     internal::pemfile::{certs, rsa_private_keys},
     NoClientAuth,
@@ -47,6 +49,14 @@ pub(crate) struct CliArgs {
     /// Trace rest requests to the Jaeger endpoint agent
     #[structopt(long, short)]
     jaeger: Option<String>,
+
+    /// Path to JSON Web KEY file used for authenticating REST requests
+    #[structopt(long, required_unless = "no-auth")]
+    jwk: Option<String>,
+
+    /// Don't authenticate REST requests
+    #[structopt(long, required_unless = "jwk")]
+    no_auth: bool,
 }
 
 fn parse_dir(src: &str) -> anyhow::Result<std::path::PathBuf> {
@@ -162,6 +172,16 @@ fn load_certificates(
     Ok(config)
 }
 
+fn get_jwk_path() -> Option<String> {
+    match CliArgs::from_args().jwk {
+        Some(path) => Some(path),
+        None => match CliArgs::from_args().no_auth {
+            true => None,
+            false => panic!("Cannot authenticate without a JWK file"),
+        },
+    }
+}
+
 #[actix_web::main]
 async fn main() -> anyhow::Result<()> {
     // need to keep the jaeger pipeline tracer alive, if enabled
@@ -171,6 +191,7 @@ async fn main() -> anyhow::Result<()> {
         App::new()
             .wrap(RequestTracing::new())
             .wrap(middleware::Logger::default())
+            .app_data(authentication::init(get_jwk_path()))
             .configure_api(&v0::configure_api)
     };
 
diff --git a/control-plane/rest/service/src/v0/mod.rs b/control-plane/rest/service/src/v0/mod.rs
index 662cb58ee..c50af9427 100644
--- a/control-plane/rest/service/src/v0/mod.rs
+++ b/control-plane/rest/service/src/v0/mod.rs
@@ -14,18 +14,25 @@ pub mod volumes;
 
 use rest_client::{versions::v0::*, JsonGeneric};
 
+use crate::authentication::authenticate;
 use actix_service::ServiceFactory;
 use actix_web::{
     dev::{MessageBody, ServiceRequest, ServiceResponse},
     web::{self, Json},
+    Error,
+    FromRequest,
     HttpRequest,
 };
+use futures::future::Ready;
 use macros::actix::{delete, get, put};
 use paperclip::actix::OpenApiExt;
 use std::io::Write;
 use structopt::StructOpt;
 use tracing::info;
 
+use paperclip::actix::Apiv2Security;
+use serde::Deserialize;
+
 fn version() -> String {
     "v0".into()
 }
@@ -84,3 +91,26 @@ where
         .build()
         .configure(swagger_ui::configure)
 }
+
+#[derive(Apiv2Security, Deserialize)]
+#[openapi(
+    apiKey,
+    alias = "JWT",
+    in = "header",
+    name = "Authorization",
+    description = "Use format 'Bearer TOKEN'"
+)]
+pub struct BearerToken;
+
+impl FromRequest for BearerToken {
+    type Error = Error;
+    type Future = Ready<Result<Self, Error>>;
+    type Config = ();
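+
+    // Called by actix-web whenever a handler extracts a `BearerToken`:
+    // the request is authenticated before the handler body runs.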
+
+    fn from_request(
+        req: &HttpRequest,
+        _payload: &mut actix_web::dev::Payload,
+    ) -> Self::Future {
+        futures::future::ready(authenticate(req).map(|_| Self {}))
+    }
+}
diff --git a/control-plane/rest/src/lib.rs b/control-plane/rest/src/lib.rs
index 1166f9c82..faf718a40 100644
--- a/control-plane/rest/src/lib.rs
+++ b/control-plane/rest/src/lib.rs
@@ -45,18 +45,31 @@ pub struct ActixRestClient {
 impl ActixRestClient {
     /// creates a new client which uses the specified `url`
     /// uses the rustls connector if the url has the https scheme
-    pub fn new(url: &str, trace: bool) -> anyhow::Result<Self> {
-        Self::new_timeout(url, trace, std::time::Duration::from_secs(5))
+    pub fn new(
+        url: &str,
+        trace: bool,
+        bearer_token: Option<String>,
+    ) -> anyhow::Result<Self> {
+        Self::new_timeout(
+            url,
+            trace,
+            bearer_token,
+            std::time::Duration::from_secs(5),
+        )
     }
     /// creates a new client which uses the specified `url`
     /// uses the rustls connector if the url has the https scheme
     pub fn new_timeout(
         url: &str,
         trace: bool,
+        bearer_token: Option<String>,
         timeout: std::time::Duration,
     ) -> anyhow::Result<Self> {
         let url: url::Url = url.parse()?;
-        let builder = Client::builder().timeout(timeout);
+        let mut builder = Client::builder().timeout(timeout);
+        if let Some(token) = bearer_token {
+            builder = builder.bearer_auth(token);
+        }
 
         match url.scheme() {
             "https" => Self::new_https(builder, &url, trace),
diff --git a/control-plane/rest/tests/v0_test.rs b/control-plane/rest/tests/v0_test.rs
index 9e111d625..ae23ca856 100644
--- a/control-plane/rest/tests/v0_test.rs
+++ b/control-plane/rest/tests/v0_test.rs
@@ -15,29 +15,30 @@ async fn wait_for_services() {
     Liveness {}.request_on(ChannelVs::JsonGrpc).await.unwrap();
 }
 
-// to avoid waiting for timeouts
-async fn orderly_start(test: &ComposeTest) {
-    test.start_containers(vec!["nats", "core", "jsongrpc", "rest", "jaeger"])
-        .await
-        .unwrap();
-
-    test.connect_to_bus("nats").await;
-    wait_for_services().await;
-
-    test.start("mayastor").await.unwrap();
-
-    let mut hdl = test.grpc_handle("mayastor").await.unwrap();
-    hdl.mayastor.list_nexus(Null {}).await.unwrap();
+// Returns the path to the JWK file.
+fn jwk_file() -> String {
+    let jwk_file = std::env::current_dir()
+        .unwrap()
+        .join("authentication")
+        .join("jwk");
+    jwk_file.to_str().unwrap().into()
 }
 
-#[actix_rt::test]
-async fn client() {
+// Setup the infrastructure ready for the tests.
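+// When `auth` is true the rest server is started with the test JWK, so
+// every request must carry a valid bearer token; otherwise it is started
+// with --no-auth and tokens are not checked.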
+async fn test_setup(auth: &bool) -> (String, ComposeTest) { global::set_text_map_propagator(TraceContextPropagator::new()); let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() .with_service_name("rest-client") .install() .unwrap(); + let jwk_file = jwk_file(); + let mut rest_args = match auth { + true => vec!["--jwk", &jwk_file], + false => vec!["--no-auth"], + }; + rest_args.append(&mut vec!["-j", "10.1.0.8:6831", "--dummy-certificates"]); + let mayastor = "node-test-name"; let test = Builder::new() .name("rest") @@ -52,11 +53,9 @@ async fn client() { .add_container_spec( ContainerSpec::from_binary( "rest", - Binary::from_dbg("rest").with_nats("-n").with_args(vec![ - "-j", - "10.1.0.6:6831", - "--dummy-certificates", - ]), + Binary::from_dbg("rest") + .with_nats("-n") + .with_args(rest_args), ) .with_portmap("8080", "8080") .with_portmap("8081", "8081"), @@ -85,16 +84,55 @@ async fn client() { .build() .await .unwrap(); + (mayastor.into(), test) +} + +// to avoid waiting for timeouts +async fn orderly_start(test: &ComposeTest) { + test.start_containers(vec!["nats", "core", "jsongrpc", "rest", "jaeger"]) + .await + .unwrap(); + + test.connect_to_bus("nats").await; + wait_for_services().await; + + test.start("mayastor").await.unwrap(); + + let mut hdl = test.grpc_handle("mayastor").await.unwrap(); + hdl.mayastor.list_nexus(Null {}).await.unwrap(); +} + +// Return a bearer token to be sent with REST requests. +fn bearer_token() -> String { + let token_file = std::env::current_dir() + .expect("Failed to get current directory") + .join("authentication") + .join("token"); + std::fs::read_to_string(token_file).expect("Failed to get bearer token") +} - client_test(&mayastor.into(), &test).await; +#[actix_rt::test] +async fn client() { + // Run the client test both with and without authentication. + for auth in &[true, false] { + let (mayastor, test) = test_setup(auth).await; + client_test(&mayastor.into(), &test, auth).await; + } } -async fn client_test(mayastor: &NodeId, test: &ComposeTest) { +async fn client_test(mayastor: &NodeId, test: &ComposeTest, auth: &bool) { orderly_start(&test).await; - let client = ActixRestClient::new("https://localhost:8080", true) - .unwrap() - .v0(); + let client = ActixRestClient::new( + "https://localhost:8080", + true, + match auth { + true => Some(bearer_token()), + false => None, + }, + ) + .unwrap() + .v0(); let nodes = client.get_nodes().await.unwrap(); let mut node = Node { id: mayastor.clone(), @@ -285,3 +323,22 @@ async fn client_test(mayastor: &NodeId, test: &ComposeTest) { node.state = NodeState::Unknown; assert_eq!(client.get_nodes().await.unwrap(), vec![node]); } + +#[actix_rt::test] +async fn client_invalid_token() { + let (_, test) = test_setup(&true).await; + orderly_start(&test).await; + + // Use an invalid token to make requests. 
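+    // Appending characters to a valid token corrupts its signature, so the
+    // request must be rejected with an authentication error.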
+    let mut token = bearer_token();
+    token.push_str("invalid");
+
+    let client =
+        ActixRestClient::new("https://localhost:8080", true, Some(token))
+            .unwrap()
+            .v0();
+    client
+        .get_nodes()
+        .await
+        .expect_err("Request should fail with invalid token");
+}
diff --git a/control-plane/tests/tests/common/mod.rs b/control-plane/tests/tests/common/mod.rs
index 0bdd79a06..92278b15f 100644
--- a/control-plane/tests/tests/common/mod.rs
+++ b/control-plane/tests/tests/common/mod.rs
@@ -71,6 +71,7 @@ impl Cluster {
     async fn new(
         trace_rest: bool,
         timeout_rest: std::time::Duration,
+        bearer_token: Option<String>,
         components: Components,
         composer: ComposeTest,
         jaeger: (Tracer, Uninstall),
@@ -78,6 +79,7 @@ impl Cluster {
         let rest_client = ActixRestClient::new_timeout(
             "https://localhost:8080",
             trace_rest,
+            bearer_token,
             timeout_rest,
         )
         .unwrap();
@@ -163,6 +165,7 @@ pub struct ClusterBuilder {
     pools: Vec,
     replicas: Replica,
     trace: bool,
+    bearer_token: Option<String>,
     timeout: std::time::Duration,
 }
 
@@ -181,6 +184,7 @@ impl ClusterBuilder {
             pools: vec![],
             replicas: Default::default(),
             trace: true,
+            bearer_token: None,
             timeout: std::time::Duration::from_secs(3),
         }
     }
@@ -281,6 +285,7 @@ impl ClusterBuilder {
         let cluster = Cluster::new(
             self.trace,
             self.timeout,
+            self.bearer_token.clone(),
             components,
             composer,
             jaeger,
diff --git a/nix/pkgs/control-plane/cargo-project.nix b/nix/pkgs/control-plane/cargo-project.nix
index 5048c10eb..445c938af 100644
--- a/nix/pkgs/control-plane/cargo-project.nix
+++ b/nix/pkgs/control-plane/cargo-project.nix
@@ -29,7 +29,7 @@ let
   buildProps = rec {
     name = "control-plane";
     #cargoSha256 = "0000000000000000000000000000000000000000000000000000";
-    cargoSha256 = "05j0bcvisnz4j8v2aiqsdv5lawl0i4kg9wvrnigc9l7gz45i2750";
+    cargoSha256 = "0ixkil6sl0h72yb9rg66a32pc1ap903fnr4frnm9y218fk5g82vi";
     inherit version;
     src = whitelistSource ../../../. (pkgs.callPackage ../mayastor { }).src_list;
     cargoBuildFlags = [ "-p mbus_api" "-p agents" "-p rest" ];
diff --git a/nix/pkgs/control-plane/default.nix b/nix/pkgs/control-plane/default.nix
index 5e63f3f9d..b1bb92c4e 100644
--- a/nix/pkgs/control-plane/default.nix
+++ b/nix/pkgs/control-plane/default.nix
@@ -16,7 +16,6 @@ let
     '';
   };
   components = { src }: {
-    kiiss = agent { inherit src; name = "kiiss"; };
     core = agent { inherit src; name = "core"; };
     rest = agent { inherit src; name = "rest"; };
   };
diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix
index 60b4cf969..f038499fe 100644
--- a/nix/pkgs/mayastor/default.nix
+++ b/nix/pkgs/mayastor/default.nix
@@ -56,7 +56,7 @@ let
   buildProps = rec {
     name = "mayastor";
     #cargoSha256 = "0000000000000000000000000000000000000000000000000000";
-    cargoSha256 = "05zsfs6vwm6q8fhlmqv2s6568y1jqafkq83zi3zr9l3lcswnbgqv";
+    cargoSha256 = "0kzbbfg36rcly8xif64pqazf8687skwp3ajrlhf90d7g5rnc9lfn";
     inherit version;
     src = whitelistSource ../../../.
src_list; LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; diff --git a/scripts/openapi-check.sh b/scripts/openapi-check.sh index 694de11d8..8e749e0e2 100755 --- a/scripts/openapi-check.sh +++ b/scripts/openapi-check.sh @@ -19,7 +19,7 @@ if [[ $check_rest_src = "yes" ]]; then git diff --cached --exit-code $SRC 1>/dev/null && exit 0 fi -cargo run --bin rest -- -d -o $SPECS +cargo run --bin rest -- -d --no-auth -o $SPECS # If the spec was modified then fail the check git diff --exit-code $SPECS From f0076b7cc78d18fcc995e73bbcfde07677b95e08 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Thu, 4 Mar 2021 12:37:04 -0800 Subject: [PATCH 61/78] chore: fixup core_6 test failure Signed-off-by: Ana Hobden --- spdk-sys/logwrapper.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/spdk-sys/logwrapper.c b/spdk-sys/logwrapper.c index 1216fda20..8f4e056b1 100644 --- a/spdk-sys/logwrapper.c +++ b/spdk-sys/logwrapper.c @@ -6,26 +6,8 @@ void maya_log(int level, const char *file, const int line, const char *func, const char *format, va_list args) { - // There is a delicate balance here! This `buf` ideally should not be resized, since a heap alloc is expensive. - char buf[4096] = {0}; - int should_have_written = vsnprintf(buf, sizeof(buf), format, args); - - if (should_have_written > (int) sizeof(buf)) { - logfn(level, file, line, func, &buf[0], sizeof(buf)); - } else { - // If `should_have_written` is bigger than `buf`, then the message is too long. - // Instead, we'll try to malloc onto the heap and log with that instead. - char *dynamic_buf = malloc(should_have_written); - if (!dynamic_buf) { - // We are out of memory. Trying to allocate more is not going to work out ok. - // Since C strings need `\0` on the end, we'll do that. 
-            buf[sizeof(buf) - 1] = '\0';
-            logfn(level, file, line, func, &buf[0], sizeof(buf));
-        } else {
-            vsnprintf(dynamic_buf, should_have_written, format, args);
-            logfn(level, file, line, func, &dynamic_buf[0], sizeof(dynamic_buf));
-            free(dynamic_buf);
-        }
-    }
+    char buf[512] = {0};
+    vsnprintf(buf, sizeof(buf), format, args);
+    logfn(level, file, line, func, &buf[0], sizeof(buf));
 }

From 46049fb273c1ec54f999143c3d965e64330164f8 Mon Sep 17 00:00:00 2001
From: chriswldenyer
Date: Wed, 3 Mar 2021 15:15:35 +0000
Subject: [PATCH 62/78] test(e2e): install loki on the cluster

---
 Jenkinsfile                                   |  24 ++
 test/e2e/loki/promtail_configmap_e2e.yaml     | 274 ++++++++++++++++++
 .../loki/promtail_daemonset_e2e.template.yaml |  69 +++++
 test/e2e/loki/promtail_namespace_e2e.yaml     |   5 +
 test/e2e/loki/promtail_rbac_e2e.yaml          |  36 +++
 5 files changed, 408 insertions(+)
 create mode 100644 test/e2e/loki/promtail_configmap_e2e.yaml
 create mode 100644 test/e2e/loki/promtail_daemonset_e2e.template.yaml
 create mode 100644 test/e2e/loki/promtail_namespace_e2e.yaml
 create mode 100644 test/e2e/loki/promtail_rbac_e2e.yaml

diff --git a/Jenkinsfile b/Jenkinsfile
index 2002c7224..201ec3295 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -45,6 +45,28 @@ def getTestPlan() {
   return xray_on_demand_testplan
 }
 
+// Install Loki on the cluster
+def lokiInstall(tag) {
+  sh 'kubectl apply -f ./test/e2e/loki/promtail_namespace_e2e.yaml'
+  sh 'kubectl apply -f ./test/e2e/loki/promtail_rbac_e2e.yaml'
+  sh 'kubectl apply -f ./test/e2e/loki/promtail_configmap_e2e.yaml'
+  withCredentials([string(credentialsId: 'GRAFANA_API_KEY', variable: 'grafana_api_key')]) {
+    def cmd = "run=\"${env.BUILD_NUMBER}\" version=\"${tag}\" envsubst -no-unset < ./test/e2e/loki/promtail_daemonset_e2e.template.yaml | kubectl apply -f -"
+    sh "nix-shell --run '${cmd}'"
+  }
+}
+
+// Uninstall Loki
+def lokiUninstall(tag) {
+  withCredentials([string(credentialsId: 'GRAFANA_API_KEY', variable: 'grafana_api_key')]) {
+    def cmd = "run=\"${env.BUILD_NUMBER}\" version=\"${tag}\" envsubst -no-unset < ./test/e2e/loki/promtail_daemonset_e2e.template.yaml | kubectl delete -f -"
+    sh "nix-shell --run '${cmd}'"
+  }
+  sh 'kubectl delete -f ./test/e2e/loki/promtail_configmap_e2e.yaml'
+  sh 'kubectl delete -f ./test/e2e/loki/promtail_rbac_e2e.yaml'
+  sh 'kubectl delete -f ./test/e2e/loki/promtail_namespace_e2e.yaml'
+}
+
 // Send out a slack message if branch got broken or has recovered
 def notifySlackUponStateChange(build) {
   def cur = build.getResult()
@@ -295,6 +317,7 @@ pipeline {
                   } else {
                     tag = e2e_continuous_image_tag
                   }
+                  lokiInstall(tag)
                   def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --logsdir \"./logs/mayastor\" --profile \"${e2e_test_profile}\" "
 
                   // building images also means using the CI registry
@@ -302,6 +325,7 @@ pipeline {
                     cmd = cmd + " --registry \"" + env.REGISTRY + "\""
                   }
                   sh "nix-shell --run '${cmd}'"
+                  lokiUninstall(tag) // so that, if we keep the cluster, the next Loki instance can use different parameters
                 }
               }
               post {
diff --git a/test/e2e/loki/promtail_configmap_e2e.yaml b/test/e2e/loki/promtail_configmap_e2e.yaml
new file mode 100644
index 000000000..397e25b76
--- /dev/null
+++ b/test/e2e/loki/promtail_configmap_e2e.yaml
@@ -0,0 +1,274 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: promtail
+  namespace: loki-mayastor
+data:
+  promtail.yml: |
+    scrape_configs:
+    - pipeline_stages:
+      - docker:
+      job_name: kubernetes-pods-name
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - source_labels:
+        - __meta_kubernetes_pod_label_name
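+        # the pod's "name" label becomes the service name used by later rules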
+ target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - pipeline_stages: + - docker: + job_name: kubernetes-pods-app + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + source_labels: + - __meta_kubernetes_pod_label_name + - source_labels: + - __meta_kubernetes_pod_label_app + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - action: replace + source_labels: + - __host__ + target_label: node_name + - pipeline_stages: + - docker: + job_name: kubernetes-pods-direct-controllers + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + separator: '' + source_labels: + - __meta_kubernetes_pod_label_name + - __meta_kubernetes_pod_label_app + - action: drop + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + - source_labels: + - __meta_kubernetes_pod_controller_name + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - pipeline_stages: + - docker: + job_name: kubernetes-pods-indirect-controller + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: .+ + separator: '' + source_labels: + - __meta_kubernetes_pod_label_name + - 
__meta_kubernetes_pod_label_app + - action: keep + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + - action: replace + regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ + source_labels: + - __meta_kubernetes_pod_controller_name + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - pipeline_stages: + - docker: + job_name: kubernetes-pods-static + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: drop + regex: ^$ + source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - action: replace + source_labels: + - __meta_kubernetes_pod_label_component + target_label: __service__ + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: drop + regex: ^$ + source_labels: + - __service__ + - action: replace + replacement: $1 + separator: / + source_labels: + - __meta_kubernetes_namespace + - __service__ + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: instance + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror + - __meta_kubernetes_pod_container_name + target_label: __path__ + - pipeline_stages: + - docker: + job_name: journal + journal: + labels: + job: systemd-journal + relabel_configs: + - action: replace + source_labels: + - __journal__systemd_unit + target_label: unit + - action: replace + source_labels: + - __journal__hostname + target_label: node_name + diff --git a/test/e2e/loki/promtail_daemonset_e2e.template.yaml b/test/e2e/loki/promtail_daemonset_e2e.template.yaml new file mode 100644 index 000000000..21b354195 --- /dev/null +++ b/test/e2e/loki/promtail_daemonset_e2e.template.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: promtail + namespace: loki-mayastor +spec: + minReadySeconds: 10 + selector: + matchLabels: + name: promtail + template: + metadata: + labels: + name: promtail + spec: + containers: + - args: + - -client.url=https://${grafana_api_key}@logs-prod-us-central1.grafana.net/api/prom/push + - -config.file=/etc/promtail/promtail.yml + - -client.external-labels=run=${run},version=${version} + env: + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: grafana/promtail:2.1.0 + imagePullPolicy: Always + name: promtail + readinessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + initialDelaySeconds: 10 + ports: + - 
containerPort: 80 + name: http-metrics + securityContext: + privileged: true + runAsUser: 0 + volumeMounts: + - mountPath: /etc/promtail + name: promtail + - mountPath: /var/log + name: varlog + - mountPath: /var/lib/docker/containers + name: varlibdockercontainers + readOnly: true + - mountPath: /etc/machine-id + name: machineid + serviceAccount: promtail + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - configMap: + name: promtail + name: promtail + - hostPath: + path: /var/log + name: varlog + - hostPath: + path: /var/lib/docker/containers + name: varlibdockercontainers + - hostPath: + path: /etc/machine-id + name: machineid + updateStrategy: + type: RollingUpdate diff --git a/test/e2e/loki/promtail_namespace_e2e.yaml b/test/e2e/loki/promtail_namespace_e2e.yaml new file mode 100644 index 000000000..5efed877a --- /dev/null +++ b/test/e2e/loki/promtail_namespace_e2e.yaml @@ -0,0 +1,5 @@ + +apiVersion: v1 +kind: Namespace +metadata: + name: loki-mayastor diff --git a/test/e2e/loki/promtail_rbac_e2e.yaml b/test/e2e/loki/promtail_rbac_e2e.yaml new file mode 100644 index 000000000..9e1359280 --- /dev/null +++ b/test/e2e/loki/promtail_rbac_e2e.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: promtail + namespace: loki-mayastor +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: promtail +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: promtail +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: promtail +subjects: +- kind: ServiceAccount + name: promtail + namespace: loki-mayastor From 0bbf6a8a7682e92b8d8743d83d4f6d9d6c466259 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Wed, 3 Mar 2021 15:47:59 +0000 Subject: [PATCH 63/78] ci: fixes for correctness and maintainability MQ-232 Add support for configuration of test parameters by a selectable yaml file. Can be JSON or TOML as well. Add a configuration file for CI/CD Fix install test to conform with other tests Replace uses of Log.Error with Log.Info when reporting k8s errors Check mayastor pods health in common AfterEach function Generate artifacts under single directory artifacts, and modify so that jenkins collects artifacts from there for each test run. This includes configuration files, install yaml files and log files Use a symbol for mayastor namespace instead of using explicit strings. 
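For illustration, call sites now reference a shared constant rather than
a string literal, along the lines of (both lines are taken from the diff
below):

    const NSMayastor = "mayastor"

    pods, err := gTestEnv.KubeInt.CoreV1().Pods(NSMayastor).List(context.TODO(), metav1.ListOptions{})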
Support configurable replica counts

Update the config spec to allow replica specification for all relevant
tests

Refactor so that each test creates and uses storage classes, instead of
using the ones defined in deploy/storage-class.yaml

Do not apply deploy/storage-class.yaml
---
 .gitignore                                    |   1 +
 Jenkinsfile                                   |   4 +-
 scripts/e2e-cluster-dump.sh                   |   1 -
 scripts/e2e-test.sh                           |  74 ++--
 .../basic_volume_io/basic_volume_io_test.go   |  18 +-
 test/e2e/common/e2e_config/e2e_config.go      | 104 ++++++
 test/e2e/common/locations/util_locations.go   |  51 +++
 test/e2e/common/reporter/junit.go             |   9 +-
 test/e2e/common/test.go                       |  74 +++-
 test/e2e/common/util.go                       | 142 +++++---
 test/e2e/common/util_cleanup.go               |  49 +--
 test/e2e/common/util_mayastor_crds.go         |  12 +-
 test/e2e/common/util_pvc.go                   |   2 +-
 test/e2e/common/util_testpods.go              |   4 +
 test/e2e/configurations/ci_e2e_config.yaml    |  34 ++
 test/e2e/csi/dynamic_provisioning_test.go     |  15 +-
 test/e2e/csi/e2e_suite_test.go                |   5 +-
 test/e2e/csi/testsuites/testsuites.go         |   5 +-
 test/e2e/go.mod                               |   2 +
 test/e2e/go.sum                               |   7 +
 test/e2e/install/install_test.go              | 329 +++++-------------
 test/e2e/io_soak/filesystem_fio.go            |   6 +-
 test/e2e/io_soak/fio.go                       |  22 +-
 test/e2e/io_soak/io_soak_test.go              |  49 +--
 test/e2e/io_soak/rawblock_fio.go              |   6 +-
 .../lib/node_disconnect_lib.go                |   4 +-
 .../lib/node_disconnect_setup.go              |  21 +-
 .../replica_pod_remove_test.go                |   4 +-
 .../e2e/pvc_stress_fio/pvc_stress_fio_test.go |  88 +++--
 test/e2e/rebuild/basic_rebuild_test.go        |  24 +-
 test/e2e/replica/replica_test.go              |  24 +-
 .../e2e/resource_check/resource_check_test.go |  10 +-
 test/e2e/uninstall/uninstall_test.go          |  87 ++---
 33 files changed, 708 insertions(+), 579 deletions(-)
 create mode 100644 test/e2e/common/e2e_config/e2e_config.go
 create mode 100644 test/e2e/common/locations/util_locations.go
 create mode 100644 test/e2e/configurations/ci_e2e_config.yaml

diff --git a/.gitignore b/.gitignore
index 9b453f9a6..5802f6813 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@ test-yamls/*
 /package-lock.json
 /node_modules
 artifacts/
+.idea
diff --git a/Jenkinsfile b/Jenkinsfile
index 201ec3295..7bd66d1a2 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -318,7 +318,7 @@ pipeline {
                         tag = e2e_continuous_image_tag
                     }
                     lokiInstall(tag)
-                    def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --logsdir \"./logs/mayastor\" --profile \"${e2e_test_profile}\" "
+                    def cmd = "./scripts/e2e-test.sh --device /dev/sdb --tag \"${tag}\" --logs --profile \"${e2e_test_profile}\" "
                     // building images also means using the CI registry
                     if (e2e_build_images == true) {
@@ -360,7 +360,7 @@ pipeline {
                 }
             }
             always {
-                archiveArtifacts 'logs/**/*.*'
+                archiveArtifacts 'artifacts/**/*.*'
                 // always send the junit results back to Xray and Jenkins
                 junit 'e2e.*.xml'
                 script {
diff --git a/scripts/e2e-cluster-dump.sh b/scripts/e2e-cluster-dump.sh
index ad0755111..5c5f9b702 100755
--- a/scripts/e2e-cluster-dump.sh
+++ b/scripts/e2e-cluster-dump.sh
@@ -196,7 +196,6 @@ function getLogs {
         cluster-describe >& "$dest/cluster.describe.txt"
 
         echo "logfiles generated in $dest"
-        ls -l "$dest"
         echo ""
     else
diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index c545000ab..1789404f0 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -5,6 +5,8 @@ set -eu
 SCRIPTDIR=$(dirname "$(realpath "$0")")
 TESTDIR=$(realpath "$SCRIPTDIR/../test/e2e")
 REPORTSDIR=$(realpath "$SCRIPTDIR/..")
+ARTIFACTSDIR=$(realpath "$SCRIPTDIR/../artifacts")
+TOPDIR=$(realpath "$SCRIPTDIR/..")
 
 # List and Sequence of tests.
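# Illustrative invocation (the profile names "ondemand", "extended" and
# "continuous" are assumptions inferred from the ONDEMAND_TESTS,
# EXTENDED_TESTS and CONTINUOUS_TESTS variables below):
#
#   ./scripts/e2e-test.sh --device /dev/sdb --profile continuous
#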
#tests="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall" @@ -19,6 +21,14 @@ ONDEMAND_TESTS="install basic_volume_io csi resource_check uninstall" EXTENDED_TESTS="install basic_volume_io csi resource_check uninstall" CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" +#exit values +EXITV_OK=0 +EXITV_INVALID_OPTION=1 +EXITV_MISSING_OPTION=2 +EXITV_REPORTS_DIR_NOT_EXIST=3 +EXITV_FAILED=4 +EXITV_FAILED_CLUSTER_OK=255 + # Global state variables # test configuration state variables device= @@ -31,7 +41,7 @@ profile="default" on_fail="stop" uninstall_cleanup="n" generate_logs=0 -logsdir="" +logsdir="$ARTIFACTSDIR/logs" help() { cat < Location to generate logs (default: emit to stdout). - --onfail On fail, stop immediately or continue default($on_fail) - Behaviour for "continue" only differs if uninstall is in the list of tests (the default). + --onfail On fail, stop immediately or uninstall default($on_fail) + Behaviour for "uninstall" only differs if uninstall is in the list of tests (the default). --uninstall_cleanup On uninstall cleanup for reusable cluster. default($uninstall_cleanup) + --config config name or configuration file default(test/e2e/configurations/ci_e2e_config.yaml) Examples: $0 --device /dev/nvme0n1 --registry 127.0.0.1:5000 --tag a80ce0c @@ -84,7 +95,7 @@ while [ "$#" -gt 0 ]; do ;; -h|--help) help - exit 0 + exit $EXITV_OK ;; --logs) generate_logs=1 @@ -103,15 +114,16 @@ while [ "$#" -gt 0 ]; do --onfail) shift case $1 in - continue) + uninstall) on_fail=$1 ;; stop) on_fail=$1 ;; *) + echo "invalid option for --onfail" help - exit 2 + exit $EXITV_INVALID_OPTION esac ;; --uninstall_cleanup) @@ -121,14 +133,19 @@ while [ "$#" -gt 0 ]; do uninstall_cleanup=$1 ;; *) + echo "invalid option for --uninstall_cleanup" help - exit 2 + exit $EXITV_INVALID_OPTION esac ;; + --config) + shift + export e2e_config_file="$1" + ;; *) echo "Unknown option: $1" help - exit 1 + exit $EXITV_INVALID_OPTION ;; esac shift @@ -137,7 +154,7 @@ done if [ -z "$device" ]; then echo "Device for storage pools must be specified" help - exit 1 + exit $EXITV_MISSING_OPTION fi export e2e_pool_device=$device @@ -146,12 +163,13 @@ if [ -n "$tag" ]; then fi export e2e_docker_registry="$registry" # can be empty string +export e2e_top_dir="$TOPDIR" if [ -n "$custom_tests" ]; then if [ "$profile" != "default" ]; then echo "cannot specify --profile with --tests" help - exit 1 + exit $EXITV_INVALID_OPTION fi profile="custom" fi @@ -175,14 +193,14 @@ case "$profile" in *) echo "Unknown profile: $profile" help - exit 1 + exit $EXITV_INVALID_OPTION ;; esac export e2e_reports_dir="$REPORTSDIR" if [ ! -d "$e2e_reports_dir" ] ; then echo "Reports directory $e2e_reports_dir does not exist" - exit 1 + exit $EXITV_REPORTS_DIR_NOT_EXIST fi if [ "$uninstall_cleanup" == 'n' ] ; then @@ -191,6 +209,8 @@ else export e2e_uninstall_cleanup=1 fi +mkdir -p "$ARTIFACTSDIR" + test_failed=0 # Run go test in directory specified as $1 (relative path) @@ -220,6 +240,7 @@ contains() { } echo "Environment:" +echo " e2e_top_dir=$e2e_top_dir" echo " e2e_pool_device=$e2e_pool_device" echo " e2e_image_tag=$e2e_image_tag" echo " e2e_docker_registry=$e2e_docker_registry" @@ -238,13 +259,13 @@ for testname in $tests; do # defer uninstall till after other tests have been run. if [ "$testname" != "uninstall" ] ; then if ! runGoTest "$testname" ; then - echo "Test \"$testname\" Failed!!" + echo "Test \"$testname\" FAILED!" test_failed=1 break fi if ! 
("$SCRIPTDIR/e2e_check_pod_restarts.sh") ; then - echo "Test \"$testname\" Failed!! mayastor pods were restarted." + echo "Test \"$testname\" FAILED! mayastor pods were restarted." test_failed=1 generate_logs=1 break @@ -268,27 +289,28 @@ if [ "$generate_logs" -ne 0 ]; then fi if [ "$test_failed" -ne 0 ] && [ "$on_fail" == "stop" ]; then - exit 3 + echo "At least one test FAILED!" + exit $EXITV_FAILED fi # Always run uninstall test if specified if contains "$tests" "uninstall" ; then if ! runGoTest "uninstall" ; then - echo "Test \"uninstall\" Failed!!" + echo "Test \"uninstall\" FAILED!" test_failed=1 - # Dump to the screen only, we do NOT want to overwrite - # logfiles that may have been generated. - if ! "$SCRIPTDIR/e2e-cluster-dump.sh" --clusteronly ; then - # ignore failures in the dump script - : - fi + elif [ "$test_failed" -ne 0 ] ; then + # tests failed, but uninstall was successful + # so cluster is reusable + echo "At least one test FAILED! Cluster is usable." + exit $EXITV_FAILED_CLUSTER_OK fi fi -if [ "$test_failed" -ne 0 ]; then - echo "At least one test has FAILED!" - exit 1 + +if [ "$test_failed" -ne 0 ] ; then + echo "At least one test FAILED!" + exit $EXITV_FAILED fi echo "All tests have PASSED!" -exit 0 +exit $EXITV_OK diff --git a/test/e2e/basic_volume_io/basic_volume_io_test.go b/test/e2e/basic_volume_io/basic_volume_io_test.go index e31a6c85b..1f27dc4a1 100644 --- a/test/e2e/basic_volume_io/basic_volume_io_test.go +++ b/test/e2e/basic_volume_io/basic_volume_io_test.go @@ -4,13 +4,13 @@ package basic_volume_io_test import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" rep "e2e-basic/common/reporter" "testing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var defTimeoutSecs = "120s" @@ -28,8 +28,12 @@ func TestBasicVolumeIO(t *testing.T) { RunSpecsWithDefaultAndCustomReporters(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI", rep.GetReporters("basic-volume-io")) } -func basicVolumeIOTest(scName string) { - volName := "basic-vol-io-test-" + scName +func basicVolumeIOTest(protocol common.ShareProto) { + scName := "basic-vol-io-test-" + string(protocol) + err := common.MkStorageClass(scName, e2e_config.GetConfig().BasicVolumeIO.Replicas, protocol) + Expect(err).ToNot(HaveOccurred(), "Creating storage class %s", scName) + + volName := "basic-vol-io-test-" + string(protocol) // Create the volume common.MkPVC(volName, scName) tmp := volSc{volName, scName} @@ -63,6 +67,9 @@ func basicVolumeIOTest(scName string) { // Delete the volume common.RmPVC(volName, scName) volNames = volNames[:len(volNames)-1] + + err = common.RmStorageClass(scName) + Expect(err).ToNot(HaveOccurred(), "Deleting storage class %s", scName) } var _ = Describe("Mayastor Volume IO test", func() { @@ -76,15 +83,14 @@ var _ = Describe("Mayastor Volume IO test", func() { }) It("should verify an NVMe-oF TCP volume can process IO", func() { - basicVolumeIOTest("mayastor-nvmf") + basicVolumeIOTest(common.ShareProtoNvmf) }) It("should verify an iSCSI volume can process IO", func() { - basicVolumeIOTest("mayastor-iscsi") + basicVolumeIOTest(common.ShareProtoIscsi) }) }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) diff --git a/test/e2e/common/e2e_config/e2e_config.go b/test/e2e/common/e2e_config/e2e_config.go new file mode 100644 index 000000000..41169ab28 --- /dev/null +++ 
b/test/e2e/common/e2e_config/e2e_config.go
@@ -0,0 +1,104 @@
+package e2e_config
+
+import (
+	"e2e-basic/common/locations"
+	"fmt"
+	"github.com/ilyakaznacheev/cleanenv"
+	"gopkg.in/yaml.v2"
+	"io/ioutil"
+	"os"
+	"path"
+	"sync"
+)
+
+// E2EConfig is an application configuration structure
+type E2EConfig struct {
+	// Operational parameters
+	Cores         int      `yaml:"cores,omitempty"`
+	Registry      string   `yaml:"registry" env:"e2e_docker_registry" env-default:"ci-registry.mayastor-ci.mayadata.io"`
+	ImageTag      string   `yaml:"imageTag" env:"e2e_image_tag" env-default:"ci"`
+	PoolDevice    string   `yaml:"poolDevice" env:"e2e_pool_device"`
+	PoolYamlFiles []string `yaml:"poolYamlFiles" env:"e2e_pool_yaml_files"`
+	// Individual Test parameters
+	PVCStress struct {
+		Replicas   int `yaml:"replicas" env-default:"1"`
+		CdCycles   int `yaml:"cdCycles" env-default:"100"`
+		CrudCycles int `yaml:"crudCycles" env-default:"20"`
+	}
+	IOSoakTest struct {
+		Replicas         int      `yaml:"replicas" env-default:"2"`
+		Duration         string   `yaml:"duration" env-default:"30m"`
+		LoadFactor       int      `yaml:"loadFactor" env-default:"10"`
+		Protocols        []string `yaml:"protocols" env-default:"nvmf,iscsi"`
+		FioFixedDuration int      `yaml:"fioFixedDuration" env-default:"60"`
+		FioDutyCycles    []struct {
+			ThinkTime       int `yaml:"thinkTime"`
+			ThinkTimeBlocks int `yaml:"thinkTimeBlocks"`
+		} `yaml:"fioDutyCycles"`
+	} `yaml:"ioSoakTest"`
+	CSI struct {
+		Replicas       int    `yaml:"replicas" env-default:"1"`
+		SmallClaimSize string `yaml:"smallClaimSize" env-default:"50Mi"`
+		LargeClaimSize string `yaml:"largeClaimSize" env-default:"500Mi"`
+	} `yaml:"csi"`
+	Uninstall struct {
+		Cleanup int `yaml:"cleanup" env:"e2e_uninstall_cleanup"`
+	} `yaml:"uninstall"`
+	BasicVolumeIO struct {
+		Replicas int `yaml:"replicas" env-default:"1"`
+	} `yaml:"basicVolumeIO"`
+	// Run configuration
+	ReportsDir string `yaml:"reportsDir" env:"e2e_reports_dir"`
+}
+
+var once sync.Once
+var e2eConfig E2EConfig
+
+// This works because *ALL* test source directories are one level deep.
+const configDir = "../configurations"
+
+func configFileExists(path string) bool {
+	if _, err := os.Stat(path); err == nil {
+		return true
+	} else if os.IsNotExist(err) {
+		fmt.Printf("Configuration file %s does not exist\n", path)
+	} else {
+		fmt.Printf("Configuration file %s is not accessible\n", path)
+	}
+	return false
+}
+
+// This function is called early from junit, when various bits have not been initialised
+// yet, so we cannot use logf or Expect; instead we use fmt.Print... and panic.
+func GetConfig() E2EConfig {
+	var err error
+	once.Do(func() {
+		// We absorb the complexity of locating the configuration file here
+		// so that scripts invoking the tests can be simpler
+		// - if OS envvar e2e_config_file is not defined the config file is defaulted to ci_e2e_config
+		// - if OS envvar e2e_config_file is defined
+		//   - if it is a path to a file then that file is used as the config file
+		//   - else try to use a file of the same name in the configuration directory
+		configFile := fmt.Sprintf("%s/ci_e2e_config.yaml", configDir)
+		// A configuration file *MUST* be provided.
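+		// For example (all values other than the default file name are hypothetical):
+		//   e2e_config_file unset            -> ../configurations/ci_e2e_config.yaml
+		//   e2e_config_file=/tmp/custom.yaml -> /tmp/custom.yaml, if that file exists
+		//   e2e_config_file=custom.yaml      -> ../configurations/custom.yaml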
+		value, ok := os.LookupEnv("e2e_config_file")
+		if ok {
+			if configFileExists(value) {
+				configFile = value
+			} else {
+				configFile = fmt.Sprintf("%s/%s", configDir, value)
+			}
+		}
+		fmt.Printf("Using configuration file %s\n", configFile)
+		err = cleanenv.ReadConfig(configFile, &e2eConfig)
+		if err != nil {
+			panic(fmt.Sprintf("%v", err))
+		}
+
+		cfgBytes, _ := yaml.Marshal(e2eConfig)
+		cfgUsedFile := path.Clean(locations.GetArtifactsDir() + "/e2e_config.used.yaml")
+		_ = ioutil.WriteFile(cfgUsedFile, cfgBytes, 0644)
+	})
+
+	return e2eConfig
+}
diff --git a/test/e2e/common/locations/util_locations.go b/test/e2e/common/locations/util_locations.go
new file mode 100644
index 000000000..2a71a38e3
--- /dev/null
+++ b/test/e2e/common/locations/util_locations.go
@@ -0,0 +1,51 @@
+package locations
+
+import (
+	"os"
+	"path"
+	"runtime"
+	"sync"
+
+	. "github.com/onsi/gomega"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var once sync.Once
+var topDir string
+
+func init() {
+	once.Do(func() {
+		value, ok := os.LookupEnv("e2e_top_dir")
+		if !ok {
+			_, filename, _, _ := runtime.Caller(0)
+			topDir = path.Clean(filename + "/../../../../")
+		} else {
+			topDir = value
+		}
+		logf.Log.Info("Repo", "top directory", topDir)
+	})
+}
+
+func locationExists(path string) string {
+	_, err := os.Stat(path)
+	Expect(err).To(BeNil(), "%s", err)
+	return path
+}
+
+func GetDeployDir() string {
+	return locationExists(path.Clean(topDir + "/deploy"))
+}
+
+func GetScriptsDir() string {
+	return locationExists(path.Clean(topDir + "/scripts"))
+}
+
+func GetArtifactsDir() string {
+	return path.Clean(topDir + "/artifacts")
+}
+
+// This is a generated directory, so it may not exist yet.
+func GetGeneratedYamlsDir() string {
+	return path.Clean(topDir + "/artifacts/install/yamls")
+}
diff --git a/test/e2e/common/reporter/junit.go b/test/e2e/common/reporter/junit.go
index dd45e6a96..1809e071f 100644
--- a/test/e2e/common/reporter/junit.go
+++ b/test/e2e/common/reporter/junit.go
@@ -1,19 +1,20 @@
 package reporter
 
 import (
-	"os"
+	"e2e-basic/common/e2e_config"
 
 	. "github.com/onsi/ginkgo"
 	"github.com/onsi/ginkgo/reporters"
 )
 
 func GetReporters(name string) []Reporter {
-	reportDir := os.Getenv("e2e_reports_dir")
-	if reportDir == "" {
+	cfg := e2e_config.GetConfig()
+
+	if cfg.ReportsDir == "" {
 		panic("reportDir not defined - define via e2e_reports_dir environment variable")
 	}
 	testGroupPrefix := "e2e."
-	xmlFileSpec := reportDir + "/" + testGroupPrefix + name + "-junit.xml"
+	xmlFileSpec := cfg.ReportsDir + "/" + testGroupPrefix + name + "-junit.xml"
 	junitReporter := reporters.NewJUnitReporter(xmlFileSpec)
 	return []Reporter{junitReporter}
 }
diff --git a/test/e2e/common/test.go b/test/e2e/common/test.go
index e3612a4ef..37ea3b46f 100644
--- a/test/e2e/common/test.go
+++ b/test/e2e/common/test.go
@@ -2,21 +2,26 @@ package common
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 	"time"
 
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
-
 	. "github.com/onsi/ginkgo"
 	. 
"github.com/onsi/gomega" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/deprecated/scheme" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) type TestEnvironment struct { @@ -31,15 +36,16 @@ type TestEnvironment struct { var gTestEnv TestEnvironment func SetupTestEnv() { - + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) By("bootstrapping test environment") + var err error + useCluster := true testEnv := &envtest.Environment{ UseExistingCluster: &useCluster, AttachControlPlaneOutput: true, } - var err error cfg, err := testEnv.Start() Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) @@ -84,8 +90,64 @@ func SetupTestEnv() { } } -func TeardownTestEnv() { - AfterSuiteCleanup() +func TeardownTestEnvNoCleanup() { err := gTestEnv.TestEnv.Stop() Expect(err).ToNot(HaveOccurred()) } + +func TeardownTestEnv() { + AfterSuiteCleanup() + TeardownTestEnvNoCleanup() +} + +// placeholder function for now +// To aid postmortem analysis for the most common CI use case +// namely cluster is retained aon failure, we do nothing +// For other situations behaviour should be configurable +func AfterSuiteCleanup() { + logf.Log.Info("AfterSuiteCleanup") +} + +// Check that no PVs, PVCs and MSVs are still extant. +// Returns an error if resources exists. +func AfterEachCheck() error { + var errorMsg = "" + + logf.Log.Info("AfterEachCheck") + + // Phase 1 to delete dangling resources + pvcs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metaV1.ListOptions{}) + if len(pvcs.Items) != 0 { + errorMsg += " found leftover PersistentVolumeClaims" + logf.Log.Info("AfterEachCheck: found leftover PersistentVolumeClaims, test fails.") + } + + pvs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metaV1.ListOptions{}) + if len(pvs.Items) != 0 { + errorMsg += " found leftover PersistentVolumes" + logf.Log.Info("AfterEachCheck: found leftover PersistentVolumes, test fails.") + } + + // Mayastor volumes + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + msvs, _ := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metaV1.ListOptions{}) + if len(msvs.Items) != 0 { + errorMsg += " found leftover MayastorVolumes" + logf.Log.Info("AfterEachCheck: found leftover MayastorVolumes, test fails.") + } + + // Check that Mayastor pods are healthy no restarts or fails. + err := CheckPods(NSMayastor) + if err != nil { + errorMsg = fmt.Sprintf("%s %v", errorMsg, err) + } + + if len(errorMsg) != 0 { + return errors.New(errorMsg) + } + return nil +} diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 52729802c..ff062b5db 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -8,45 +8,56 @@ import ( "strconv" "time" + appsV1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + logf "sigs.k8s.io/controller-runtime/pkg/log" . 
"github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" ) const NSMayastor = "mayastor" +const CSIProvisioner = "io.openebs.csi-mayastor" -func ApplyDeployYaml(filename string) { +type ShareProto string +const( + ShareProtoNvmf ShareProto = "nvmf" + ShareProtoIscsi = "iscsi" +) + +// Helper for passing yaml from the specified directory to kubectl +func KubeCtlApplyYaml(filename string, dir string) { cmd := exec.Command("kubectl", "apply", "-f", filename) - cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + cmd.Dir = dir + logf.Log.Info("kubectl apply ", "yaml file", filename, "path", cmd.Dir) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } -func DeleteDeployYaml(filename string) { +// Helper for passing yaml from the specified directory to kubectl +func KubeCtlDeleteYaml(filename string, dir string) { cmd := exec.Command("kubectl", "delete", "-f", filename) - cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + cmd.Dir = dir + logf.Log.Info("kubectl delete ", "yaml file", filename, "path", cmd.Dir) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } // create a storage class -func MkStorageClass(scName string, scReplicas int, protocol string, provisioner string) error { +func MkStorageClass(scName string, scReplicas int, protocol ShareProto) error { + logf.Log.Info("Creating storage class", "name", scName, "replicas", scReplicas, "protocol", protocol) createOpts := &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: scName, Namespace: "default", }, - Provisioner: provisioner, + Provisioner: CSIProvisioner, } createOpts.Parameters = make(map[string]string) - createOpts.Parameters["protocol"] = protocol + createOpts.Parameters["protocol"] = string(protocol) createOpts.Parameters["repl"] = strconv.Itoa(scReplicas) ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses @@ -56,6 +67,7 @@ func MkStorageClass(scName string, scReplicas int, protocol string, provisioner // remove a storage class func RmStorageClass(scName string) error { + logf.Log.Info("Deleting storage class", "name", scName) ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses deleteErr := ScApi().Delete(context.TODO(), scName, metav1.DeleteOptions{}) return deleteErr @@ -78,7 +90,7 @@ func ApplyNodeSelectorToDeployment(deploymentName string, namespace string, labe deployment.Spec.Template.Spec.NodeSelector = make(map[string]string) } deployment.Spec.Template.Spec.NodeSelector[label] = value - _, err = depApi(NSMayastor).Update(context.TODO(), deployment, metav1.UpdateOptions{}) + _, err = depApi(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) } @@ -89,7 +101,7 @@ func RemoveAllNodeSelectorsFromDeployment(deploymentName string, namespace strin Expect(err).ToNot(HaveOccurred()) if deployment.Spec.Template.Spec.NodeSelector != nil { deployment.Spec.Template.Spec.NodeSelector = nil - _, err = depApi(NSMayastor).Update(context.TODO(), deployment, metav1.UpdateOptions{}) + _, err = depApi(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) } Expect(err).ToNot(HaveOccurred()) } @@ -105,7 +117,7 @@ func SetDeploymentReplication(deploymentName string, namespace string, replicas deployment, err := depAPI(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) deployment.Spec.Replicas = replicas - deployment, err = depAPI(NSMayastor).Update(context.TODO(), 
deployment, metav1.UpdateOptions{}) + deployment, err = depAPI(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) if err == nil { break } @@ -203,48 +215,74 @@ func PodPresentOnNode(podNameRegexp string, namespace string, nodeName string) b return false } -func AfterSuiteCleanup() { - logf.Log.Info("AfterSuiteCleanup") - // Place holder function, - // to facilitate post-mortem analysis do nothing - // however we may choose to cleanup based on - // test configuration. +func mayastorReadyPodCount() int { + var mayastorDaemonSet appsV1.DaemonSet + if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: NSMayastor}, &mayastorDaemonSet) != nil { + logf.Log.Info("Failed to get mayastor DaemonSet") + return -1 + } + logf.Log.Info("mayastor daemonset", "available instances", mayastorDaemonSet.Status.NumberAvailable) + return int(mayastorDaemonSet.Status.NumberAvailable) } -// Check that no PVs, PVCs and MSVs are still extant. -// Returns an error if resources exists. -func AfterEachCheck() error { - var errorMsg = "" - - logf.Log.Info("AfterEachCheck") +func moacReady() bool { + var moacDeployment appsV1.Deployment + if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: "moac", Namespace: NSMayastor}, &moacDeployment) != nil { + logf.Log.Info("Failed to get MOAC deployment") + return false + } - // Phase 1 to delete dangling resources - pvcs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{}) - if len(pvcs.Items) != 0 { - errorMsg += " found leftover PersistentVolumeClaims" - logf.Log.Info("AfterEachCheck: found leftover PersistentVolumeClaims, test fails.") + logf.Log.Info("moacDeployment.Status", + "ObservedGeneration", moacDeployment.Status.ObservedGeneration, + "Replicas", moacDeployment.Status.Replicas, + "UpdatedReplicas", moacDeployment.Status.UpdatedReplicas, + "ReadyReplicas", moacDeployment.Status.ReadyReplicas, + "AvailableReplicas", moacDeployment.Status.AvailableReplicas, + "UnavailableReplicas", moacDeployment.Status.UnavailableReplicas, + "CollisionCount", moacDeployment.Status.CollisionCount) + for ix, condition := range moacDeployment.Status.Conditions { + logf.Log.Info("Condition", "ix", ix, + "Status", condition.Status, + "Type", condition.Type, + "Message", condition.Message, + "Reason", condition.Reason) } - pvs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) - if len(pvs.Items) != 0 { - errorMsg += " found leftover PersistentVolumes" - logf.Log.Info("AfterEachCheck: found leftover PersistentVolumes, test fails.") + for _, condition := range moacDeployment.Status.Conditions { + if condition.Type == appsV1.DeploymentAvailable { + if condition.Status == corev1.ConditionTrue { + logf.Log.Info("MOAC is Available") + return true + } + } } + logf.Log.Info("MOAC is Not Available") + return false +} - // Mayastor volumes - msvGVR := schema.GroupVersionResource{ - Group: "openebs.io", - Version: "v1alpha1", - Resource: "mayastorvolumes", +// Checks if MOAC is available and if the requisite number of mayastor instances are +// up and running. 
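+// A minimal usage sketch (the values mirror the install test, which polls
+// every 2 seconds for up to 540 seconds):
+//   ready, err := MayastorReady(2, 540)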
+func MayastorReady(sleepTime int, duration int) (bool, error) { + nodes, err := GetNodeLocs() + if err != nil { + return false, err } - msvs, _ := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) - if len(msvs.Items) != 0 { - errorMsg += " found leftover MayastorVolumes" - logf.Log.Info("AfterEachCheck: found leftover MayastorVolumes, test fails.") + + var mayastorNodes []string + numMayastorInstances := 0 + for _, node := range nodes { + if node.MayastorNode && !node.MasterNode { + mayastorNodes = append(mayastorNodes, node.NodeName) + numMayastorInstances += 1 + } } - if len(errorMsg) != 0 { - return errors.New(errorMsg) + count := (duration + sleepTime - 1) / sleepTime + ready := false + for ix := 0; ix < count && !ready; ix++ { + time.Sleep(time.Duration(sleepTime) * time.Second) + ready = mayastorReadyPodCount() == numMayastorInstances && moacReady() } - return nil + + return ready, nil } diff --git a/test/e2e/common/util_cleanup.go b/test/e2e/common/util_cleanup.go index fb1bdb2dd..a2ed10497 100644 --- a/test/e2e/common/util_cleanup.go +++ b/test/e2e/common/util_cleanup.go @@ -145,7 +145,7 @@ func DeleteAllMsvs() (int, error) { Resource: "mayastorvolumes", } - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err != nil { // This function may be called by AfterSuite by uninstall test so listing MSVs may fail correctly logf.Log.Info("DeleteAllMsvs: list MSVs failed.", "Error", err) @@ -159,10 +159,10 @@ func DeleteAllMsvs() (int, error) { } } - // Wait 2 minutes for resources to be deleted numMsvs := 0 + // Wait 2 minutes for resources to be deleted for attempts := 0; attempts < 120; attempts++ { - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err == nil && msvs != nil { numMsvs = len(msvs.Items) if numMsvs == 0 { @@ -186,7 +186,7 @@ func DeleteAllPoolFinalizers() (bool, error) { Resource: "mayastorpools", } - pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err != nil { logf.Log.Info("DeleteAllPoolFinalisers: list MSPs failed.", "Error", err) return false, err @@ -198,7 +198,7 @@ func DeleteAllPoolFinalizers() (bool, error) { if finalizers != nil { logf.Log.Info("Removing all finalizers", "pool", pool.GetName(), "finalizer", finalizers) pool.SetFinalizers(empty) - _, err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Update(context.TODO(), &pool, metav1.UpdateOptions{}) + _, err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace(NSMayastor).Update(context.TODO(), &pool, metav1.UpdateOptions{}) if err != nil { deleteErr = err logf.Log.Info("Pool update finalizer", "error", err) @@ -218,7 +218,7 @@ func DeleteAllPools() bool { Resource: "mayastorpools", } - pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err != nil { // This function may be 
called by AfterSuite by uninstall test so listing MSVs may fail correctly logf.Log.Info("DeleteAllPools: list MSPs failed.", "Error", err) @@ -227,9 +227,9 @@ func DeleteAllPools() bool { logf.Log.Info("DeleteAllPools: deleting MayastorPools") for _, pool := range pools.Items { logf.Log.Info("DeleteAllPools: deleting", "pool", pool.GetName()) - err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64}) + err = gTestEnv.DynamicClient.Resource(poolGVR).Namespace(NSMayastor).Delete(context.TODO(), pool.GetName(), metav1.DeleteOptions{GracePeriodSeconds: &ZeroInt64}) if err != nil { - logf.Log.Error(err, "DeleteAllPools: failed to delete pool", pool.GetName(), "error", err) + logf.Log.Info("DeleteAllPools: failed to delete pool", pool.GetName(), "error", err) } } } @@ -237,7 +237,7 @@ func DeleteAllPools() bool { numPools := 0 // Wait 2 minutes for resources to be deleted for attempts := 0; attempts < 120; attempts++ { - pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + pools, err := gTestEnv.DynamicClient.Resource(poolGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err == nil && pools != nil { numPools = len(pools.Items) } @@ -259,48 +259,53 @@ func DeleteAllPools() bool { func MayastorUndeletedPodCount() int { ns, err := gTestEnv.KubeInt.CoreV1().Namespaces().Get(context.TODO(), NSMayastor, metav1.GetOptions{}) if err != nil { - logf.Log.Error(err, "MayastorUndeletedPodCount: get namespace") + logf.Log.Info("MayastorUndeletedPodCount: get namespace", "error", err) + //FIXME: if the error is namespace not found return 0 return -1 } if ns == nil { // No namespace => no mayastor pods return 0 } - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + pods, err := gTestEnv.KubeInt.CoreV1().Pods(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.") + logf.Log.Info("MayastorUndeletedPodCount: list pods failed.", "error", err) return -1 } return len(pods.Items) } // Force deletion of all existing mayastor pods -// Returns true if pods were deleted, false otherwise, -// and the number of pods still present +// returns the number of pods still present, and error func ForceDeleteMayastorPods() (bool, int, error) { + var err error + podsDeleted := false + logf.Log.Info("EnsureMayastorDeleted") - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + pods, err := gTestEnv.KubeInt.CoreV1().Pods(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logf.Log.Error(err, "EnsureMayastorDeleted: list pods failed.") - return false, 0, err + logf.Log.Info("EnsureMayastorDeleted: list pods failed.", "error", err) + return podsDeleted, 0, err } else if len(pods.Items) == 0 { - return false, 0, nil + return podsDeleted, 0, nil } logf.Log.Info("EnsureMayastorDeleted: MayastorPods found.", "Count", len(pods.Items)) for _, pod := range pods.Items { logf.Log.Info("EnsureMayastorDeleted: Force deleting", "pod", pod.Name) - cmd := exec.Command("kubectl", "-n", "mayastor", "delete", "pod", pod.Name, "--grace-period", "0", "--force") + cmd := exec.Command("kubectl", "-n", NSMayastor, "delete", "pod", pod.Name, "--grace-period", "0", "--force") _, err := cmd.CombinedOutput() if err != nil { - logf.Log.Error(err, 
"EnsureMayastorDeleted", "podName", pod.Name) + logf.Log.Info("EnsureMayastorDeleted", "podName", pod.Name, "error", err) + } else { + podsDeleted = true } } podCount := 0 // We have made the best effort to cleanup, give things time to settle. for attempts := 0; attempts < 60 && MayastorUndeletedPodCount() != 0; attempts++ { - pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{}) + pods, err = gTestEnv.KubeInt.CoreV1().Pods(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err == nil { podCount = len(pods.Items) if podCount == 0 { @@ -310,7 +315,7 @@ func ForceDeleteMayastorPods() (bool, int, error) { time.Sleep(2 * time.Second) } - return true, podCount, nil + return podsDeleted, podCount, err } // "Big" sweep, attempts to remove artefacts left over in the cluster diff --git a/test/e2e/common/util_mayastor_crds.go b/test/e2e/common/util_mayastor_crds.go index 2af167467..240dd4f01 100644 --- a/test/e2e/common/util_mayastor_crds.go +++ b/test/e2e/common/util_mayastor_crds.go @@ -29,7 +29,7 @@ func GetMSV(uuid string) *MayastorVolStatus { Version: "v1alpha1", Resource: "mayastorvolumes", } - msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).Get(context.TODO(), uuid, metav1.GetOptions{}) if err != nil { fmt.Println(err) return nil @@ -96,7 +96,7 @@ func IsMSVDeleted(uuid string) bool { Resource: "mayastorvolumes", } - msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).Get(context.TODO(), uuid, metav1.GetOptions{}) if err != nil { // Unfortunately there is no associated error code so we resort to string comparison @@ -118,7 +118,7 @@ func DeleteMSV(uuid string) error { Resource: "mayastorvolumes", } - err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Delete(context.TODO(), uuid, metav1.DeleteOptions{}) + err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).Delete(context.TODO(), uuid, metav1.DeleteOptions{}) return err } @@ -149,7 +149,7 @@ func getMsvGvr() schema.GroupVersionResource { // Get the k8s MSV CRD func getMsv(uuid string) (*unstructured.Unstructured, error) { msvGVR := getMsvGvr() - return gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + return gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).Get(context.TODO(), uuid, metav1.GetOptions{}) } // Get a field within the MSV. @@ -213,7 +213,7 @@ func UpdateNumReplicas(uuid string, numReplicas int64) error { // Update the k8s MSV object. 
msvGVR := getMsvGvr() - _, err = gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Update(context.TODO(), msv, metav1.UpdateOptions{}) + _, err = gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).Update(context.TODO(), msv, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("Failed to update MSV: %v", err) } @@ -313,7 +313,7 @@ func CheckForMSVs() (bool, error) { Resource: "mayastorvolumes", } - msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{}) + msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace(NSMayastor).List(context.TODO(), metav1.ListOptions{}) if err == nil && msvs != nil && len(msvs.Items) != 0 { logf.Log.Info("CheckForVolumeResources: found MayastorVolumes", "MayastorVolumes", msvs.Items) diff --git a/test/e2e/common/util_pvc.go b/test/e2e/common/util_pvc.go index e16215735..c8165e540 100644 --- a/test/e2e/common/util_pvc.go +++ b/test/e2e/common/util_pvc.go @@ -98,7 +98,7 @@ var blockVolumeMode = corev1.PersistentVolumeBlock // 2. The associated PV is created and its status transitions bound // 3. The associated MV is created and has a State "healthy" func mkPVC(volName string, scName string, rawBlock bool) string { - logf.Log.Info("creating", "volume", volName, "storageClass", scName) + logf.Log.Info("Creating", "volume", volName, "storageClass", scName) // PVC create options createOpts := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/common/util_testpods.go b/test/e2e/common/util_testpods.go index 14b7ae909..435c03694 100644 --- a/test/e2e/common/util_testpods.go +++ b/test/e2e/common/util_testpods.go @@ -71,11 +71,13 @@ func IsPodRunning(podName string) bool { /// Create a Pod in default namespace, no options and no context func CreatePod(podDef *corev1.Pod) (*corev1.Pod, error) { + logf.Log.Info("Creating", "pod", podDef.Name) return gTestEnv.KubeInt.CoreV1().Pods("default").Create(context.TODO(), podDef, metav1.CreateOptions{}) } /// Delete a Pod in default namespace, no options and no context func DeletePod(podName string) error { + logf.Log.Info("Deleting", "pod", podName) return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) } @@ -137,6 +139,7 @@ func CreateFioPodDef(podName string, volName string) *corev1.Pod { /// Create a test fio pod in default namespace, no options and no context /// mayastor volume is mounted on /volume func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { + logf.Log.Info("Creating fio pod definition", "name", podName, "volume type", "filesystem") podDef := createFioPodDef(podName, volName, false) return CreatePod(podDef) } @@ -144,6 +147,7 @@ func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { /// Create a test fio pod in default namespace, no options and no context /// mayastor device is mounted on /dev/sdm func CreateRawBlockFioPod(podName string, volName string) (*corev1.Pod, error) { + logf.Log.Info("Creating fio pod definition", "name", podName, "volume type", "raw block") podDef := createFioPodDef(podName, volName, true) return CreatePod(podDef) } diff --git a/test/e2e/configurations/ci_e2e_config.yaml b/test/e2e/configurations/ci_e2e_config.yaml new file mode 100644 index 000000000..1f7857e3b --- /dev/null +++ b/test/e2e/configurations/ci_e2e_config.yaml @@ -0,0 +1,34 @@ +# configuration/parameters for CI/CD e2e test runs +pvcstress: + replicas: 1 + cdCycles: 100 + crudCycles: 10 +ioSoakTest: + replicas: 1 
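+  # The keys in this section mirror the IOSoakTest struct in
+  # test/e2e/common/e2e_config/e2e_config.go; keys omitted here fall back
+  # to the env-default values declared on the struct fields.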
+ duration: 10m + loadFactor: 10 + protocols: + - nvmf + - iscsi + fioFixedDuration: 60 + fioDutyCycles: + - thinkTime: 500000 + thinkTimeBlocks: 1000 + - thinkTime: 750000 + thinkTimeBlocks: 1000 + - thinkTime: 1250000 + thinkTimeBlocks: 2000 + - thinkTime: 1500000 + thinkTimeBlocks: 3000 + - thinkTime: 1750000 + thinkTimeBlocks: 3000 + - thinkTime: 2000000 + thinkTimeBlocks: 4000 +csi: + replicas: 1 + smallClaimSize: 50Mi + largeClaimSize: 500Mi +uninstall: + cleanup: 0 +basicVolumeIO: + replicas: 1 diff --git a/test/e2e/csi/dynamic_provisioning_test.go b/test/e2e/csi/dynamic_provisioning_test.go index 426e0f0c6..c559866cb 100644 --- a/test/e2e/csi/dynamic_provisioning_test.go +++ b/test/e2e/csi/dynamic_provisioning_test.go @@ -18,11 +18,12 @@ package e2e import ( "fmt" - "os" "strings" + "e2e-basic/common/e2e_config" "e2e-basic/csi/driver" "e2e-basic/csi/testsuites" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -41,16 +42,10 @@ var smallClaimSize = "50Mi" var largeClaimSize = "500Mi" var _ = ginkgo.Describe("Dynamic Provisioning", func() { - f := framework.NewDefaultFramework("mayastor") + f := framework.NewDefaultFramework("mayastordynprov") - tmp := os.Getenv("SMALL_CLAIM_SIZE") - if tmp != "" { - smallClaimSize = tmp - } - tmp = os.Getenv("LARGE_CLAIM_SIZE") - if tmp != "" { - largeClaimSize = tmp - } + smallClaimSize = e2e_config.GetConfig().CSI.SmallClaimSize + largeClaimSize = e2e_config.GetConfig().CSI.LargeClaimSize var ( cs clientset.Interface diff --git a/test/e2e/csi/e2e_suite_test.go b/test/e2e/csi/e2e_suite_test.go index 22226716a..111875bb2 100644 --- a/test/e2e/csi/e2e_suite_test.go +++ b/test/e2e/csi/e2e_suite_test.go @@ -17,8 +17,11 @@ limitations under the License. package e2e import ( + "e2e-basic/common/e2e_config" rep "e2e-basic/common/reporter" + "flag" + "fmt" "os" "path/filepath" "testing" @@ -36,7 +39,7 @@ const ( var ( defaultStorageClassParameters = map[string]string{ - "repl": "1", + "repl": fmt.Sprintf("%d", e2e_config.GetConfig().CSI.Replicas), "protocol": "nvmf", } ) diff --git a/test/e2e/csi/testsuites/testsuites.go b/test/e2e/csi/testsuites/testsuites.go index f12e0ce1b..8ec083904 100644 --- a/test/e2e/csi/testsuites/testsuites.go +++ b/test/e2e/csi/testsuites/testsuites.go @@ -17,6 +17,8 @@ limitations under the License. 
package testsuites import ( + "e2e-basic/common/e2e_config" + "context" "encoding/json" "fmt" @@ -426,7 +428,7 @@ type TestDeployment struct { func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly bool) *TestDeployment { generateName := "mayastor-volume-tester-" selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int()) - replicas := int32(1) + replicas := int32(e2e_config.GetConfig().CSI.Replicas) testDeployment := &TestDeployment{ client: c, namespace: ns, @@ -557,4 +559,3 @@ func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() { err = e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) framework.ExpectNoError(err) } - diff --git a/test/e2e/go.mod b/test/e2e/go.mod index c3df99906..dd0520b5d 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -4,12 +4,14 @@ go 1.15 require ( github.com/container-storage-interface/spec v1.2.0 + github.com/ilyakaznacheev/cleanenv v1.2.5 // indirect github.com/onsi/ginkgo v1.14.1 github.com/onsi/gomega v1.10.2 github.com/pkg/errors v0.9.1 // indirect github.com/stretchr/testify v1.5.1 // indirect golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/yaml.v2 v2.3.0 // indirect k8s.io/api v0.19.2 k8s.io/apimachinery v0.19.2 k8s.io/client-go v0.19.2 diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 87079e424..d9ba045fd 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -27,6 +27,7 @@ github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocm github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= @@ -316,6 +317,8 @@ github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mo github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ilyakaznacheev/cleanenv v1.2.5 h1:/SlcF9GaIvefWqFJzsccGG/NJdoaAwb7Mm7ImzhO3DM= +github.com/ilyakaznacheev/cleanenv v1.2.5/go.mod h1:/i3yhzwZ3s7hacNERGFwvlhwXMDcaqwIzmayEhbRplk= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -326,6 +329,8 @@ github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -931,6 +936,8 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +olympos.io/encoding/edn v0.0.0-20200308123125-93e3b8dd0e24 h1:sreVOrDp0/ezb0CHKVek/l7YwpxPJqv+jT3izfSphA4= +olympos.io/encoding/edn v0.0.0-20200308123125-93e3b8dd0e24/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go index 155d2b81c..773d198a4 100644 --- a/test/e2e/install/install_test.go +++ b/test/e2e/install/install_test.go @@ -1,257 +1,138 @@ package basic_test import ( - "context" - "errors" + "e2e-basic/common" + "e2e-basic/common/e2e_config" + "e2e-basic/common/locations" + rep "e2e-basic/common/reporter" + "fmt" - "os" "os/exec" - "path" - "runtime" - "strings" "testing" - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - rep "e2e-basic/common/reporter" - - appsV1 "k8s.io/api/apps/v1" - coreV1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/deprecated/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment - -/// Enumerate the nodes in the k8s cluster and return -/// 1. the IP address of the master node (if one exists), -/// 2. the number of nodes labelled openebs.io/engine=mayastor -/// 3. 
the names of nodes labelled openebs.io/engine=mayastor -/// The assumption is that the test-registry is accessible via the IP addr of the master, -/// or any node in the cluster if the master noe does not exist -/// TODO Refine how we workout the address of the test-registry -func getTestClusterDetails() (string, string, int, []string, error) { - var nme = 0 - nodeList := coreV1.NodeList{} - if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) { - return "", "", 0, nil, errors.New("failed to list nodes") - } - nodeIPs := make([]string, len(nodeList.Items)) - for ix, k8node := range nodeList.Items { - for _, k8Addr := range k8node.Status.Addresses { - if k8Addr.Type == coreV1.NodeInternalIP { - nodeIPs[ix] = k8Addr.Address - for label, value := range k8node.Labels { - if label == "openebs.io/engine" && value == "mayastor" { - nme++ - } - } - } - } - } - - // At least one node where mayastor can be deployed must exist - if nme == 0 { - return "", "", 0, nil, errors.New("no usable nodes found for the mayastor engine") - } - - mayastorNodes := make([]string, nme) - ix := 0 - for _, k8node := range nodeList.Items { - for _, k8Addr := range k8node.Status.Addresses { - if k8Addr.Type == coreV1.NodeHostName { - for label, value := range k8node.Labels { - if label == "openebs.io/engine" && value == "mayastor" { - mayastorNodes[ix] = k8Addr.Address - ix++ - } - } - } - } - } - - // Redundant check, but keep it anyway, we are writing a test after all. - // We should have found at least one node! - if len(nodeIPs) == 0 { - return "", "", 0, nil, errors.New("no usable nodes found") - } - - tag := os.Getenv("e2e_image_tag") - if len(tag) == 0 { - tag = "ci" - } - registry := os.Getenv("e2e_docker_registry") - - return tag, registry, nme, mayastorNodes, nil -} - -// Encapsulate the logic to find where the deploy yamls are -func getDeployYamlDir() string { - _, filename, _, _ := runtime.Caller(0) - return path.Clean(filename + "/../../../../deploy") -} - // Create mayastor namespace func createNamespace() { - cmd := exec.Command("kubectl", "create", "namespace", "mayastor") - out, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred(), "%s", out) -} - -// Helper for passing yaml from the deploy directory to kubectl -func applyDeployYaml(filename string) { - cmd := exec.Command("kubectl", "apply", "-f", filename) - cmd.Dir = getDeployYamlDir() + cmd := exec.Command("kubectl", "create", "namespace", common.NSMayastor) out, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred(), "%s", out) } -// Encapsulate the logic to find where the templated yamls are -func getTemplateYamlDir() string { - _, filename, _, _ := runtime.Caller(0) - return path.Clean(filename + "/../deploy") -} - -func generateYamls(imageTag string, registryAddress string) { - bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -o ../../../test-yamls -t '%s' -r '%s' test", imageTag, registryAddress) - cmd := exec.Command("bash", "-c", bashcmd) - out, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred(), "%s", out) -} - -// We expect this to fail a few times before it succeeds, -// so no throwing errors from here. 
-func mayastorReadyPodCount() int { - var mayastorDaemonSet appsV1.DaemonSet - if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: "mayastor"}, &mayastorDaemonSet) != nil { - fmt.Println("Failed to get mayastor DaemonSet") - return -1 +func generateYamlFiles(imageTag string, registryAddress string, mayastorNodes []string, e2eCfg *e2e_config.E2EConfig) { + coresDirective := "" + if e2eCfg.Cores != 0 { + coresDirective = fmt.Sprintf("%s -c %d", coresDirective, e2eCfg.Cores) } - return int(mayastorDaemonSet.Status.NumberAvailable) -} -func moacReady() bool { - var moacDeployment appsV1.Deployment - if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "moac", Namespace: "mayastor"}, &moacDeployment) != nil { - logf.Log.Info("Failed to get MOAC deployment") - return false + poolDirectives := "" + if len(e2eCfg.PoolDevice) != 0 { + poolDevice := e2eCfg.PoolDevice + for _, mayastorNode := range mayastorNodes { + poolDirectives += fmt.Sprintf(" -p '%s,%s'", mayastorNode, poolDevice) + } } - // { Remove/Reduce verbosity once we have fixed install test occasional failure. - logf.Log.Info("moacDeployment.Status", - "ObservedGeneration", moacDeployment.Status.ObservedGeneration, - "Replicas", moacDeployment.Status.Replicas, - "UpdatedReplicas", moacDeployment.Status.UpdatedReplicas, - "ReadyReplicas", moacDeployment.Status.ReadyReplicas, - "AvailableReplicas", moacDeployment.Status.AvailableReplicas, - "UnavailableReplicas", moacDeployment.Status.UnavailableReplicas, - "CollisionCount", moacDeployment.Status.CollisionCount) - for ix, condition := range moacDeployment.Status.Conditions { - logf.Log.Info("Condition", "ix", ix, - "Status", condition.Status, - "Type", condition.Type, - "Message", condition.Message, - "Reason", condition.Reason) + registryDirective := "" + if len(e2eCfg.Registry) != 0 { + registryDirective = fmt.Sprintf(" -r '%s'", e2eCfg.Registry) } - // } - for _, condition := range moacDeployment.Status.Conditions { - if condition.Type == appsV1.DeploymentAvailable { - if condition.Status == coreV1.ConditionTrue { - logf.Log.Info("MOAC is Available") - return true - } - } - } - logf.Log.Info("MOAC is Not Available") - return false + bashCmd := fmt.Sprintf( + "%s/generate-deploy-yamls.sh -o %s -t '%s' %s %s %s test", + locations.GetScriptsDir(), + locations.GetGeneratedYamlsDir(), + imageTag, registryDirective, coresDirective, poolDirectives, + ) + cmd := exec.Command("bash", "-c", bashCmd) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } // create pools for the cluster // // TODO: Ideally there should be one way how to create pools without using // two env variables to do a similar thing. -func createPools(mayastorNodes []string) { - envPoolYamls := os.Getenv("e2e_pool_yaml_files") - poolDevice := os.Getenv("e2e_pool_device") - if len(envPoolYamls) != 0 { +func createPools(e2eCfg *e2e_config.E2EConfig) { + poolYamlFiles := e2eCfg.PoolYamlFiles + poolDevice := e2eCfg.PoolDevice + // TODO: It is an error if configuration specifies both + // - pool device + // - pool yaml files, + // this simple code does not resolve that use case. 
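+	// A sketch of the two supported configurations (the pool yaml file
+	// name is hypothetical):
+	//   poolDevice: /dev/sdb             -> apply the generated pool.yaml
+	//   poolYamlFiles: [my-pools.yaml]   -> apply the listed files verbatim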
+ if len(poolYamlFiles) != 0 { // Apply the list of externally defined pool yaml files // NO check is made on the status of pools - poolYamlFiles := strings.Split(envPoolYamls, ",") for _, poolYaml := range poolYamlFiles { - fmt.Println("applying ", poolYaml) - bashcmd := "kubectl apply -f " + poolYaml - cmd := exec.Command("bash", "-c", bashcmd) - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) - } - } else if len(poolDevice) != 0 { - // Use the template file to create pools as per the devices - // NO check is made on the status of pools - for _, mayastorNode := range mayastorNodes { - fmt.Println("creating pool on:", mayastorNode, " using device:", poolDevice) - bashcmd := "NODE_NAME=" + mayastorNode + " POOL_DEVICE=" + poolDevice + " envsubst < " + "pool.yaml.template" + " | kubectl apply -f -" - cmd := exec.Command("bash", "-c", bashcmd) - cmd.Dir = getTemplateYamlDir() + logf.Log.Info("applying ", "yaml", poolYaml) + bashCmd := "kubectl apply -f " + poolYaml + cmd := exec.Command("bash", "-c", bashCmd) out, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred(), "%s", out) } + } else if len(poolDevice) != 0 { + // Use the generated file to create pools as per the devices + // NO check is made on the status of pools + common.KubeCtlApplyYaml("pool.yaml", locations.GetGeneratedYamlsDir()) } else { - Expect(false).To(BeTrue(), "Neither e2e_pool_yaml_files nor e2e_pool_device specified") + Expect(false).To(BeTrue(), "Neither pool yaml files nor pool device specified") } } // Install mayastor on the cluster under test. // We deliberately call out to kubectl, rather than constructing the client-go -// objects, so that we can verfiy the local deploy yamls are correct. +// objects, so that we can verify the local deploy yaml files are correct. 
func installMayastor() {
-	imageTag, registryAddress, numMayastorInstances, mayastorNodes, err := getTestClusterDetails()
+	e2eCfg := e2e_config.GetConfig()
+
+	Expect(e2eCfg.ImageTag).ToNot(BeEmpty(),
+		"mayastor image tag not defined")
+	Expect(e2eCfg.PoolDevice != "" || len(e2eCfg.PoolYamlFiles) != 0).To(BeTrue(),
+		"configuration error: pools are not defined.")
+	Expect(e2eCfg.PoolDevice == "" || len(e2eCfg.PoolYamlFiles) == 0).To(BeTrue(),
+		"Unable to resolve pool definitions if both pool device and pool yaml files are defined")
+
+	imageTag := e2eCfg.ImageTag
+	registry := e2eCfg.Registry
+
+	nodes, err := common.GetNodeLocs()
 	Expect(err).ToNot(HaveOccurred())
-	Expect(numMayastorInstances).ToNot(Equal(0))

-	fmt.Printf("tag %v, registry %v, # of mayastor instances=%v\n", imageTag, registryAddress, numMayastorInstances)
+	var mayastorNodes []string
+	numMayastorInstances := 0

-	// FIXME use absolute paths, do not depend on CWD
-	createNamespace()
-	applyDeployYaml("storage-class.yaml")
-	applyDeployYaml("moac-rbac.yaml")
-	applyDeployYaml("mayastorpoolcrd.yaml")
-	applyDeployYaml("nats-deployment.yaml")
-	generateYamls(imageTag, registryAddress)
-	applyDeployYaml("../test-yamls/csi-daemonset.yaml")
-	applyDeployYaml("../test-yamls/moac-deployment.yaml")
-	applyDeployYaml("../test-yamls/mayastor-daemonset.yaml")
+	for _, node := range nodes {
+		if node.MayastorNode && !node.MasterNode {
+			mayastorNodes = append(mayastorNodes, node.NodeName)
+			numMayastorInstances += 1
+		}
+	}
+	Expect(numMayastorInstances).ToNot(Equal(0))
+
+	logf.Log.Info("Install", "tag", imageTag, "registry", registry, "# of mayastor instances", numMayastorInstances)

-	// Given the yaml files and the environment described in the test readme,
-	// we expect mayastor to be running on exactly numMayastorInstances nodes.
-	Eventually(func() int {
-		return mayastorReadyPodCount()
-	},
-		"180s", // timeout
-		"1s", // polling interval
-	).Should(Equal(numMayastorInstances))
+	generateYamlFiles(imageTag, registry, mayastorNodes, &e2eCfg)
+	deployDir := locations.GetDeployDir()
+	yamlsDir := locations.GetGeneratedYamlsDir()

-	// Wait for MOAC to be ready before creating the pools,
-	Eventually(func() bool {
-		return moacReady()
-	},
-		"360s", // timeout
-		"2s", // polling interval
-	).Should(Equal(true))
+	createNamespace()
+	common.KubeCtlApplyYaml("moac-rbac.yaml", yamlsDir)
+	common.KubeCtlApplyYaml("mayastorpoolcrd.yaml", deployDir)
+	common.KubeCtlApplyYaml("nats-deployment.yaml", yamlsDir)
+	common.KubeCtlApplyYaml("csi-daemonset.yaml", yamlsDir)
+	common.KubeCtlApplyYaml("moac-deployment.yaml", yamlsDir)
+	common.KubeCtlApplyYaml("mayastor-daemonset.yaml", yamlsDir)
+
+	ready, err := common.MayastorReady(2, 540)
+	Expect(err).ToNot(HaveOccurred())
+	Expect(ready).To(BeTrue())

 	// Now create pools on all nodes.
-	createPools(mayastorNodes)
+	createPools(&e2eCfg)

 	// Mayastor has been installed and is now ready for use.
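+	// The single common.MayastorReady(2, 540) call above replaces the two
+	// explicit Eventually(...) polls deleted by this patch. A minimal,
+	// self-contained sketch of that poll-until-ready pattern; pollReady and
+	// its checker are illustrative stand-ins, not the real common package API:
+	//
+	//	package main
+	//
+	//	import (
+	//		"errors"
+	//		"fmt"
+	//		"time"
+	//	)
+	//
+	//	// pollReady runs check every interval until it returns true or the
+	//	// timeout elapses, mirroring Eventually(fn, timeout, interval).
+	//	func pollReady(check func() bool, interval, timeout time.Duration) (bool, error) {
+	//		deadline := time.Now().Add(timeout)
+	//		for time.Now().Before(deadline) {
+	//			if check() {
+	//				return true, nil
+	//			}
+	//			time.Sleep(interval)
+	//		}
+	//		return false, errors.New("timed out waiting for readiness")
+	//	}
+	//
+	//	func main() {
+	//		start := time.Now()
+	//		// A trivial checker that reports ready after three seconds.
+	//		ready, err := pollReady(func() bool {
+	//			return time.Since(start) > 3*time.Second
+	//		}, 2*time.Second, 540*time.Second)
+	//		fmt.Println(ready, err)
+	//	}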
} @@ -262,54 +143,18 @@ func TestInstallSuite(t *testing.T) { } var _ = Describe("Mayastor setup", func() { - It("should install using yamls", func() { + It("should install using yaml files", func() { installMayastor() }) }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) - - By("bootstrapping test environment") - useCluster := true - testEnv = &envtest.Environment{ - UseExistingCluster: &useCluster, - AttachControlPlaneOutput: true, - } - - var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - // We do not consume prometheus metrics. - MetricsBindAddress: "0", - }) - Expect(err).ToNot(HaveOccurred()) - - go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) - }() - - mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer mgrSyncCtxCancel() - if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx); !synced { - fmt.Println("Failed to sync") - } - - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + common.SetupTestEnv() close(done) }, 60) var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) + common.TeardownTestEnvNoCleanup() }) diff --git a/test/e2e/io_soak/filesystem_fio.go b/test/e2e/io_soak/filesystem_fio.go index 16e0ff180..9fd239e14 100644 --- a/test/e2e/io_soak/filesystem_fio.go +++ b/test/e2e/io_soak/filesystem_fio.go @@ -2,6 +2,7 @@ package io_soak import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" "fmt" "time" @@ -36,12 +37,13 @@ func (job FioFsSoakJob) removeTestPod() error { } func (job FioFsSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles ixp := job.id % len(FioDutyCycles) RunIoSoakFio( job.podName, duration, - FioDutyCycles[ixp].thinkTime, - FioDutyCycles[ixp].thinkTimeBlocks, + FioDutyCycles[ixp].ThinkTime, + FioDutyCycles[ixp].ThinkTimeBlocks, false, doneC, errC, diff --git a/test/e2e/io_soak/fio.go b/test/e2e/io_soak/fio.go index 8fc5f8076..f553414f1 100644 --- a/test/e2e/io_soak/fio.go +++ b/test/e2e/io_soak/fio.go @@ -2,6 +2,7 @@ package io_soak import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" "fmt" "io/ioutil" @@ -10,23 +11,6 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" ) -// This table of duty cycles is guesstimates and bear no relation to real loads. 
-// TODO: make configurable -var FioDutyCycles = []struct { - thinkTime int - thinkTimeBlocks int -}{ - {500000, 1000}, // 0.5 second, 1000 blocks - {750000, 1000}, // 0.75 second, 1000 blocks - {1000000, 2000}, // 1 second, 2000 blocks - {1250000, 2000}, // 1.25 seconds, 2000 blocks - {1500000, 3000}, // 1.5 seconds, 3000 blocks - {1750000, 3000}, // 1.75 seconds, 3000 blocks - {2000000, 4000}, // 2 seconds, 4000 blocks -} - -const fixedDuration = 60 - // see https://fio.readthedocs.io/en/latest/fio_doc.html#i-o-rate // run fio in a loop of fixed duration to fulfill a larger duration, // this to facilitate a relatively timely termination when an error @@ -57,7 +41,7 @@ func RunIoSoakFio(podName string, duration time.Duration, thinkTime int, thinkTi } for ix := 1; secs > 0; ix++ { - runtime := fixedDuration + runtime := e2e_config.GetConfig().IOSoakTest.FioFixedDuration if runtime > secs { runtime = secs } @@ -72,7 +56,7 @@ func RunIoSoakFio(podName string, duration time.Duration, thinkTime int, thinkTi "rawBlock", rawBlock, "fioFile", fioFile, ) - output, err := common.RunFio(podName, runtime, fioFile, argThinkTime, argThinkTimeBlocks ) + output, err := common.RunFio(podName, runtime, fioFile, argThinkTime, argThinkTimeBlocks) //TODO: for now shove the output into /tmp _ = ioutil.WriteFile("/tmp/"+podName+".out", output, 0644) diff --git a/test/e2e/io_soak/io_soak_test.go b/test/e2e/io_soak/io_soak_test.go index 5f12ed565..8709e0276 100644 --- a/test/e2e/io_soak/io_soak_test.go +++ b/test/e2e/io_soak/io_soak_test.go @@ -4,29 +4,26 @@ package io_soak import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" rep "e2e-basic/common/reporter" "fmt" - "os" "sort" - "strconv" - "strings" "testing" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" + coreV1 "k8s.io/api/core/v1" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var defTimeoutSecs = "120s" type IoSoakJob interface { makeVolume() - makeTestPod() (*corev1.Pod, error) + makeTestPod() (*coreV1.Pod, error) removeTestPod() error removeVolume() run(time.Duration, chan<- string, chan<- error) @@ -63,7 +60,7 @@ func monitor(errC chan<- error) { /// proto - protocol "nvmf" or "isci" /// replicas - number of replicas for each volume /// loadFactor - number of volumes for each mayastor instance -func IOSoakTest(protocols []string, replicas int, loadFactor int, duration time.Duration) { +func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, duration time.Duration) { nodeList, err := common.GetNodeLocs() Expect(err).ToNot(HaveOccurred()) @@ -84,7 +81,7 @@ func IOSoakTest(protocols []string, replicas int, loadFactor int, duration time. for _, proto := range protocols { scName := fmt.Sprintf("io-soak-%s", proto) logf.Log.Info("Creating", "storage class", scName) - err = common.MkStorageClass(scName, replicas, proto, "io.openebs.csi-mayastor") + err = common.MkStorageClass(scName, replicas, proto) Expect(err).ToNot(HaveOccurred()) scNames = append(scNames, scName) } @@ -151,7 +148,7 @@ func IOSoakTest(protocols []string, replicas int, loadFactor int, duration time. 
logf.Log.Info("Completed", "pod", podName) case err := <-errC: close(doneC) - logf.Log.Error(err, "fio run") + logf.Log.Info("fio run error", "error", err) Expect(err).To(BeNil()) } } @@ -184,38 +181,22 @@ var _ = Describe("Mayastor Volume IO test", func() { }) It("should verify an NVMe-oF TCP volume can process IO on multiple volumes simultaneously", func() { - replicas := 1 - loadFactor := 2 - duration, _ := time.ParseDuration("30s") - protocols := []string{"nvmf"} - var err error - tmp := os.Getenv("e2e_io_soak_load_factor") - if tmp != "" { - loadFactor, err = strconv.Atoi(tmp) - Expect(err).ToNot(HaveOccurred()) - } - tmp = os.Getenv("e2e_io_soak_duration") - if tmp != "" { - duration, err = time.ParseDuration(tmp) - Expect(err).ToNot(HaveOccurred()) - Expect(duration.Seconds() > 0).To(BeTrue()) - } - tmp = os.Getenv("e2e_io_soak_replicas") - if tmp != "" { - replicas, err = strconv.Atoi(tmp) - Expect(err).ToNot(HaveOccurred()) - } - tmp = os.Getenv("e2e_io_soak_protocols") - if tmp != "" { - protocols = strings.Split(tmp, ",") + e2eCfg := e2e_config.GetConfig() + loadFactor := e2eCfg.IOSoakTest.LoadFactor + replicas := e2eCfg.IOSoakTest.Replicas + strProtocols := e2eCfg.IOSoakTest.Protocols + var protocols []common.ShareProto + for _, proto := range strProtocols { + protocols = append(protocols, common.ShareProto(proto)) } + duration, err := time.ParseDuration(e2eCfg.IOSoakTest.Duration) + Expect(err).ToNot(HaveOccurred(), "Duration configuration string format is invalid.") logf.Log.Info("Parameters", "replicas", replicas, "loadFactor", loadFactor, "duration", duration) IOSoakTest(protocols, replicas, loadFactor, duration) }) }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) diff --git a/test/e2e/io_soak/rawblock_fio.go b/test/e2e/io_soak/rawblock_fio.go index dcb6fb53c..f4a8b6ab2 100644 --- a/test/e2e/io_soak/rawblock_fio.go +++ b/test/e2e/io_soak/rawblock_fio.go @@ -2,6 +2,7 @@ package io_soak import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" "fmt" "time" @@ -36,12 +37,13 @@ func (job FioRawBlockSoakJob) removeTestPod() error { } func (job FioRawBlockSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles ixp := job.id % len(FioDutyCycles) RunIoSoakFio( job.podName, duration, - FioDutyCycles[ixp].thinkTime, - FioDutyCycles[ixp].thinkTimeBlocks, + FioDutyCycles[ixp].ThinkTime, + FioDutyCycles[ixp].ThinkTimeBlocks, true, doneC, errC, diff --git a/test/e2e/node_disconnect/lib/node_disconnect_lib.go b/test/e2e/node_disconnect/lib/node_disconnect_lib.go index 12d3e23d1..c341bd4cd 100644 --- a/test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -37,7 +37,7 @@ func createFioOnRefugeNode(podName string, volClaimName string) { // prevent mayastor pod from running on the given node func SuppressMayastorPodOn(nodeName string) { common.UnlabelNode(nodeName, engineLabel) - err := common.WaitForPodNotRunningOnNode(mayastorRegexp, namespace, nodeName, podUnscheduleTimeoutSecs) + err := common.WaitForPodNotRunningOnNode(mayastorRegexp, common.NSMayastor, nodeName, podUnscheduleTimeoutSecs) Expect(err).ToNot(HaveOccurred()) } @@ -45,7 +45,7 @@ func SuppressMayastorPodOn(nodeName string) { func UnsuppressMayastorPodOn(nodeName string) { // add the mayastor label to the node common.LabelNode(nodeName, engineLabel, mayastorLabel) - err := 
common.WaitForPodRunningOnNode(mayastorRegexp, namespace, nodeName, podRescheduleTimeoutSecs) + err := common.WaitForPodRunningOnNode(mayastorRegexp, common.NSMayastor, nodeName, podRescheduleTimeoutSecs) Expect(err).ToNot(HaveOccurred()) } diff --git a/test/e2e/node_disconnect/lib/node_disconnect_setup.go b/test/e2e/node_disconnect/lib/node_disconnect_setup.go index 7fa3fbbc7..5130507bd 100644 --- a/test/e2e/node_disconnect/lib/node_disconnect_setup.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_setup.go @@ -10,7 +10,6 @@ import ( const mayastorRegexp = "^mayastor-.....$" const moacRegexp = "^moac-..........-.....$" -const namespace = "mayastor" const engineLabel = "openebs.io/engine" const mayastorLabel = "mayastor" const refugeLabel = "openebs.io/podrefuge" @@ -40,11 +39,11 @@ func DisconnectSetup() { } Expect(refugeNode).NotTo(Equal("")) - moacOnRefugeNode := common.PodPresentOnNode(moacRegexp, namespace, refugeNode) + moacOnRefugeNode := common.PodPresentOnNode(moacRegexp, common.NSMayastor, refugeNode) // Update moac to ensure it stays on the refuge node (even if it currently is) fmt.Printf("apply moac node selector for node \"%s\"\n", refugeNode) - common.ApplyNodeSelectorToDeployment("moac", namespace, refugeLabel, refugeLabelValue) + common.ApplyNodeSelectorToDeployment("moac", common.NSMayastor, refugeLabel, refugeLabelValue) // if not already on the refuge node if moacOnRefugeNode == false { @@ -52,34 +51,34 @@ func DisconnectSetup() { // reduce the number of moac instances to be zero // this seems to be needed to guarantee that moac moves to the refuge node var repl int32 = 0 - common.SetDeploymentReplication("moac", namespace, &repl) + common.SetDeploymentReplication("moac", common.NSMayastor, &repl) // wait for moac to disappear from the cluster for _, node := range nodeList { fmt.Printf("waiting for moac absence from %s\n", node.NodeName) - err = common.WaitForPodAbsentFromNode(moacRegexp, namespace, node.NodeName, timeoutSeconds) + err = common.WaitForPodAbsentFromNode(moacRegexp, common.NSMayastor, node.NodeName, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } // bring the number of moac instances back to 1 repl = 1 - common.SetDeploymentReplication("moac", namespace, &repl) + common.SetDeploymentReplication("moac", common.NSMayastor, &repl) // wait for moac to be running on the refuge node fmt.Printf("waiting for moac presence on %s\n", refugeNode) - err = common.WaitForPodRunningOnNode(moacRegexp, namespace, refugeNode, timeoutSeconds) + err = common.WaitForPodRunningOnNode(moacRegexp, common.NSMayastor, refugeNode, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } // wait until all mayastor pods are in state "Running" and only on the non-refuge nodes fmt.Printf("waiting for mayastor absence from %s\n", refugeNode) - err = common.WaitForPodAbsentFromNode(mayastorRegexp, namespace, refugeNode, timeoutSeconds) + err = common.WaitForPodAbsentFromNode(mayastorRegexp, common.NSMayastor, refugeNode, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) for _, node := range nodeList { if node.NodeName != refugeNode { fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) - err = common.WaitForPodRunningOnNode(mayastorRegexp, namespace, node.NodeName, timeoutSeconds) + err = common.WaitForPodRunningOnNode(mayastorRegexp, common.NSMayastor, node.NodeName, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } } @@ -100,12 +99,12 @@ func DisconnectTeardown() { } fmt.Printf("remove moac node affinity\n") - common.RemoveAllNodeSelectorsFromDeployment("moac", 
namespace) + common.RemoveAllNodeSelectorsFromDeployment("moac", common.NSMayastor) // wait until all nodes have mayastor pods in state "Running" for _, node := range nodeList { fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) - err = common.WaitForPodRunningOnNode(mayastorRegexp, namespace, node.NodeName, timeoutSeconds) + err = common.WaitForPodRunningOnNode(mayastorRegexp, common.NSMayastor, node.NodeName, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } } diff --git a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go index 4432fec14..90089a50c 100644 --- a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go +++ b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go @@ -7,7 +7,6 @@ import ( disconnect_lib "e2e-basic/node_disconnect/lib" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" "testing" @@ -37,7 +36,7 @@ var _ = Describe("Mayastor replica pod removal test", func() { }) It("should verify nvmf nexus behaviour when a mayastor pod is removed", func() { - err := common.MkStorageClass(gStorageClass, 2, "nvmf", "io.openebs.csi-mayastor") + err := common.MkStorageClass(gStorageClass, 2, common.ShareProtoNvmf) Expect(err).ToNot(HaveOccurred()) env = disconnect_lib.Setup("loss-test-pvc-nvmf", gStorageClass, "fio-pod-remove-test") env.PodLossTest() @@ -45,7 +44,6 @@ var _ = Describe("Mayastor replica pod removal test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go index 456e1c750..cff2d7134 100644 --- a/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go +++ b/test/e2e/pvc_stress_fio/pvc_stress_fio_test.go @@ -2,23 +2,20 @@ package pvc_stress_fio_test import ( + "e2e-basic/common" + "e2e-basic/common/e2e_config" + rep "e2e-basic/common/reporter" + "fmt" - "os" - "strconv" "testing" - Cmn "e2e-basic/common" - rep "e2e-basic/common/reporter" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" coreV1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" ) var defTimeoutSecs = "60s" @@ -48,8 +45,11 @@ var volNames []volSc // 1. The PVC is deleted // 2. The associated PV is deleted // 3. The associated MV is deleted -func testPVC(volName string, scName string, runFio bool) { - logf.Log.Info("testPVC", "volume", volName, "storageClass", scName, "run FIO", runFio) +func testPVC(volName string, protocol common.ShareProto, runFio bool) { + logf.Log.Info("testPVC", "volume", volName, "protocol", protocol, "run FIO", runFio) + scName := "pvc-stress-test-" + string(protocol) + err := common.MkStorageClass(scName, e2e_config.GetConfig().BasicVolumeIO.Replicas, protocol) + Expect(err).ToNot(HaveOccurred(), "Creating storage class %s", scName) // PVC create options createOpts := &coreV1.PersistentVolumeClaim{ ObjectMeta: metaV1.ObjectMeta{ @@ -67,11 +67,11 @@ func testPVC(volName string, scName string, runFio bool) { }, } // Create the PVC. 
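+	// An aside before the create-and-verify sequence that follows: it chains
+	// several Eventually polls, PVC Bound, PV provisioned and Bound, then MSV
+	// present and healthy. A condensed, self-contained sketch of that chained
+	// wait; waitForPhase and the phase getters are illustrative stand-ins for
+	// the cluster queries the test performs, not the real common package API:
+	//
+	//	package main
+	//
+	//	import (
+	//		"errors"
+	//		"fmt"
+	//		"time"
+	//	)
+	//
+	//	// waitForPhase polls getPhase once a second until it reports want
+	//	// or the timeout elapses.
+	//	func waitForPhase(getPhase func() string, want string, timeout time.Duration) error {
+	//		deadline := time.Now().Add(timeout)
+	//		for time.Now().Before(deadline) {
+	//			if getPhase() == want {
+	//				return nil
+	//			}
+	//			time.Sleep(time.Second)
+	//		}
+	//		return errors.New("timed out waiting for phase " + want)
+	//	}
+	//
+	//	func main() {
+	//		// Stand-in getters; the real test reads these from the cluster.
+	//		steps := []struct {
+	//			get  func() string
+	//			want string
+	//		}{
+	//			{func() string { return "Bound" }, "Bound"},     // PVC phase
+	//			{func() string { return "Bound" }, "Bound"},     // PV phase
+	//			{func() string { return "healthy" }, "healthy"}, // MSV state (stand-in value)
+	//		}
+	//		for _, s := range steps {
+	//			if err := waitForPhase(s.get, s.want, 60*time.Second); err != nil {
+	//				fmt.Println(err)
+	//				return
+	//			}
+	//		}
+	//		fmt.Println("volume provisioned and healthy")
+	//	}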
- _, createErr := Cmn.CreatePVC(createOpts) + _, createErr := common.CreatePVC(createOpts) Expect(createErr).To(BeNil()) // Confirm the PVC has been created. - pvc, getPvcErr := Cmn.GetPVC(volName) + pvc, getPvcErr := common.GetPVC(volName) Expect(getPvcErr).To(BeNil()) Expect(pvc).ToNot(BeNil()) @@ -81,20 +81,20 @@ func testPVC(volName string, scName string, runFio bool) { // Wait for the PVC to be bound. Eventually(func() coreV1.PersistentVolumeClaimPhase { - return Cmn.GetPvcStatusPhase(volName) + return common.GetPvcStatusPhase(volName) }, defTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal(coreV1.ClaimBound)) // Refresh the PVC contents, so that we can get the PV name. - pvc, getPvcErr = Cmn.GetPVC(volName) + pvc, getPvcErr = common.GetPVC(volName) Expect(getPvcErr).To(BeNil()) Expect(pvc).ToNot(BeNil()) // Wait for the PV to be provisioned Eventually(func() *coreV1.PersistentVolume { - pv, getPvErr := Cmn.GetPV(pvc.Spec.VolumeName) + pv, getPvErr := common.GetPV(pvc.Spec.VolumeName) if getPvErr != nil { return nil } @@ -107,15 +107,15 @@ func testPVC(volName string, scName string, runFio bool) { // Wait for the PV to be bound. Eventually(func() coreV1.PersistentVolumePhase { - return Cmn.GetPvStatusPhase(pvc.Spec.VolumeName) + return common.GetPvStatusPhase(pvc.Spec.VolumeName) }, defTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal(coreV1.VolumeBound)) // Wait for the MSV to be provisioned - Eventually(func() *Cmn.MayastorVolStatus { - return Cmn.GetMSV(string(pvc.ObjectMeta.UID)) + Eventually(func() *common.MayastorVolStatus { + return common.GetMSV(string(pvc.ObjectMeta.UID)) }, defTimeoutSecs, //timeout "1s", // polling interval @@ -123,7 +123,7 @@ func testPVC(volName string, scName string, runFio bool) { // Wait for the MSV to be healthy Eventually(func() string { - return Cmn.GetMsvState(string(pvc.ObjectMeta.UID)) + return common.GetMsvState(string(pvc.ObjectMeta.UID)) }, defTimeoutSecs, // timeout "1s", // polling interval @@ -132,7 +132,7 @@ func testPVC(volName string, scName string, runFio bool) { if runFio { // Create the fio Pod fioPodName := "fio-" + volName - pod, err := Cmn.CreateFioPod(fioPodName, volName) + pod, err := common.CreateFioPod(fioPodName, volName) Expect(err).ToNot(HaveOccurred()) Expect(pod).ToNot(BeNil()) @@ -141,18 +141,18 @@ func testPVC(volName string, scName string, runFio bool) { // Wait for the fio Pod to transition to running Eventually(func() bool { - return Cmn.IsPodRunning(fioPodName) + return common.IsPodRunning(fioPodName) }, defTimeoutSecs, "1s", ).Should(Equal(true)) // Run the fio test - _, err = Cmn.RunFio(fioPodName, 5, Cmn.FioFsFilename) + _, err = common.RunFio(fioPodName, 5, common.FioFsFilename) Expect(err).ToNot(HaveOccurred()) // Delete the fio pod - err = Cmn.DeletePod(fioPodName) + err = common.DeletePod(fioPodName) Expect(err).ToNot(HaveOccurred()) // cleanup @@ -160,12 +160,12 @@ func testPVC(volName string, scName string, runFio bool) { } // Delete the PVC - deleteErr := Cmn.DeletePVC(volName) + deleteErr := common.DeletePVC(volName) Expect(deleteErr).To(BeNil()) // Wait for the PVC to be deleted. Eventually(func() bool { - return Cmn.IsPVCDeleted(volName) + return common.IsPVCDeleted(volName) }, "120s", // timeout "1s", // polling interval @@ -173,7 +173,7 @@ func testPVC(volName string, scName string, runFio bool) { // Wait for the PV to be deleted. 
	Eventually(func() bool {
-		return Cmn.IsPVDeleted(pvc.Spec.VolumeName)
+		return common.IsPVDeleted(pvc.Spec.VolumeName)
 	},
 		defTimeoutSecs, // timeout
 		"1s", // polling interval
@@ -181,7 +181,7 @@

 	// Wait for the MSV to be deleted.
 	Eventually(func() bool {
-		return Cmn.IsMSVDeleted(string(pvc.ObjectMeta.UID))
+		return common.IsMSVDeleted(string(pvc.ObjectMeta.UID))
 	},
 		defTimeoutSecs, // timeout
 		"1s", // polling interval
@@ -189,6 +189,8 @@

 	// cleanup
 	volNames = volNames[:len(volNames)-1]
+	err = common.RmStorageClass(scName)
+	Expect(err).ToNot(HaveOccurred(), "Deleting storage class %s", scName)
 }

 func stressTestPVC(iters int, runFio bool) {
@@ -197,8 +199,9 @@
 		decoration = "-io"
 	}
 	for ix := 1; ix <= iters; ix++ {
-		testPVC(fmt.Sprintf("stress-pvc-nvmf%s-%d", decoration, ix), "mayastor-nvmf", runFio)
-		testPVC(fmt.Sprintf("stress-pvc-iscsi%s-%d", decoration, ix), "mayastor-iscsi", runFio)
+		// Sadly we cannot enumerate over enums so we have to invoke each protocol explicitly
+		testPVC(fmt.Sprintf("stress-pvc-nvmf%s-%d", decoration, ix), common.ShareProtoNvmf, runFio)
+		testPVC(fmt.Sprintf("stress-pvc-iscsi%s-%d", decoration, ix), common.ShareProtoIscsi, runFio)
 	}
 }

@@ -210,7 +213,7 @@
 var _ = Describe("Mayastor PVC Stress test", func() {
 	AfterEach(func() {
 		// Check resource leakage
-		err := Cmn.AfterEachCheck()
+		err := common.AfterEachCheck()
 		Expect(err).ToNot(HaveOccurred())
 	})

@@ -224,24 +227,11 @@
 })

 var _ = BeforeSuite(func(done Done) {
-	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
-
-	Cmn.SetupTestEnv()
-	tmp := os.Getenv("e2e_pvc_stress_cd_cycles")
-	if len(tmp) != 0 {
-		var err error
-		cdIterations, err = strconv.Atoi(tmp)
-		Expect(err).NotTo(HaveOccurred())
-		logf.Log.Info("Cycle count changed by environment ", "Create/Delete", cdIterations)
-	}
+	common.SetupTestEnv()
+	e2eCfg := e2e_config.GetConfig()
+	cdIterations = e2eCfg.PVCStress.CdCycles
+	crudIterations = e2eCfg.PVCStress.CrudCycles

-	tmp = os.Getenv("e2e_pvc_stress_crud_cycles")
-	if len(tmp) != 0 {
-		var err error
-		crudIterations, err = strconv.Atoi(tmp)
-		Expect(err).NotTo(HaveOccurred())
-		logf.Log.Info("Cycle count changed by environment", "Create/Read/Update/Delete", crudIterations)
-	}
 	logf.Log.Info("Number of cycles are", "Create/Delete", cdIterations, "Create/Read/Update/Delete", crudIterations)

 	close(done)
@@ -251,5 +241,5 @@
 	// NB This only tears down the local structures for talking to the cluster,
 	// not the kubernetes cluster itself.
 	By("tearing down the test environment")
-	Cmn.TeardownTestEnv()
+	common.TeardownTestEnv()
 })
diff --git a/test/e2e/rebuild/basic_rebuild_test.go b/test/e2e/rebuild/basic_rebuild_test.go
index 94b432975..7e286c1e5 100644
--- a/test/e2e/rebuild/basic_rebuild_test.go
+++ b/test/e2e/rebuild/basic_rebuild_test.go
@@ -8,18 +8,18 @@ import (
 	. "github.com/onsi/ginkgo"
 	. 
"github.com/onsi/gomega" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( + podName = "rebuild-test-fio" pvcName = "rebuild-test-pvc" - storageClass = "mayastor-nvmf" + storageClass = "rebuild-test-nvmf" ) -const ApplicationPod = "fio.yaml" - func basicRebuildTest() { + err := common.MkStorageClass(storageClass, 1, common.ShareProtoNvmf) + Expect(err).ToNot(HaveOccurred(), "Creating storage class %s", storageClass) + // Create a PVC common.MkPVC(pvcName, storageClass) pvc, err := common.GetPVC(pvcName) @@ -30,7 +30,8 @@ func basicRebuildTest() { pollPeriod := "1s" // Create an application pod and wait for the PVC to be bound to it. - common.ApplyDeployYaml(ApplicationPod) + _, err = common.CreateFioPod(podName, pvcName) + Expect(err).ToNot(HaveOccurred(), "Failed to create rebuild test fio pod") Eventually(func() bool { return common.IsPvcBound(pvcName) }, timeout, pollPeriod).Should(Equal(true)) uuid := string(pvc.ObjectMeta.UID) @@ -43,7 +44,8 @@ func basicRebuildTest() { Eventually(func() bool { return common.IsVolumePublished(uuid) }, timeout, pollPeriod).Should(Equal(true)) // Add another child which should kick off a rebuild. - common.UpdateNumReplicas(uuid, 2) + err = common.UpdateNumReplicas(uuid, 2) + Expect(err).ToNot(HaveOccurred(), "Update the number of replicas") repl, err = common.GetNumReplicas(uuid) Expect(err).To(BeNil()) Expect(repl).Should(Equal(int64(2))) @@ -68,6 +70,11 @@ func basicRebuildTest() { Eventually(func() string { return getChildrenFunc(uuid)[0].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_ONLINE")) + err = common.DeletePod(podName) + Expect(err).ToNot(HaveOccurred(), "Deleting rebuild test fio pod") + common.RmPVC(pvcName, storageClass) + err = common.RmStorageClass(storageClass) + Expect(err).ToNot(HaveOccurred(), "Deleting storage class %s", storageClass) } func TestRebuild(t *testing.T) { @@ -82,14 +89,11 @@ var _ = Describe("Mayastor rebuild test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) var _ = AfterSuite(func() { By("tearing down the test environment") - common.DeleteDeployYaml(ApplicationPod) - common.RmPVC(pvcName, storageClass) common.TeardownTestEnv() }) diff --git a/test/e2e/replica/replica_test.go b/test/e2e/replica/replica_test.go index 530213627..bbc486d50 100644 --- a/test/e2e/replica/replica_test.go +++ b/test/e2e/replica/replica_test.go @@ -8,19 +8,19 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( pvcName = "replica-test-pvc" - storageClass = "mayastor-nvmf" + storageClass = "replica-test-nvmf" ) const fioPodName = "fio" func addUnpublishedReplicaTest() { + err := common.MkStorageClass(storageClass, 1, common.ShareProtoNvmf) + Expect(err).ToNot(HaveOccurred(), "Creating storage class %s", storageClass) + // Create a PVC common.MkPVC(pvcName, storageClass) pvc, err := common.GetPVC(pvcName) @@ -32,13 +32,15 @@ func addUnpublishedReplicaTest() { // Add another child before publishing the volume. 
uuid := string(pvc.ObjectMeta.UID) - common.UpdateNumReplicas(uuid, 2) + err = common.UpdateNumReplicas(uuid, 2) + Expect(err).ToNot(HaveOccurred(), "Update number of replicas") repl, err := common.GetNumReplicas(uuid) Expect(err).To(BeNil()) Expect(repl).Should(Equal(int64(2))) // Use the PVC and wait for the volume to be published - common.CreateFioPod(fioPodName, pvcName) + _, err = common.CreateFioPod(fioPodName, pvcName) + Expect(err).ToNot(HaveOccurred(), "Create fio pod") Eventually(func() bool { return common.IsVolumePublished(uuid) }, timeout, pollPeriod).Should(Equal(true)) getChildrenFunc := func(uuid string) []common.NexusChild { @@ -58,6 +60,13 @@ func addUnpublishedReplicaTest() { Eventually(func() string { return getChildrenFunc(uuid)[0].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_ONLINE")) + + err = common.DeletePod(fioPodName) + Expect(err).ToNot(HaveOccurred(), "Delete fio test pod") + common.RmPVC(pvcName, storageClass) + + err = common.RmStorageClass(storageClass) + Expect(err).ToNot(HaveOccurred(), "Deleting storage class %s", storageClass) } func TestReplica(t *testing.T) { @@ -72,14 +81,11 @@ var _ = Describe("Mayastor replica tests", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) var _ = AfterSuite(func() { By("tearing down the test environment") - common.DeletePod(fioPodName) - common.RmPVC(pvcName, storageClass) common.TeardownTestEnv() }) diff --git a/test/e2e/resource_check/resource_check_test.go b/test/e2e/resource_check/resource_check_test.go index add81dde9..8368ceb02 100644 --- a/test/e2e/resource_check/resource_check_test.go +++ b/test/e2e/resource_check/resource_check_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // Check that there are no artefacts left over from @@ -17,26 +16,26 @@ func resourceCheck() { found, err := common.CheckForTestPods() if err != nil { - logf.Log.Error(err, "Failed to check for test pods.") + logf.Log.Info("Failed to check for test pods.", "error", err) } else { Expect(found).To(BeFalse()) } found, err = common.CheckForPVCs() if err != nil { - logf.Log.Error(err, "Failed to check for PVCs") + logf.Log.Info("Failed to check for PVCs", err) } Expect(found).To(BeFalse()) found, err = common.CheckForPVs() if err != nil { - logf.Log.Error(err, "Failed to check PVs") + logf.Log.Info("Failed to check PVs", "error", err) } Expect(found).To(BeFalse()) found, err = common.CheckForMSVs() if err != nil { - logf.Log.Error(err, "Failed to check MSVs") + logf.Log.Info("Failed to check MSVs", "error", err) } Expect(found).To(BeFalse()) } @@ -53,7 +52,6 @@ var _ = Describe("Mayastor resource check", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go index d90832ed6..5a121ffd8 100644 --- a/test/e2e/uninstall/uninstall_test.go +++ b/test/e2e/uninstall/uninstall_test.go @@ -2,12 +2,11 @@ package basic_test import ( "e2e-basic/common" + "e2e-basic/common/e2e_config" + "e2e-basic/common/locations" rep "e2e-basic/common/reporter" - "os" "os/exec" - "path" - "runtime" "testing" "time" @@ -15,25 +14,8 @@ import ( . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var cleanup = false - -// Encapsulate the logic to find where the deploy yamls are -func getDeployYamlDir() string { - _, filename, _, _ := runtime.Caller(0) - return path.Clean(filename + "/../../../../deploy") -} - -// Helper for passing yaml from the deploy directory to kubectl -func deleteDeployYaml(filename string) { - cmd := exec.Command("kubectl", "delete", "-f", filename) - cmd.Dir = getDeployYamlDir() - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - // Helper for deleting mayastor CRDs func deleteCRD(crdName string) { cmd := exec.Command("kubectl", "delete", "crd", crdName) @@ -42,7 +24,7 @@ func deleteCRD(crdName string) { // Create mayastor namespace func deleteNamespace() { - cmd := exec.Command("kubectl", "delete", "namespace", "mayastor") + cmd := exec.Command("kubectl", "delete", "namespace", common.NSMayastor) out, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred(), "%s", out) } @@ -52,6 +34,7 @@ func deleteNamespace() { // objects, so that we can verify the local deploy yaml files are correct. 
 func teardownMayastor() {
 	var cleaned bool
+	cleanup := e2e_config.GetConfig().Uninstall.Cleanup != 0

 	logf.Log.Info("Settings:", "cleanup", cleanup)
 	if cleanup {
@@ -59,40 +42,42 @@
 	} else {
 		found, err := common.CheckForTestPods()
 		if err != nil {
-			logf.Log.Error(err, "Failed to checking for test pods.")
+			logf.Log.Info("Failed to check for test pods.", "error", err)
 		} else {
-			Expect(found).To(BeFalse())
+			Expect(found).To(BeFalse(), "Application pods were found, none expected.")
 		}

 		found, err = common.CheckForPVCs()
 		if err != nil {
-			logf.Log.Error(err, "Failed to check for PVCs")
+			logf.Log.Info("Failed to check for PVCs", "error", err)
 		}
-		Expect(found).To(BeFalse())
+		Expect(found).To(BeFalse(), "PersistentVolumeClaims were found, none expected.")

 		found, err = common.CheckForPVs()
 		if err != nil {
-			logf.Log.Error(err, "Failed to check PVs")
+			logf.Log.Info("Failed to check PVs", "error", err)
 		}
-		Expect(found).To(BeFalse())
+		Expect(found).To(BeFalse(), "PersistentVolumes were found, none expected.")

 		found, err = common.CheckForMSVs()
 		if err != nil {
-			logf.Log.Error(err, "Failed to check MSVs")
+			logf.Log.Info("Failed to check MSVs", "error", err)
 		}
-		Expect(found).To(BeFalse())
+		Expect(found).To(BeFalse(), "Mayastor volume CRDs were found, none expected.")

-		poolsDeleted := common.DeleteAllPools()
-		Expect(poolsDeleted).To(BeTrue())
 	}

+	poolsDeleted := common.DeleteAllPools()
+	Expect(poolsDeleted).To(BeTrue())
+
 	logf.Log.Info("Cleanup done, Uninstalling mayastor")

+	yamlsDir := locations.GetGeneratedYamlsDir()
 	// Deletes can stall indefinitely, try to mitigate this
 	// by running the deletes on different threads
-	go deleteDeployYaml("csi-daemonset.yaml")
-	go deleteDeployYaml("mayastor-daemonset.yaml")
-	go deleteDeployYaml("moac-deployment.yaml")
-	go deleteDeployYaml("nats-deployment.yaml")
+	go common.KubeCtlDeleteYaml("csi-daemonset.yaml", yamlsDir)
+	go common.KubeCtlDeleteYaml("mayastor-daemonset.yaml", yamlsDir)
+	go common.KubeCtlDeleteYaml("moac-deployment.yaml", yamlsDir)
+	go common.KubeCtlDeleteYaml("nats-deployment.yaml", yamlsDir)

 	{
 		const timeOutSecs = 240
@@ -113,25 +98,30 @@
 		}
 	}

-	// The focus is on trying to make the cluster reusable, so we try to delete everything.
-	// TODO: When we start using a cluster for a single test run move these set of deletes to after all checks.
-	deleteDeployYaml("mayastorpoolcrd.yaml")
-	deleteDeployYaml("moac-rbac.yaml")
-	deleteDeployYaml("storage-class.yaml")
+	deployDir := locations.GetDeployDir()
+	common.KubeCtlDeleteYaml("mayastorpoolcrd.yaml", deployDir)
+	common.KubeCtlDeleteYaml("moac-rbac.yaml", yamlsDir)
+
+	// MOAC implicitly creates these CRDs, should we delete?
 	deleteCRD("mayastornodes.openebs.io")
 	deleteCRD("mayastorvolumes.openebs.io")

 	if cleanup {
 		// Attempt to forcefully delete mayastor pods
-		_, podCount, err := common.ForceDeleteMayastorPods()
-		Expect(cleaned).To(BeTrue())
-		Expect(podCount).To(BeZero())
-		Expect(err).ToNot(HaveOccurred())
+		deleted, podCount, err := common.ForceDeleteMayastorPods()
+		Expect(err).ToNot(HaveOccurred(), "ForceDeleteMayastorPods failed %v", err)
+		Expect(podCount).To(BeZero(), "Not all Mayastor pods have been deleted")
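+		// The uninstall above fires the kubectl deletes on separate
+		// goroutines because deletes can stall indefinitely, then polls a
+		// pod count with a bounded timeout. A minimal, self-contained sketch
+		// of that fire-and-poll pattern; deleteResource and the counter are
+		// illustrative stand-ins, not the real common package helpers:
+		//
+		//	package main
+		//
+		//	import (
+		//		"fmt"
+		//		"sync/atomic"
+		//		"time"
+		//	)
+		//
+		//	func main() {
+		//		var remaining int32 = 4
+		//		deleteResource := func(name string) {
+		//			// Stand-in for a kubectl delete that may stall.
+		//			time.Sleep(100 * time.Millisecond)
+		//			atomic.AddInt32(&remaining, -1)
+		//			fmt.Println("deleted", name)
+		//		}
+		//		for _, name := range []string{"csi", "mayastor", "moac", "nats"} {
+		//			go deleteResource(name) // do not block on any single delete
+		//		}
+		//		const timeOutSecs = 240
+		//		for i := 0; i < timeOutSecs && atomic.LoadInt32(&remaining) > 0; i++ {
+		//			time.Sleep(time.Second)
+		//		}
+		//		fmt.Println("undeleted resources:", atomic.LoadInt32(&remaining))
+		//	}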

 		// Only delete the namespace if there are no pending resources
-		// other wise this hangs.
+		// otherwise this hangs.
 		deleteNamespace()
+		if deleted {
+			logf.Log.Info("Mayastor pods were force deleted on cleanup!")
+		}
+		if cleaned {
+			logf.Log.Info("Application pods or volume resources were deleted on cleanup!")
+		}
 	} else {
-		Expect(common.MayastorUndeletedPodCount()).To(Equal(0))
+		Expect(common.MayastorUndeletedPodCount()).To(Equal(0), "Not all Mayastor pods were removed on uninstall")
 		// More verbose here as deleting the namespace is often where this
 		// test hangs.
 		logf.Log.Info("Deleting the mayastor namespace")
@@ -142,10 +132,6 @@

 func TestTeardownSuite(t *testing.T) {
 	RegisterFailHandler(Fail)
-
-	if os.Getenv("e2e_uninstall_cleanup") != "0" {
-		cleanup = true
-	}
 	RunSpecsWithDefaultAndCustomReporters(t, "Basic Teardown Suite", rep.GetReporters("uninstall"))
 }

@@ -156,7 +142,6 @@ var _ = Describe("Mayastor setup", func() {
 })

 var _ = BeforeSuite(func(done Done) {
-	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
 	common.SetupTestEnv()

 	close(done)

From 4f0fd72a3f9257ca51cc19db5645e997740cd067 Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Fri, 5 Mar 2021 15:27:13 +0000
Subject: [PATCH 64/78] ci: i/o soak test - ensure remote replicas

Ensure the i/o soak test runs with some volumes provisioned on a remote
node.

Run the i/o soak test in a configuration where at least one instance of
mayastor is running on a node where no application pods are running, so
that some volumes can only be provisioned on a remote node. Do this by
adding labels to "alternate" nodes, and node selectors to pods.

This relies on the fact that if enough pods and volumes are scheduled to
run simultaneously MOAC will schedule some replicas to run on nodes where
the test apps are not running. This is deemed good enough, and has been
observed to work on a 3 node cluster with 30 fio pods and volumes.
---
 scripts/e2e-test.sh                |  8 ++++----
 test/e2e/common/util_testpods.go   |  6 ++++++
 test/e2e/io_soak/filesystem_fio.go | 21 +++++++++++++++------
 test/e2e/io_soak/io_soak_test.go   | 28 +++++++++++++++++++++++++---
 test/e2e/io_soak/rawblock_fio.go   | 20 +++++++++++++++-----
 5 files changed, 65 insertions(+), 18 deletions(-)

diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index 1789404f0..80c8b424f 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -16,10 +16,10 @@ TOPDIR=$(realpath "$SCRIPTDIR/..")
 # 2. replicas_pod_remove SHOULD be the last test before uninstall
 #    this is a disruptive test.
#TESTS="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall" -ALL_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" +DEFAULT_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" ONDEMAND_TESTS="install basic_volume_io csi resource_check uninstall" -EXTENDED_TESTS="install basic_volume_io csi resource_check uninstall" -CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall" +EXTENDED_TESTS="install basic_volume_io csi resource_check io_soak uninstall" +CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild io_soak uninstall" #exit values EXITV_OK=0 @@ -188,7 +188,7 @@ case "$profile" in tests="$custom_tests" ;; default) - tests="$ALL_TESTS" + tests="$DEFAULT_TESTS" ;; *) echo "Unknown profile: $profile" diff --git a/test/e2e/common/util_testpods.go b/test/e2e/common/util_testpods.go index 435c03694..19b2c4f1b 100644 --- a/test/e2e/common/util_testpods.go +++ b/test/e2e/common/util_testpods.go @@ -136,6 +136,12 @@ func CreateFioPodDef(podName string, volName string) *corev1.Pod { return createFioPodDef(podName, volName, false) } +/// Create a test fio pod in default namespace, no options and no context +/// mayastor volume is mounted on /dev/sdm +func CreateRawBlockFioPodDef(podName string, volName string) *corev1.Pod { + return createFioPodDef(podName, volName, true) +} + /// Create a test fio pod in default namespace, no options and no context /// mayastor volume is mounted on /volume func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { diff --git a/test/e2e/io_soak/filesystem_fio.go b/test/e2e/io_soak/filesystem_fio.go index 9fd239e14..7c2071332 100644 --- a/test/e2e/io_soak/filesystem_fio.go +++ b/test/e2e/io_soak/filesystem_fio.go @@ -11,7 +11,6 @@ import ( ) // IO soak filesystem fio job - type FioFsSoakJob struct { volName string scName string @@ -27,8 +26,10 @@ func (job FioFsSoakJob) removeVolume() { common.RmPVC(job.volName, job.scName) } -func (job FioFsSoakJob) makeTestPod() (*coreV1.Pod, error) { - pod, err := common.CreateFioPod(job.podName, job.volName) +func (job FioFsSoakJob) makeTestPod(selector map[string]string) (*coreV1.Pod, error) { + pod := common.CreateFioPodDef(job.podName, job.volName) + pod.Spec.NodeSelector = selector + pod, err := common.CreatePod(pod) return pod, err } @@ -37,13 +38,21 @@ func (job FioFsSoakJob) removeTestPod() error { } func (job FioFsSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + thinkTime := 1 // 1 microsecond + thinkTimeBlocks := 1000 + FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles - ixp := job.id % len(FioDutyCycles) + if len(FioDutyCycles) != 0 { + ixp := job.id % len(FioDutyCycles) + thinkTime = FioDutyCycles[ixp].ThinkTime + thinkTimeBlocks = FioDutyCycles[ixp].ThinkTimeBlocks + } + RunIoSoakFio( job.podName, duration, - FioDutyCycles[ixp].ThinkTime, - FioDutyCycles[ixp].ThinkTimeBlocks, + thinkTime, + thinkTimeBlocks, false, doneC, errC, diff --git a/test/e2e/io_soak/io_soak_test.go b/test/e2e/io_soak/io_soak_test.go index 8709e0276..770c16828 100644 --- a/test/e2e/io_soak/io_soak_test.go +++ b/test/e2e/io_soak/io_soak_test.go @@ -23,13 +23,20 @@ var defTimeoutSecs = "120s" type IoSoakJob interface { makeVolume() - makeTestPod() (*coreV1.Pod, error) + makeTestPod(map[string]string) (*corev1.Pod, error) removeTestPod() error removeVolume() run(time.Duration, chan<- string, chan<- error) getPodName() string } +const 
NodeSelectorKey = "e2e-io-soak" +const NodeSelectorAppValue = "e2e-app" + +var AppNodeSelector = map[string]string{ + NodeSelectorKey: NodeSelectorAppValue, +} + var scNames []string var jobs []IoSoakJob @@ -64,6 +71,8 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur nodeList, err := common.GetNodeLocs() Expect(err).ToNot(HaveOccurred()) + var nodes []string + numMayastorNodes := 0 jobCount := 0 sort.Slice(nodeList, func(i, j int) bool { return nodeList[i].NodeName < nodeList[j].NodeName }) @@ -72,6 +81,13 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur logf.Log.Info("MayastorNode", "name", node.NodeName, "index", i) jobCount += loadFactor numMayastorNodes += 1 + nodes = append(nodes, node.NodeName) + } + } + + for i, node := range nodes { + if i%2 == 0 { + common.LabelNode(node, NodeSelectorKey, NodeSelectorAppValue) } } @@ -115,7 +131,7 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur logf.Log.Info("Creating test pods") // Create the job test pods for _, job := range jobs { - pod, err := job.makeTestPod() + pod, err := job.makeTestPod(AppNodeSelector) Expect(err).ToNot(HaveOccurred()) Expect(pod).ToNot(BeNil()) } @@ -169,9 +185,15 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur err = common.RmStorageClass(scName) Expect(err).ToNot(HaveOccurred()) } + + for i, node := range nodes { + if i%2 == 0 { + common.UnlabelNode(node, NodeSelectorKey) + } + } } -var _ = Describe("Mayastor Volume IO test", func() { +var _ = Describe("Mayastor Volume IO soak test", func() { AfterEach(func() { logf.Log.Info("AfterEach") diff --git a/test/e2e/io_soak/rawblock_fio.go b/test/e2e/io_soak/rawblock_fio.go index f4a8b6ab2..237ec4d26 100644 --- a/test/e2e/io_soak/rawblock_fio.go +++ b/test/e2e/io_soak/rawblock_fio.go @@ -27,8 +27,10 @@ func (job FioRawBlockSoakJob) removeVolume() { common.RmPVC(job.volName, job.scName) } -func (job FioRawBlockSoakJob) makeTestPod() (*coreV1.Pod, error) { - pod, err := common.CreateRawBlockFioPod(job.podName, job.volName) +func (job FioRawBlockSoakJob) makeTestPod(selector map[string]string) (*coreV1.Pod, error) { + pod := common.CreateRawBlockFioPodDef(job.podName, job.volName) + pod.Spec.NodeSelector = selector + pod, err := common.CreatePod(pod) return pod, err } @@ -37,13 +39,21 @@ func (job FioRawBlockSoakJob) removeTestPod() error { } func (job FioRawBlockSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) { + thinkTime := 1 // 1 microsecond + thinkTimeBlocks := 1000 + FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles - ixp := job.id % len(FioDutyCycles) + if len(FioDutyCycles) != 0 { + ixp := job.id % len(FioDutyCycles) + thinkTime = FioDutyCycles[ixp].ThinkTime + thinkTimeBlocks = FioDutyCycles[ixp].ThinkTimeBlocks + } + RunIoSoakFio( job.podName, duration, - FioDutyCycles[ixp].ThinkTime, - FioDutyCycles[ixp].ThinkTimeBlocks, + thinkTime, + thinkTimeBlocks, true, doneC, errC, From 6614033494bfcf97048bdb161c7a446940cbc494 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Mon, 8 Mar 2021 18:45:58 +0000 Subject: [PATCH 65/78] fix(composer): update separator for security options Running a cargo test that uses composer results in these entries in the system log: dockerd[pid]: time="