From 77ebfa7b0d811a530b6d015dfd1bd70de2f9304f Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Wed, 25 Nov 2020 12:04:12 +0000
Subject: [PATCH 01/85] Introduce init stage for disabling default commit
 status in the future

---
 Jenkinsfile | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 7163c1bdf..7dfe51601 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,13 +1,5 @@
 #!/usr/bin/env groovy
 
-// Will ABORT current job for cases when we don't want to build
-if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') &&
-    BRANCH_NAME == "develop") {
-  print "INFO: Branch Indexing, aborting job."
-  currentBuild.result = 'ABORTED'
-  return
-}
-
 // Update status of a commit in github
 def updateGithubCommitStatus(commit, msg, state) {
   step([
@@ -72,6 +64,25 @@ pipeline {
   }
 
   stages {
+    stage('init') {
+      agent { label 'nixos-mayastor' }
+      steps {
+        // TODO: We want to disable built-in github commit notifications.
+        // skip-notifications-trait plugin in combination with checkout scm step
+        // should do that but not sure how exactly.
+        checkout scm
+        script {
+          // Will ABORT current job for cases when we don't want to build
+          if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') &&
+              BRANCH_NAME == "develop") {
+            print "INFO: Branch Indexing, aborting job."
+            currentBuild.result = 'ABORTED'
+            error('Stopping early')
+          }
+        }
+        updateGithubCommitStatus(env.GIT_COMMIT, 'Test started', 'pending')
+      }
+    }
     stage('linter') {
       agent { label 'nixos-mayastor' }
       when {
@@ -84,7 +95,6 @@
        }
      }
      steps {
-        updateGithubCommitStatus(env.GIT_COMMIT, 'Started to test the commit', 'pending')
        sh 'nix-shell --run "cargo fmt --all -- --check"'
        sh 'nix-shell --run "cargo clippy --all-targets -- -D warnings"'
        sh 'nix-shell --run "./scripts/js-check.sh"'
@@ -160,7 +170,6 @@
        }
      }
      steps {
-        updateGithubCommitStatus(env.GIT_COMMIT, 'Started to test the commit', 'pending')
        withCredentials([usernamePassword(credentialsId: 'dockerhub', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
          sh 'echo $PASSWORD | docker login -u $USERNAME --password-stdin'
        }

From 813805b657333d680ecbb1a6dbd2a5eea3e07d59 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 27 Nov 2020 16:11:42 +0000
Subject: [PATCH 02/85] Bump systeminformation from 4.27.11 to 4.30.5 in
 /mayastor-test

Bumps [systeminformation](https://github.com/sebhildebrandt/systeminformation) from 4.27.11 to 4.30.5.
- [Release notes](https://github.com/sebhildebrandt/systeminformation/releases)
- [Changelog](https://github.com/sebhildebrandt/systeminformation/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sebhildebrandt/systeminformation/compare/v4.27.11...v4.30.5)

Signed-off-by: dependabot[bot]
---
 mayastor-test/package-lock.json | 6 +++---
 mayastor-test/package.json      | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mayastor-test/package-lock.json b/mayastor-test/package-lock.json
index 009cc60ec..3e84020c7 100644
--- a/mayastor-test/package-lock.json
+++ b/mayastor-test/package-lock.json
@@ -2763,9 +2763,9 @@
       }
     },
     "systeminformation": {
-      "version": "4.27.11",
-      "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.27.11.tgz",
-      "integrity": "sha512-U7bigXbOnsB8k1vNHS0Y13RCsRz5/UohiUmND+3mMUL6vfzrpbe/h4ZqewowB+B+tJNnmGFDj08Z8xGfYo45dQ=="
+      "version": "4.30.5",
+      "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.30.5.tgz",
+      "integrity": "sha512-aYWs8yttl8ePpr6VOQ/Ak8cznuc9L/NQODVhbOKhInX73ZMLvV2BS86Mzr7LLfmUteVFR36CTDNQgiJgRqq+SQ=="
     },
     "table": {
       "version": "5.4.6",
diff --git a/mayastor-test/package.json b/mayastor-test/package.json
index 25192e8d5..d8bb02e9f 100644
--- a/mayastor-test/package.json
+++ b/mayastor-test/package.json
@@ -24,7 +24,7 @@
     "read": "^1.0.7",
     "semistandard": "^14.2.0",
     "sleep-promise": "^8.0.1",
-    "systeminformation": "^4.27.11",
+    "systeminformation": "^4.30.5",
     "wtfnode": "^0.8.1"
   },
   "author": "Jan Kryl ",

From 18e9c9e769b6eeaf1b82f80fcaccdc665db776a3 Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Thu, 26 Nov 2020 16:01:58 +0000
Subject: [PATCH 03/85] Change install test to use external resources

CAS-541 Change the install test to use a defined docker registry, and yaml
files for pools.
Note pool status is not checked for success.
Communication of the resources to use is via environment variables; that
appears to be the usual mechanism for go test.
Previous behaviour is supported, see README.md
---
 mayastor-test/e2e/install/README.md           | 15 ++++-
 .../deploy/{pool.yaml => pool.yaml.template}  |  2 +-
 mayastor-test/e2e/install/install_test.go     | 66 ++++++++++++++-----
 3 files changed, 63 insertions(+), 20 deletions(-)
 rename mayastor-test/e2e/install/deploy/{pool.yaml => pool.yaml.template} (86%)

diff --git a/mayastor-test/e2e/install/README.md b/mayastor-test/e2e/install/README.md
index 6309f98c8..99fe347cd 100644
--- a/mayastor-test/e2e/install/README.md
+++ b/mayastor-test/e2e/install/README.md
@@ -17,8 +17,21 @@ The test host must have the following installed:
 * kubectl (tested with v1.18)
 
 # Running the tests
-
+Environment variables
+* e2e_docker_registry
+  * The IP address:port of the registry to be used.
+  * If unspecified, a test registry is assumed to have been deployed in the cluster on port 30291, and a suitable IP address is selected.
+* e2e_pool_yaml_files
+  * The list of yaml files defining pools for the cluster, comma separated, absolute paths.
+* e2e_pool_device
+  * This environment variable is used if `e2e_pool_yaml_files` is undefined.
+  * Pools are created for each node running mayastor, using the template file and the specified pool device (an example pool yaml is sketched below).
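+
+For reference, a pool yaml file passed via `e2e_pool_yaml_files` might look like the
+sketch below. This is illustrative only: the `apiVersion`, `kind` and metadata are
+assumptions based on the MayastorPool custom resource used elsewhere in this series
+(the `openebs.io/v1alpha1` group/version the tests query), and the node name and
+disk device are placeholders to replace with values from your cluster.
+```yaml
+apiVersion: "openebs.io/v1alpha1"
+kind: MayastorPool
+metadata:
+  name: pool-on-node-1
+  namespace: mayastor
+spec:
+  node: node-1
+  disks: ["/dev/sdb"]
+```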
 ```sh
 cd Mayastor/e2e/install
 go test
+```
+Or
+```sh
+cd Mayastor/e2e/install
+e2e_docker_registry='192.168.122.1:5000' e2e_pool_yaml_files='/e2e/pools.yaml' go test
 ```
\ No newline at end of file
diff --git a/mayastor-test/e2e/install/deploy/pool.yaml b/mayastor-test/e2e/install/deploy/pool.yaml.template
similarity index 86%
rename from mayastor-test/e2e/install/deploy/pool.yaml
rename to mayastor-test/e2e/install/deploy/pool.yaml.template
index 3cdfd52f6..8d0a9d4d5 100644
--- a/mayastor-test/e2e/install/deploy/pool.yaml
+++ b/mayastor-test/e2e/install/deploy/pool.yaml.template
@@ -6,4 +6,4 @@ metadata:
   namespace: mayastor
 spec:
   node: ${NODE_NAME}
-  disks: ["/dev/sda"]
+  disks: ["${POOL_DEVICE}"]
diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go
index af27511a2..f35d0f03e 100644
--- a/mayastor-test/e2e/install/install_test.go
+++ b/mayastor-test/e2e/install/install_test.go
@@ -4,9 +4,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"os"
 	"os/exec"
 	"path"
 	"runtime"
+	"strings"
 	"testing"
 	"time"
 
@@ -87,13 +89,18 @@ func getTestClusterDetails() (string, int, []string, error) {
 		return "", 0, nil, errors.New("no usable nodes found")
 	}
 
-	/// TODO Refine how we workout the address of the test-registry
-	/// If there is master node, use its IP address as the registry IP address
-	if len(master) != 0 {
-		return master, nme, mayastorNodes, nil
+	registry := os.Getenv("e2e_docker_registry")
+	if len(registry) == 0 {
+		// a registry was not specified
+		// If there is a master node, use its IP address as the registry IP address
+		if len(master) != 0 {
+			registry = master + ":30291"
+		} else {
+			/// Otherwise choose the IP address of the first node in the list as the registry IP address
+			registry = nodeIPs[0] + ":30291"
+		}
 	}
-	/// Otherwise choose the IP address of first node in the list as the registry IP address
-	return nodeIPs[0], nme, mayastorNodes, nil
+	return registry, nme, mayastorNodes, nil
 }
 
 // Encapsulate the logic to find where the deploy yamls are
@@ -116,12 +123,12 @@ func getTemplateYamlDir() string {
 	return path.Clean(filename + "/../deploy")
 }
 
-func makeImageName(registryAddress string, registryport string, imagename string, imageversion string) string {
-	return registryAddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion
+func makeImageName(registryAddress string, imagename string, imageversion string) string {
+	return registryAddress + "/mayadata/" + imagename + ":" + imageversion
 }
 
 func applyTemplatedYaml(filename string, imagename string, registryAddress string) {
-	fullimagename := makeImageName(registryAddress, "30291", imagename, "ci")
+	fullimagename := makeImageName(registryAddress, imagename, "ci")
 	bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -"
 	cmd := exec.Command("bash", "-c", bashcmd)
 	cmd.Dir = getTemplateYamlDir()
@@ -149,6 +156,37 @@ func moacReadyPodCount() int {
 	return int(moacDeployment.Status.AvailableReplicas)
 }
 
+// create pools for the cluster
+func createPools(mayastorNodes []string) {
+	envPoolYamls := os.Getenv("e2e_pool_yaml_files")
+	poolDevice := os.Getenv("e2e_pool_device")
+	if len(envPoolYamls) != 0 {
+		// Apply the list of externally defined pool yaml files
+		// NO check is made on the status of pools
+		poolYamlFiles := strings.Split(envPoolYamls, ",")
+		for _, poolYaml := range poolYamlFiles {
+			fmt.Println("applying ", poolYaml)
+			bashcmd := "kubectl apply -f " + poolYaml
+			cmd := exec.Command("bash", "-c", bashcmd)
+			_, err := cmd.CombinedOutput()
+			Expect(err).ToNot(HaveOccurred())
+		}
+	} else if len(poolDevice) != 0 {
+		// Use the template file to create pools as per the devices
+		// NO check is made on the status of pools
+		for _, mayastorNode := range mayastorNodes {
+			fmt.Println("creating pool on:", mayastorNode, " using device:", poolDevice)
+			bashcmd := "NODE_NAME=" + mayastorNode + " POOL_DEVICE=" + poolDevice + " envsubst < " + "pool.yaml.template" + " | kubectl apply -f -"
+			cmd := exec.Command("bash", "-c", bashcmd)
+			cmd.Dir = getTemplateYamlDir()
+			_, err := cmd.CombinedOutput()
+			Expect(err).ToNot(HaveOccurred())
+		}
+	} else {
+		// No pools created
+	}
+}
+
 // Install mayastor on the cluster under test.
 // We deliberately call out to kubectl, rather than constructing the client-go
 // objects, so that we can verify the local deploy yamls are correct.
@@ -181,15 +219,7 @@ func installMayastor() {
 	).Should(Equal(1))
 
 	// Now create pools on all nodes.
-	// Note the disk for use on each node has been set in deploy/pool.yaml
-	// TODO make the pool disk configurable
-	for _, mayastorNode := range mayastorNodes {
-		bashcmd := "NODE_NAME=" + mayastorNode + " envsubst < " + "pool.yaml" + " | kubectl apply -f -"
-		cmd := exec.Command("bash", "-c", bashcmd)
-		cmd.Dir = getTemplateYamlDir()
-		_, err := cmd.CombinedOutput()
-		Expect(err).ToNot(HaveOccurred())
-	}
+	createPools(mayastorNodes)
 }
 
 func TestInstallSuite(t *testing.T) {

From 5d2d91047e407ad9ae414386f1a26b5f7efffc33 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Wed, 25 Nov 2020 14:34:48 +0000
Subject: [PATCH 04/85] Basic nvmf test to run fio against a volume

---
 mayastor-test/e2e/common/test.go              |  86 +++++
 mayastor-test/e2e/common/util.go              | 357 ++++++++++++++++++
 .../e2e/nvmf_vol/deploy/fio_nvmf.yaml         |  19 +
 .../e2e/nvmf_vol/deploy/pvc_nvmf.yaml         |  12 +
 mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go   |  67 ++++
 mayastor-test/e2e/setup/bringup-cluster.sh    |   4 +-
 6 files changed, 543 insertions(+), 2 deletions(-)
 create mode 100644 mayastor-test/e2e/common/test.go
 create mode 100644 mayastor-test/e2e/common/util.go
 create mode 100644 mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml
 create mode 100644 mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml
 create mode 100644 mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go

diff --git a/mayastor-test/e2e/common/test.go b/mayastor-test/e2e/common/test.go
new file mode 100644
index 000000000..f17ec5b98
--- /dev/null
+++ b/mayastor-test/e2e/common/test.go
@@ -0,0 +1,86 @@
+package common
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+
+	. "github.com/onsi/ginkgo"
"github.com/onsi/gomega" + + "k8s.io/client-go/deprecated/scheme" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +type TestEnvironment struct { + Cfg *rest.Config + K8sClient client.Client + KubeInt kubernetes.Interface + K8sManager *ctrl.Manager + TestEnv *envtest.Environment + DynamicClient dynamic.Interface +} + +func SetupTestEnv() TestEnvironment { + + By("bootstrapping test environment") + useCluster := true + testEnv := &envtest.Environment{ + UseExistingCluster: &useCluster, + AttachControlPlaneOutput: true, + } + + var err error + cfg, err := testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + go func() { + err = k8sManager.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred()) + }() + + mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mgrSyncCtxCancel() + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + fmt.Println("Failed to sync") + } + + k8sClient := k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + restConfig := config.GetConfigOrDie() + Expect(restConfig).ToNot(BeNil()) + + kubeInt := kubernetes.NewForConfigOrDie(restConfig) + Expect(kubeInt).ToNot(BeNil()) + + dynamicClient := dynamic.NewForConfigOrDie(restConfig) + Expect(dynamicClient).ToNot(BeNil()) + + return TestEnvironment{ + Cfg: cfg, + K8sClient: k8sClient, + KubeInt: kubeInt, + K8sManager: &k8sManager, + TestEnv: testEnv, + DynamicClient: dynamicClient, + } +} + +func TeardownTestEnv(env *TestEnvironment) { + err := env.TestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +} diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go new file mode 100644 index 000000000..dd649a5ee --- /dev/null +++ b/mayastor-test/e2e/common/util.go @@ -0,0 +1,357 @@ +package common + +import ( + "context" + "fmt" + "os/exec" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"github.com/onsi/gomega" + + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var defTimeoutSecs = "90s" + +func ApplyDeployYaml(filename string) { + cmd := exec.Command("kubectl", "apply", "-f", filename) + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +func DeleteDeployYaml(filename string) { + cmd := exec.Command("kubectl", "delete", "-f", filename) + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +func LabelNode(nodename string, label string) { + cmd := exec.Command("kubectl", "label", "node", nodename, label) + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// Status part of the mayastor volume CRD +type mayastorVolStatus struct { + state string + reason string + node string +} + +func GetMSV(uuid string, dynamicClient *dynamic.Interface) *mayastorVolStatus { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + msv, err := (*dynamicClient).Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + if err != nil { + fmt.Println(err) + return nil + } + if msv == nil { + return nil + } + status, found, err := unstructured.NestedFieldCopy(msv.Object, "status") + if err != nil { + fmt.Println(err) + return nil + } + + if !found { + return nil + } + msVol := mayastorVolStatus{} + v := reflect.ValueOf(status) + if v.Kind() == reflect.Map { + for _, key := range v.MapKeys() { + sKey := key.Interface().(string) + val := v.MapIndex(key) + switch sKey { + case "state": + msVol.state = val.Interface().(string) + break + case "reason": + msVol.reason = val.Interface().(string) + break + case "node": + msVol.node = val.Interface().(string) + break + } + } + } + return &msVol +} + +// Check for a deleted Mayastor Volume, +// the object does not exist if deleted +func IsMSVDeleted(uuid string, dynamicClient *dynamic.Interface) bool { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + msv, err := (*dynamicClient).Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{}) + + if err != nil { + // Unfortunately there is no associated error code so we resort to string comparison + if strings.HasPrefix(err.Error(), "mayastorvolumes.openebs.io") && + strings.HasSuffix(err.Error(), " not found") { + return true + } + } + + Expect(err).To(BeNil()) + Expect(msv).ToNot(BeNil()) + return false +} + +// Check for a deleted Persistent Volume Claim, +// either the object does not exist +// or the status phase is invalid. 
+func IsPVCDeleted(volName string, kubeInt *kubernetes.Interface) bool {
+	pvc, err := (*kubeInt).CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	if err != nil {
+		// Unfortunately there is no associated error code so we resort to string comparison
+		if strings.HasPrefix(err.Error(), "persistentvolumeclaims") &&
+			strings.HasSuffix(err.Error(), " not found") {
+			return true
+		}
+	}
+	// After the PVC has been deleted it may still be accessible, but the status phase will be invalid
+	Expect(err).To(BeNil())
+	Expect(pvc).ToNot(BeNil())
+	switch pvc.Status.Phase {
+	case
+		corev1.ClaimBound,
+		corev1.ClaimPending,
+		corev1.ClaimLost:
+		return false
+	default:
+		return true
+	}
+}
+
+// Check for a deleted Persistent Volume,
+// either the object does not exist
+// or the status phase is invalid.
+func IsPVDeleted(volName *string, kubeInt *kubernetes.Interface) bool {
+	vn := *volName
+	pv, err := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), vn, metav1.GetOptions{})
+	if err != nil {
+		// Unfortunately there is no associated error code so we resort to string comparison
+		if strings.HasPrefix(err.Error(), "persistentvolumes") &&
+			strings.HasSuffix(err.Error(), " not found") {
+			return true
+		}
+	}
+	// After the PV has been deleted it may still be accessible, but the status phase will be invalid
+	Expect(err).To(BeNil())
+	Expect(pv).ToNot(BeNil())
+	switch pv.Status.Phase {
+	case
+		corev1.VolumeBound,
+		corev1.VolumeAvailable,
+		corev1.VolumeFailed,
+		corev1.VolumePending,
+		corev1.VolumeReleased:
+		return false
+	default:
+		return true
+	}
+}
+
+// Retrieve status phase of a Persistent Volume Claim
+func GetPvcStatusPhase(volname string, kubeInt *kubernetes.Interface) (phase corev1.PersistentVolumeClaimPhase) {
+	pvc, getPvcErr := (*kubeInt).CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{})
+	Expect(getPvcErr).To(BeNil())
+	Expect(pvc).ToNot(BeNil())
+	return pvc.Status.Phase
+}
+
+// Retrieve status phase of a Persistent Volume
+func GetPvStatusPhase(volname string, kubeInt *kubernetes.Interface) (phase corev1.PersistentVolumePhase) {
+	pv, getPvErr := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{})
+	Expect(getPvErr).To(BeNil())
+	Expect(pv).ToNot(BeNil())
+	return pv.Status.Phase
+}
+
+// Retrieve the state of a Mayastor Volume
+func GetMsvState(uuid string, dynamicClient *dynamic.Interface) string {
+	msv := GetMSV(uuid, dynamicClient)
+	Expect(msv).ToNot(BeNil())
+	return fmt.Sprintf("%s", msv.state)
+}
+
+// Retrieve the nexus node hosting the Mayastor Volume
+func GetMsvNode(uuid string, dynamicClient *dynamic.Interface) string {
+	msv := GetMSV(uuid, dynamicClient)
+	Expect(msv).ToNot(BeNil())
+	return fmt.Sprintf("%s", msv.node)
+}
+
+// Create a PVC and verify that
+// 1. The PVC status transitions to bound,
+// 2. The associated PV is created and its status transitions bound
+// 3. The associated MV is created and has a State "healthy"
+func MkPVC(volName string, scName string, dynamicClient *dynamic.Interface, kubeInt *kubernetes.Interface) string {
+	fmt.Printf("creating %s, %s\n", volName, scName)
+	// PVC create options
+	createOpts := &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      volName,
+			Namespace: "default",
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			StorageClassName: &scName,
+			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+			Resources: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("64Mi"),
+				},
+			},
+		},
+	}
+
+	// Create the PVC.
+	PVCApi := (*kubeInt).CoreV1().PersistentVolumeClaims
+	_, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{})
+	Expect(createErr).To(BeNil())
+
+	// Confirm the PVC has been created.
+	pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	Expect(getPvcErr).To(BeNil())
+	Expect(pvc).ToNot(BeNil())
+
+	// Wait for the PVC to be bound.
+	Eventually(func() corev1.PersistentVolumeClaimPhase {
+		return GetPvcStatusPhase(volName, kubeInt)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(corev1.ClaimBound))
+
+	// Refresh the PVC contents, so that we can get the PV name.
+	pvc, getPvcErr = PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	Expect(getPvcErr).To(BeNil())
+	Expect(pvc).ToNot(BeNil())
+
+	// Wait for the PV to be provisioned
+	Eventually(func() *corev1.PersistentVolume {
+		pv, getPvErr := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+		if getPvErr != nil {
+			return nil
+		}
+		return pv
+
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Not(BeNil()))
+
+	// Wait for the PV to be bound.
+	Eventually(func() corev1.PersistentVolumePhase {
+		return GetPvStatusPhase(pvc.Spec.VolumeName, kubeInt)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(corev1.VolumeBound))
+
+	msv := GetMSV(string(pvc.ObjectMeta.UID), dynamicClient)
+	Expect(msv).ToNot(BeNil())
+	return string(pvc.ObjectMeta.UID)
+}
+
+// Delete the PVC and verify that
+// 1. The PVC is deleted
+// 2. The associated PV is deleted
+// 3. The associated MV is deleted
+func RmPVC(volName string, scName string, dynamicClient *dynamic.Interface, kubeInt *kubernetes.Interface) {
+	fmt.Printf("removing %s, %s\n", volName, scName)
+
+	PVCApi := (*kubeInt).CoreV1().PersistentVolumeClaims
+
+	// Confirm the PVC has been created.
+	pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	Expect(getPvcErr).To(BeNil())
+	Expect(pvc).ToNot(BeNil())
+
+	// Delete the PVC
+	deleteErr := PVCApi("default").Delete(context.TODO(), volName, metav1.DeleteOptions{})
+	Expect(deleteErr).To(BeNil())
+
+	// Wait for the PVC to be deleted.
+	Eventually(func() bool {
+		return IsPVCDeleted(volName, kubeInt)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(true))
+
+	// Wait for the PV to be deleted.
+	Eventually(func() bool {
+		return IsPVDeleted(&(pvc.Spec.VolumeName), kubeInt)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(true))
+
+	// Wait for the MSV to be deleted.
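+	// Poll at the 1s interval given below until IsMSVDeleted reports true,
+	// failing the test if the defTimeoutSecs (90s) timeout elapses first.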
+	Eventually(func() bool {
+		return IsMSVDeleted(string(pvc.ObjectMeta.UID), dynamicClient)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(true))
+}
+
+func RunFio() {
+	cmd := exec.Command(
+		"kubectl",
+		"exec",
+		"-it",
+		"fio",
+		"--",
+		"fio",
+		"--name=benchtest",
+		"--size=50m",
+		"--filename=/volume/test",
+		"--direct=1",
+		"--rw=randrw",
+		"--ioengine=libaio",
+		"--bs=4k",
+		"--iodepth=16",
+		"--numjobs=1",
+		"--time_based",
+		"--runtime=20",
+	)
+	cmd.Dir = ""
+	_, err := cmd.CombinedOutput()
+	Expect(err).ToNot(HaveOccurred())
+}
+
+func FioReadyPod(k8sClient *client.Client) bool {
+	var fioPod corev1.Pod
+	if (*k8sClient).Get(context.TODO(), types.NamespacedName{Name: "fio", Namespace: "default"}, &fioPod) != nil {
+		return false
+	}
+	return fioPod.Status.Phase == v1.PodRunning
+}
diff --git a/mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml b/mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml
new file mode 100644
index 000000000..9cef19e7b
--- /dev/null
+++ b/mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml
@@ -0,0 +1,19 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: fio
+spec:
+  volumes:
+    - name: ms-volume
+      persistentVolumeClaim:
+        claimName: vol-test-pvc-nvmf
+  containers:
+    - name: fio
+      image: nixery.dev/shell/fio/tini
+      command: [ "tini", "--" ]
+      args:
+        - sleep
+        - "1000000"
+      volumeMounts:
+        - mountPath: "/volume"
+          name: ms-volume
diff --git a/mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml b/mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml
new file mode 100644
index 000000000..7f80e30ae
--- /dev/null
+++ b/mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: vol-test-pvc-nvmf
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 64Mi
+  # storageClassName: mayastor-iscsi
+  storageClassName: mayastor-nvmf
diff --git a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go
new file mode 100644
index 000000000..3e2c9c5b6
--- /dev/null
+++ b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go
@@ -0,0 +1,67 @@
+package nvmf_vol_test
+
+import (
+	"fmt"
+	"testing"
+
+	"e2e-basic/common"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+var defTimeoutSecs = "90s"
+
+var g_environment common.TestEnvironment
+
+func nvmfTest() {
+	fmt.Printf("running fio\n")
+	common.RunFio()
+}
+
+func TestNvmfVol(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Nvmf Volume Test Suite")
+}
+
+var _ = Describe("Mayastor nvmf IO test", func() {
+	It("should verify an nvmf volume can process IO", func() {
+		nvmfTest()
+	})
+})
+
+var _ = BeforeSuite(func(done Done) {
+
+	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+
+	g_environment = common.SetupTestEnv()
+
+	common.MkPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf", &g_environment.DynamicClient, &g_environment.KubeInt)
+	common.ApplyDeployYaml("deploy/fio_nvmf.yaml")
+
+	fmt.Printf("waiting for fio\n")
+	Eventually(func() bool {
+		return common.FioReadyPod(&g_environment.K8sClient)
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(true))
+
+	close(done)
+}, 60)
+
+var _ = AfterSuite(func() {
+	// NB This only tears down the local structures for talking to the cluster,
+	// not the kubernetes cluster itself.
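+	// The fio pod and the PVC created in BeforeSuite are removed first,
+	// then TeardownTestEnv stops the local envtest structures.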
+	By("tearing down the test environment")
+
+	fmt.Printf("removing fio pod\n")
+	common.DeleteDeployYaml("deploy/fio_nvmf.yaml")
+
+	fmt.Printf("removing pvc\n")
+	common.RmPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf", &g_environment.DynamicClient, &g_environment.KubeInt)
+
+	common.TeardownTestEnv(&g_environment)
+})
diff --git a/mayastor-test/e2e/setup/bringup-cluster.sh b/mayastor-test/e2e/setup/bringup-cluster.sh
index 8215b54f9..39bb5a9d6 100755
--- a/mayastor-test/e2e/setup/bringup-cluster.sh
+++ b/mayastor-test/e2e/setup/bringup-cluster.sh
@@ -47,12 +47,12 @@ bringup_cluster() {
 
 # Runs in a timeout, so we need to pass in $MASTER_NODE_IP and $REGISTRY_PORT
 wait_for_ready() {
-	while ! kubectl get nodes; do 
+	while ! kubectl get nodes; do
 		sleep 1
 	done
 
 	# Wait for the registry to be accessible
-	while ! nc -z $1 $2; do 
+	while ! nc -z $1 $2; do
 		sleep 1
 	done
 }

From faa06b61ae7cba07c1f921fb11ccb101b2424fa7 Mon Sep 17 00:00:00 2001
From: GlennBullingham
Date: Tue, 1 Dec 2020 14:47:28 +0000
Subject: [PATCH 05/85] Restore image tag value :latest in deployment files

---
 deploy/csi-daemonset.yaml             | 2 +-
 deploy/mayastor-daemonset-config.yaml | 2 +-
 deploy/mayastor-daemonset.yaml        | 2 +-
 deploy/moac-deployment.yaml           | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml
index eb2d32dd4..a8c91b687 100644
--- a/deploy/csi-daemonset.yaml
+++ b/deploy/csi-daemonset.yaml
@@ -28,7 +28,7 @@ spec:
       # the same.
       containers:
       - name: mayastor-csi
-        image: mayadata/mayastor-csi:v0.6.0
+        image: mayadata/mayastor-csi:latest
         imagePullPolicy: Always
         # we need privileged because we mount filesystems and use mknod
         securityContext:
diff --git a/deploy/mayastor-daemonset-config.yaml b/deploy/mayastor-daemonset-config.yaml
index 191a891f4..578a685c4 100644
--- a/deploy/mayastor-daemonset-config.yaml
+++ b/deploy/mayastor-daemonset-config.yaml
@@ -35,7 +35,7 @@ spec:
         command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;']
       containers:
       - name: mayastor
-        image: mayadata/mayastor:v0.6.0
+        image: mayadata/mayastor:latest
         imagePullPolicy: Always
         env:
         - name: MY_NODE_NAME
diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml
index 22539bd0a..57a3f413c 100644
--- a/deploy/mayastor-daemonset.yaml
+++ b/deploy/mayastor-daemonset.yaml
@@ -31,7 +31,7 @@ spec:
        command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;']
      containers:
      - name: mayastor
-        image: mayadata/mayastor:v0.6.0
+        image: mayadata/mayastor:latest
        imagePullPolicy: Always
        env:
        - name: MY_NODE_NAME
diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml
index 7b9a3b50a..90484d4a2 100644
--- a/deploy/moac-deployment.yaml
+++ b/deploy/moac-deployment.yaml
@@ -44,7 +44,7 @@ spec:
          mountPath: /var/lib/csi/sockets/pluginproxy/
 
      - name: moac
-        image: mayadata/moac:v0.6.0
+        image: mayadata/moac:latest
        imagePullPolicy: Always
        args:
        - "--csi-address=$(CSI_ENDPOINT)"

From 1c0f2f70bb8738e112ec96d88c5cb446b431e1eb Mon Sep 17 00:00:00 2001
From: chriswldenyer
Date: Tue, 1 Dec 2020 14:48:03 +0000
Subject: [PATCH 06/85] Move test environment objects to common package

---
 mayastor-test/e2e/common/test.go            | 10 ++--
 mayastor-test/e2e/common/util.go            | 61 ++++++++++-----------
 mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go | 12 ++--
 3 files changed, 40 insertions(+), 43 deletions(-)

diff --git a/mayastor-test/e2e/common/test.go b/mayastor-test/e2e/common/test.go
index f17ec5b98..5ff09cd77 100644
--- a/mayastor-test/e2e/common/test.go
+++ b/mayastor-test/e2e/common/test.go
@@ -28,7 +28,9 @@ type TestEnvironment struct {
 	DynamicClient dynamic.Interface
 }
 
-func SetupTestEnv() TestEnvironment {
+var gTestEnv TestEnvironment
+
+func SetupTestEnv() {
 
 	By("bootstrapping test environment")
 	useCluster := true
@@ -70,7 +72,7 @@ func SetupTestEnv() TestEnvironment {
 	dynamicClient := dynamic.NewForConfigOrDie(restConfig)
 	Expect(dynamicClient).ToNot(BeNil())
 
-	return TestEnvironment{
+	gTestEnv = TestEnvironment{
 		Cfg:           cfg,
 		K8sClient:     k8sClient,
 		KubeInt:       kubeInt,
@@ -80,7 +82,7 @@
 	}
 }
 
-func TeardownTestEnv(env *TestEnvironment) {
-	err := env.TestEnv.Stop()
+func TeardownTestEnv() {
+	err := gTestEnv.TestEnv.Stop()
 	Expect(err).ToNot(HaveOccurred())
 }
diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go
index dd649a5ee..2adbbdaef 100644
--- a/mayastor-test/e2e/common/util.go
+++ b/mayastor-test/e2e/common/util.go
@@ -19,9 +19,6 @@ import (
 	"reflect"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var defTimeoutSecs = "90s"
@@ -54,13 +51,13 @@ type mayastorVolStatus struct {
 	node   string
 }
 
-func GetMSV(uuid string, dynamicClient *dynamic.Interface) *mayastorVolStatus {
+func GetMSV(uuid string) *mayastorVolStatus {
 	msvGVR := schema.GroupVersionResource{
 		Group:    "openebs.io",
 		Version:  "v1alpha1",
 		Resource: "mayastorvolumes",
 	}
-	msv, err := (*dynamicClient).Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
+	msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
 	if err != nil {
 		fmt.Println(err)
 		return nil
@@ -101,14 +98,14 @@
 
 // Check for a deleted Mayastor Volume,
 // the object does not exist if deleted
-func IsMSVDeleted(uuid string, dynamicClient *dynamic.Interface) bool {
+func IsMSVDeleted(uuid string) bool {
 	msvGVR := schema.GroupVersionResource{
 		Group:    "openebs.io",
 		Version:  "v1alpha1",
 		Resource: "mayastorvolumes",
 	}
 
-	msv, err := (*dynamicClient).Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
+	msv, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
 
 	if err != nil {
 		// Unfortunately there is no associated error code so we resort to string comparison
@@ -126,8 +123,8 @@
 // Check for a deleted Persistent Volume Claim,
 // either the object does not exist
 // or the status phase is invalid.
-func IsPVCDeleted(volName string, kubeInt *kubernetes.Interface) bool {
-	pvc, err := (*kubeInt).CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{})
+func IsPVCDeleted(volName string) bool {
+	pvc, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{})
 	if err != nil {
 		// Unfortunately there is no associated error code so we resort to string comparison
 		if strings.HasPrefix(err.Error(), "persistentvolumeclaims") &&
@@ -152,9 +149,9 @@
 // Check for a deleted Persistent Volume,
 // either the object does not exist
 // or the status phase is invalid.
-func IsPVDeleted(volName *string, kubeInt *kubernetes.Interface) bool {
+func IsPVDeleted(volName *string) bool {
 	vn := *volName
-	pv, err := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), vn, metav1.GetOptions{})
+	pv, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), vn, metav1.GetOptions{})
 	if err != nil {
 		// Unfortunately there is no associated error code so we resort to string comparison
 		if strings.HasPrefix(err.Error(), "persistentvolumes") &&
@@ -179,31 +176,31 @@
 }
 
 // Retrieve status phase of a Persistent Volume Claim
-func GetPvcStatusPhase(volname string, kubeInt *kubernetes.Interface) (phase corev1.PersistentVolumeClaimPhase) {
-	pvc, getPvcErr := (*kubeInt).CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{})
+func GetPvcStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) {
+	pvc, getPvcErr := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{})
 	Expect(getPvcErr).To(BeNil())
 	Expect(pvc).ToNot(BeNil())
 	return pvc.Status.Phase
 }
 
 // Retrieve status phase of a Persistent Volume
-func GetPvStatusPhase(volname string, kubeInt *kubernetes.Interface) (phase corev1.PersistentVolumePhase) {
-	pv, getPvErr := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{})
+func GetPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) {
+	pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{})
 	Expect(getPvErr).To(BeNil())
 	Expect(pv).ToNot(BeNil())
 	return pv.Status.Phase
 }
 
 // Retrieve the state of a Mayastor Volume
-func GetMsvState(uuid string, dynamicClient *dynamic.Interface) string {
-	msv := GetMSV(uuid, dynamicClient)
+func GetMsvState(uuid string) string {
+	msv := GetMSV(uuid)
 	Expect(msv).ToNot(BeNil())
 	return fmt.Sprintf("%s", msv.state)
 }
 
 // Retrieve the nexus node hosting the Mayastor Volume
-func GetMsvNode(uuid string, dynamicClient *dynamic.Interface) string {
-	msv := GetMSV(uuid, dynamicClient)
+func GetMsvNode(uuid string) string {
+	msv := GetMSV(uuid)
 	Expect(msv).ToNot(BeNil())
 	return fmt.Sprintf("%s", msv.node)
 }
@@ -212,7 +209,7 @@
 // 1. The PVC status transitions to bound,
 // 2. The associated PV is created and its status transitions bound
 // 3. The associated MV is created and has a State "healthy"
-func MkPVC(volName string, scName string, dynamicClient *dynamic.Interface, kubeInt *kubernetes.Interface) string {
+func MkPVC(volName string, scName string) string {
 	fmt.Printf("creating %s, %s\n", volName, scName)
 	// PVC create options
 	createOpts := &corev1.PersistentVolumeClaim{
@@ -232,7 +229,7 @@
 	}
 
 	// Create the PVC.
-	PVCApi := (*kubeInt).CoreV1().PersistentVolumeClaims
+	PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims
 	_, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{})
 	Expect(createErr).To(BeNil())
 
@@ -243,7 +240,7 @@
 	// Wait for the PVC to be bound.
 	Eventually(func() corev1.PersistentVolumeClaimPhase {
-		return GetPvcStatusPhase(volName, kubeInt)
+		return GetPvcStatusPhase(volName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -256,7 +253,7 @@
 
 	// Wait for the PV to be provisioned
 	Eventually(func() *corev1.PersistentVolume {
-		pv, getPvErr := (*kubeInt).CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+		pv, getPvErr := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
 		if getPvErr != nil {
 			return nil
 		}
@@ -269,13 +266,13 @@
 
 	// Wait for the PV to be bound.
 	Eventually(func() corev1.PersistentVolumePhase {
-		return GetPvStatusPhase(pvc.Spec.VolumeName, kubeInt)
+		return GetPvStatusPhase(pvc.Spec.VolumeName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
 	).Should(Equal(corev1.VolumeBound))
 
-	msv := GetMSV(string(pvc.ObjectMeta.UID), dynamicClient)
+	msv := GetMSV(string(pvc.ObjectMeta.UID))
 	Expect(msv).ToNot(BeNil())
 	return string(pvc.ObjectMeta.UID)
 }
@@ -284,10 +281,10 @@
 // 1. The PVC is deleted
 // 2. The associated PV is deleted
 // 3. The associated MV is deleted
-func RmPVC(volName string, scName string, dynamicClient *dynamic.Interface, kubeInt *kubernetes.Interface) {
+func RmPVC(volName string, scName string) {
 	fmt.Printf("removing %s, %s\n", volName, scName)
 
-	PVCApi := (*kubeInt).CoreV1().PersistentVolumeClaims
+	PVCApi := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims
 
 	// Confirm the PVC has been created.
 	pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
@@ -300,7 +297,7 @@
 
 	// Wait for the PVC to be deleted.
 	Eventually(func() bool {
-		return IsPVCDeleted(volName, kubeInt)
+		return IsPVCDeleted(volName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -308,7 +305,7 @@
 
 	// Wait for the PV to be deleted.
 	Eventually(func() bool {
-		return IsPVDeleted(&(pvc.Spec.VolumeName), kubeInt)
+		return IsPVDeleted(&(pvc.Spec.VolumeName))
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -316,7 +313,7 @@
 
 	// Wait for the MSV to be deleted.
 	Eventually(func() bool {
-		return IsMSVDeleted(string(pvc.ObjectMeta.UID), dynamicClient)
+		return IsMSVDeleted(string(pvc.ObjectMeta.UID))
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -348,9 +345,9 @@
 	Expect(err).ToNot(HaveOccurred())
 }
 
-func FioReadyPod(k8sClient *client.Client) bool {
+func FioReadyPod() bool {
 	var fioPod corev1.Pod
-	if (*k8sClient).Get(context.TODO(), types.NamespacedName{Name: "fio", Namespace: "default"}, &fioPod) != nil {
+	if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: "fio", Namespace: "default"}, &fioPod) != nil {
 		return false
 	}
 	return fioPod.Status.Phase == v1.PodRunning
diff --git a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go
index 3e2c9c5b6..359815f2b 100644
--- a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go
+++ b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go
@@ -14,8 +14,6 @@ import (
 
 var defTimeoutSecs = "90s"
 
-var g_environment common.TestEnvironment
-
 func nvmfTest() {
 	fmt.Printf("running fio\n")
 	common.RunFio()
@@ -36,14 +34,14 @@ var _ = BeforeSuite(func(done Done) {
 
 	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
 
-	g_environment = common.SetupTestEnv()
+	common.SetupTestEnv()
 
-	common.MkPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf", &g_environment.DynamicClient, &g_environment.KubeInt)
+	common.MkPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf")
 	common.ApplyDeployYaml("deploy/fio_nvmf.yaml")
 
 	fmt.Printf("waiting for fio\n")
 	Eventually(func() bool {
-		return common.FioReadyPod(&g_environment.K8sClient)
+		return common.FioReadyPod()
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -61,7 +59,7 @@ var _ = AfterSuite(func() {
 	common.DeleteDeployYaml("deploy/fio_nvmf.yaml")
 
 	fmt.Printf("removing pvc\n")
-	common.RmPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf", &g_environment.DynamicClient, &g_environment.KubeInt)
+	common.RmPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf")
 
-	common.TeardownTestEnv(&g_environment)
+	common.TeardownTestEnv()
 })

From 65c36d9f525c7a6478eef2df30363d542a330b86 Mon Sep 17 00:00:00 2001
From: Antonin Kral
Date: Tue, 1 Dec 2020 16:01:21 +0100
Subject: [PATCH 07/85] Build a mayastor-client image on the top of the
 mayastor (#547)

---
 nix/pkgs/images/default.nix | 7 +++++++
 scripts/release.sh          | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix
index 7f7abab4a..60c2e0230 100644
--- a/nix/pkgs/images/default.nix
+++ b/nix/pkgs/images/default.nix
@@ -129,4 +129,11 @@ rec {
     contents = [ busybox mayastor ];
     config = { Entrypoint = [ "/bin/kiiss" ]; };
   });
+
+  mayastor-client-image = dockerTools.buildImage (servicesImageProps // {
+    name = "mayadata/mayastor-client";
+    contents = [ busybox mayastor ];
+    config = { Entrypoint = [ "/bin/mayastor-client" ]; };
+  });
+
 }
diff --git a/scripts/release.sh b/scripts/release.sh
index ac479b4fd..18d1b479f 100755
--- a/scripts/release.sh
+++ b/scripts/release.sh
@@ -39,7 +39,7 @@ DOCKER="docker"
 NIX_BUILD="nix-build"
 RM="rm"
 SCRIPTDIR=$(dirname "$0")
-IMAGES="mayastor mayastor-csi moac"
+IMAGES="mayastor mayastor-csi mayastor-client moac"
 TAG=`get_tag`
 BRANCH=`git rev-parse --abbrev-ref HEAD`
 UPLOAD=

From d23aa427430a20ed568b8900b80708f956fd0a8e Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Tue, 1 Dec 2020 19:16:34 +0000
Subject: [PATCH 08/85] Refactor for CAS-500

Use and move functions from and to common/util.go.
Add a pvc_stress test that uses fio.
---
 mayastor-test/e2e/common/util.go              | 113 +++++--
 .../e2e/nightly/pvc_stress/pvc_stress_test.go | 301 +++---------
 .../pvc_stress_fio/pvc_stress_fio_test.go     | 190 +++++++++++
 mayastor-test/e2e/nightly/test.sh             |  23 +-
 mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go   |   2 +-
 5 files changed, 340 insertions(+), 289 deletions(-)
 create mode 100644 mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go

diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go
index 2adbbdaef..26be22824 100644
--- a/mayastor-test/e2e/common/util.go
+++ b/mayastor-test/e2e/common/util.go
@@ -45,13 +45,13 @@ func LabelNode(nodename string, label string) {
 }
 
 // Status part of the mayastor volume CRD
-type mayastorVolStatus struct {
-	state  string
-	reason string
-	node   string
+type MayastorVolStatus struct {
+	State  string
+	Reason string
+	Node   string
 }
 
-func GetMSV(uuid string) *mayastorVolStatus {
+func GetMSV(uuid string) *MayastorVolStatus {
 	msvGVR := schema.GroupVersionResource{
 		Group:    "openebs.io",
 		Version:  "v1alpha1",
 		Resource: "mayastorvolumes",
@@ -74,7 +74,7 @@
 	if !found {
 		return nil
 	}
-	msVol := mayastorVolStatus{}
+	msVol := MayastorVolStatus{}
 	v := reflect.ValueOf(status)
 	if v.Kind() == reflect.Map {
 		for _, key := range v.MapKeys() {
@@ -82,13 +82,13 @@
 			val := v.MapIndex(key)
 			switch sKey {
 			case "state":
-				msVol.state = val.Interface().(string)
+				msVol.State = val.Interface().(string)
 				break
 			case "reason":
-				msVol.reason = val.Interface().(string)
+				msVol.Reason = val.Interface().(string)
 				break
 			case "node":
-				msVol.node = val.Interface().(string)
+				msVol.Node = val.Interface().(string)
 				break
 			}
 		}
@@ -149,9 +149,8 @@
 // Check for a deleted Persistent Volume,
 // either the object does not exist
 // or the status phase is invalid.
-func IsPVDeleted(volName *string) bool {
-	vn := *volName
-	pv, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), vn, metav1.GetOptions{})
+func IsPVDeleted(volName string) bool {
+	pv, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{})
 	if err != nil {
 		// Unfortunately there is no associated error code so we resort to string comparison
 		if strings.HasPrefix(err.Error(), "persistentvolumes") &&
@@ -195,14 +194,14 @@
 func GetMsvState(uuid string) string {
 	msv := GetMSV(uuid)
 	Expect(msv).ToNot(BeNil())
-	return fmt.Sprintf("%s", msv.state)
+	return fmt.Sprintf("%s", msv.State)
 }
 
 // Retrieve the nexus node hosting the Mayastor Volume
 func GetMsvNode(uuid string) string {
 	msv := GetMSV(uuid)
 	Expect(msv).ToNot(BeNil())
-	return fmt.Sprintf("%s", msv.node)
+	return fmt.Sprintf("%s", msv.Node)
 }
 
 // Create a PVC and verify that
@@ -305,7 +304,7 @@
 
 	// Wait for the PV to be deleted.
 	Eventually(func() bool {
-		return IsPVDeleted(&(pvc.Spec.VolumeName))
+		return IsPVDeleted(pvc.Spec.VolumeName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
@@ -320,12 +319,13 @@
 	).Should(Equal(true))
 }
 
-func RunFio() {
+func RunFio(podName string, duration int) {
+	argRuntime := fmt.Sprintf("--runtime=%d", duration)
 	cmd := exec.Command(
 		"kubectl",
 		"exec",
 		"-it",
-		"fio",
+		podName,
 		"--",
 		"fio",
 		"--name=benchtest",
@@ -338,7 +338,7 @@
 		"--iodepth=16",
 		"--numjobs=1",
 		"--time_based",
-		"--runtime=20",
+		argRuntime,
 	)
 	cmd.Dir = ""
 	_, err := cmd.CombinedOutput()
@@ -352,3 +352,80 @@
 	}
 	return fioPod.Status.Phase == v1.PodRunning
 }
+
+func IsPodRunning(podName string) bool {
+	var pod corev1.Pod
+	if gTestEnv.K8sClient.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: "default"}, &pod) != nil {
+		return false
+	}
+	return pod.Status.Phase == v1.PodRunning
+}
+
+/// Create a PVC in default namespace, no options and no context
+func CreatePVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
+	return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), pvc, metav1.CreateOptions{})
+}
+
+/// Retrieve a PVC in default namespace, no options and no context
+func GetPVC(volName string) (*v1.PersistentVolumeClaim, error) {
+	return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{})
+}
+
+/// Delete a PVC in default namespace, no options and no context
+func DeletePVC(volName string) error {
+	return gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Delete(context.TODO(), volName, metav1.DeleteOptions{})
+}
+
+/// Retrieve a PV in default namespace, no options and no context
+func GetPV(volName string) (*v1.PersistentVolume, error) {
+	return gTestEnv.KubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{})
+}
+
+/// Create a Pod in default namespace, no options and no context
+func CreatePod(podDef *corev1.Pod) (*corev1.Pod, error) {
+	return gTestEnv.KubeInt.CoreV1().Pods("default").Create(context.TODO(), podDef, metav1.CreateOptions{})
+}
+
+/// Delete a Pod in default namespace, no options and no context
+func DeletePod(podName string) error {
+	return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{})
+}
+
+/// Create a test fio pod in default namespace, no options and no context
+/// mayastor volume is mounted on /volume
+func CreateFioPod(podName string, volName string) (*corev1.Pod, error) {
+	podDef := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      podName,
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:    podName,
+					Image:   "nixery.dev/shell/fio/tini",
+					Command: []string{"tini", "--"},
+					Args:    []string{"sleep", "1000000"},
+					VolumeMounts: []corev1.VolumeMount{
+						{
+							Name:      "ms-volume",
+							MountPath: "/volume",
+						},
+					},
+				},
+			},
+			Volumes: []corev1.Volume{
+				{
+					Name: "ms-volume",
+					VolumeSource: corev1.VolumeSource{
+						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+							ClaimName: volName,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	return CreatePod(&podDef)
+}
diff --git a/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go b/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go
index bcab85213..7cd123b10 100644
--- a/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go
+++ b/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go
@@ -2,207 +2,24 @@
 package pvc_stress_test
 
 import (
-	"context"
 	"fmt"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
-	"strings"
 	"testing"
-	"time"
 
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	Cmn "e2e-basic/common"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	coreV1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/client-go/deprecated/scheme"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"reflect"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )
 
-var cfg *rest.Config
-var k8sClient client.Client
-var kubeInt kubernetes.Interface
-var k8sManager ctrl.Manager
-var testEnv *envtest.Environment
-var dynamicClient dynamic.Interface
 var defTimeoutSecs = "30s"
 
-// Status part of the mayastor volume CRD
-type mayastorVolStatus struct {
-	state  string
-	reason string
-	node   string
-	/* Not required for now.
-	nexus struct {
-		children [ ]map[string]string
-		deviceUri string
-		state string
-	}
-	replicas []map[string]string
-	*/
-}
-
-func getMSV(uuid string) *mayastorVolStatus {
-	msvGVR := schema.GroupVersionResource{
-		Group:    "openebs.io",
-		Version:  "v1alpha1",
-		Resource: "mayastorvolumes",
-	}
-
-	msv, err := dynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
-	if err != nil {
-		fmt.Println(err)
-		return nil
-	}
-
-	if msv == nil {
-		return nil
-	}
-
-	status, found, err := unstructured.NestedFieldCopy(msv.Object, "status")
-	if err != nil {
-		fmt.Println(err)
-		return nil
-	}
-
-	if !found {
-		return nil
-	}
-
-	msVol := mayastorVolStatus{}
-	v := reflect.ValueOf(status)
-	if v.Kind() == reflect.Map {
-		for _, key := range v.MapKeys() {
-			sKey := key.Interface().(string)
-			val := v.MapIndex(key)
-			switch sKey {
-			case "state":
-				msVol.state = val.Interface().(string)
-				break
-			case "reason":
-				msVol.reason = val.Interface().(string)
-				break
-			case "node":
-				msVol.node = val.Interface().(string)
-				break
-			}
-		}
-	}
-	return &msVol
-}
-
-// Check for a deleted Mayastor Volume,
-// the object does not exist if deleted
-func isMSVDeleted(uuid string) bool {
-	msvGVR := schema.GroupVersionResource{
-		Group:    "openebs.io",
-		Version:  "v1alpha1",
-		Resource: "mayastorvolumes",
-	}
-
-	msv, err := dynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
-
-	if err != nil {
-		// Unfortunately there is no associated error code so we resort to string comparison
-		if strings.HasPrefix(err.Error(), "mayastorvolumes.openebs.io") &&
-			strings.HasSuffix(err.Error(), " not found") {
-			return true
-		}
-	}
-
-	Expect(err).To(BeNil())
-	Expect(msv).ToNot(BeNil())
-	return false
-}
-
-// Check for a deleted Persistent Volume Claim,
-// either the object does not exist
-// or the status phase is invalid.
-func isPVCDeleted(volName string) bool {
-	pvc, err := kubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volName, metav1.GetOptions{})
-	if err != nil {
-		// Unfortunately there is no associated error code so we resort to string comparison
-		if strings.HasPrefix(err.Error(), "persistentvolumeclaims") &&
-			strings.HasSuffix(err.Error(), " not found") {
-			return true
-		}
-	}
-	// After the PVC has been deleted it may still accessible, but status phase will be invalid
-	Expect(err).To(BeNil())
-	Expect(pvc).ToNot(BeNil())
-	switch pvc.Status.Phase {
-	case
-		corev1.ClaimBound,
-		corev1.ClaimPending,
-		corev1.ClaimLost:
-		return false
-	default:
-		return true
-	}
-}
-
-// Check for a deleted Persistent Volume,
-// either the object does not exist
-// or the status phase is invalid.
-func isPVDeleted(volName string) bool {
-	pv, err := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volName, metav1.GetOptions{})
-	if err != nil {
-		// Unfortunately there is no associated error code so we resort to string comparison
-		if strings.HasPrefix(err.Error(), "persistentvolumes") &&
-			strings.HasSuffix(err.Error(), " not found") {
-			return true
-		}
-	}
-	// After the PV has been deleted it may still accessible, but status phase will be invalid
-	Expect(err).To(BeNil())
-	Expect(pv).ToNot(BeNil())
-	switch pv.Status.Phase {
-	case
-		corev1.VolumeBound,
-		corev1.VolumeAvailable,
-		corev1.VolumeFailed,
-		corev1.VolumePending,
-		corev1.VolumeReleased:
-		return false
-	default:
-		return true
-	}
-}
-
-// Retrieve status phase of a Persistent Volume Claim
-func getPvcClaimStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) {
-	pvc, getPvcErr := kubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{})
-	Expect(getPvcErr).To(BeNil())
-	Expect(pvc).ToNot(BeNil())
-	return pvc.Status.Phase
-}
-
-// Retrieve status phase of a Persistent Volume
-func getPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) {
-	pv, getPvErr := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), volname, metav1.GetOptions{})
-	Expect(getPvErr).To(BeNil())
-	Expect(pv).ToNot(BeNil())
-	return pv.Status.Phase
-}
-
-// Retrieve the state of a Mayastor Volume
-func getMsvState(uuid string) (state string) {
-	msv := getMSV(uuid)
-	Expect(msv).ToNot(BeNil())
-	return msv.state
-}
-
 // Create a PVC and verify that
 // 1. The PVC status transitions to bound,
 // 2. The associated PV is created and its status transitions bound
@@ -214,48 +31,47 @@
 func testPVC(volName string, scName string) {
 	fmt.Printf("%s, %s\n", volName, scName)
 	// PVC create options
-	createOpts := &corev1.PersistentVolumeClaim{
-		ObjectMeta: metav1.ObjectMeta{
+	createOpts := &coreV1.PersistentVolumeClaim{
+		ObjectMeta: metaV1.ObjectMeta{
 			Name:      volName,
 			Namespace: "default",
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
+		Spec: coreV1.PersistentVolumeClaimSpec{
 			StorageClassName: &scName,
-			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-			Resources: corev1.ResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: resource.MustParse("64Mi"),
+			AccessModes:      []coreV1.PersistentVolumeAccessMode{coreV1.ReadWriteOnce},
+			Resources: coreV1.ResourceRequirements{
+				Requests: coreV1.ResourceList{
+					coreV1.ResourceStorage: resource.MustParse("64Mi"),
 				},
 			},
 		},
 	}
 
 	// Create the PVC.
-	PVCApi := kubeInt.CoreV1().PersistentVolumeClaims
-	_, createErr := PVCApi("default").Create(context.TODO(), createOpts, metav1.CreateOptions{})
+	_, createErr := Cmn.CreatePVC(createOpts)
 	Expect(createErr).To(BeNil())
 
 	// Confirm the PVC has been created.
-	pvc, getPvcErr := PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	pvc, getPvcErr := Cmn.GetPVC(volName)
 	Expect(getPvcErr).To(BeNil())
 	Expect(pvc).ToNot(BeNil())
 
 	// Wait for the PVC to be bound.
-	Eventually(func() corev1.PersistentVolumeClaimPhase {
-		return getPvcClaimStatusPhase(volName)
+	Eventually(func() coreV1.PersistentVolumeClaimPhase {
+		return Cmn.GetPvcStatusPhase(volName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
-	).Should(Equal(corev1.ClaimBound))
+	).Should(Equal(coreV1.ClaimBound))
 
 	// Refresh the PVC contents, so that we can get the PV name.
-	pvc, getPvcErr = PVCApi("default").Get(context.TODO(), volName, metav1.GetOptions{})
+	pvc, getPvcErr = Cmn.GetPVC(volName)
 	Expect(getPvcErr).To(BeNil())
 	Expect(pvc).ToNot(BeNil())
 
 	// Wait for the PV to be provisioned
-	Eventually(func() *corev1.PersistentVolume {
-		pv, getPvErr := kubeInt.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+	Eventually(func() *coreV1.PersistentVolume {
+		pv, getPvErr := Cmn.GetPV(pvc.Spec.VolumeName)
 		if getPvErr != nil {
 			return nil
 		}
 		return pv
 
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
 	).Should(Not(BeNil()))
 
@@ -267,32 +83,36 @@
 	// Wait for the PV to be bound.
-	Eventually(func() corev1.PersistentVolumePhase {
-		return getPvStatusPhase(pvc.Spec.VolumeName)
+	Eventually(func() coreV1.PersistentVolumePhase {
+		return Cmn.GetPvStatusPhase(pvc.Spec.VolumeName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
-	).Should(Equal(corev1.VolumeBound))
+	).Should(Equal(coreV1.VolumeBound))
 
-	msv := getMSV(string(pvc.ObjectMeta.UID))
-	Expect(msv).ToNot(BeNil())
-	Expect(msv.state).Should(Equal("healthy"))
+	// Wait for the MSV to be provisioned
+	Eventually(func() *Cmn.MayastorVolStatus {
+		return Cmn.GetMSV(string(pvc.ObjectMeta.UID))
+	},
+		defTimeoutSecs, //timeout
+		"1s",           // polling interval
+	).Should(Not(BeNil()))
 
 	// Wait for the MSV to be healthy
 	Eventually(func() string {
-		return getMsvState(string(pvc.ObjectMeta.UID))
+		return Cmn.GetMsvState(string(pvc.ObjectMeta.UID))
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
 	).Should(Equal("healthy"))
 
 	// Delete the PVC
-	deleteErr := PVCApi("default").Delete(context.TODO(), volName, metav1.DeleteOptions{})
+	deleteErr := Cmn.DeletePVC(volName)
 	Expect(deleteErr).To(BeNil())
 
 	// Wait for the PVC to be deleted.
 	Eventually(func() bool {
-		return isPVCDeleted(volName)
+		return Cmn.IsPVCDeleted(volName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
 	).Should(Equal(true))
 
 	// Wait for the PV to be deleted.
 	Eventually(func() bool {
-		return isPVDeleted(pvc.Spec.VolumeName)
+		return Cmn.IsPVDeleted(pvc.Spec.VolumeName)
 	},
 		defTimeoutSecs, // timeout
 		"1s",           // polling interval
 	).Should(Equal(true))
 
 	// Wait for the MSV to be deleted.
Eventually(func() bool { - return isMSVDeleted(string(pvc.ObjectMeta.UID)) + return Cmn.IsMSVDeleted(string(pvc.ObjectMeta.UID)) }, defTimeoutSecs, // timeout "1s", // polling interval @@ -319,9 +139,6 @@ func stressTestPVC() { for ix := 0; ix < 100; ix++ { testPVC(fmt.Sprintf("stress-pvc-nvmf-%d", ix), "mayastor-nvmf") testPVC(fmt.Sprintf("stress-pvc-iscsi-%d", ix), "mayastor-iscsi") - // FIXME: Without this delay getPvcClaimStatusPhase returns Pending - // even though kubectl shows that the pvc is Bound. - //pause() } } @@ -339,46 +156,7 @@ var _ = Describe("Mayastor PVC Stress test", func() { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - By("bootstrapping test environment") - useCluster := true - testEnv = &envtest.Environment{ - UseExistingCluster: &useCluster, - AttachControlPlaneOutput: true, - } - - var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) - }() - - mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer mgrSyncCtxCancel() - if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { - fmt.Println("Failed to sync") - } - - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) - - restConfig := config.GetConfigOrDie() - Expect(restConfig).ToNot(BeNil()) - - kubeInt = kubernetes.NewForConfigOrDie(restConfig) - Expect(kubeInt).ToNot(BeNil()) - - dynamicClient = dynamic.NewForConfigOrDie(restConfig) - Expect(dynamicClient).ToNot(BeNil()) - + Cmn.SetupTestEnv() close(done) }, 60) @@ -386,6 +164,5 @@ var _ = AfterSuite(func() { // NB This only tears down the local structures for talking to the cluster, // not the kubernetes cluster itself. By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) + Cmn.TeardownTestEnv() }) diff --git a/mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go b/mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go new file mode 100644 index 000000000..32da92991 --- /dev/null +++ b/mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go @@ -0,0 +1,190 @@ +// JIRA: CAS-500 +package pvc_stress_fio_test + +import ( + "fmt" + "testing" + + Cmn "e2e-basic/common" + + coreV1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var defTimeoutSecs = "30s" + +// Create a PVC and verify that +// 1. The PVC status transitions to bound, +// 2. The associated PV is created and its status transitions bound +// 3. The associated MV is created and has a State "healthy" +// 4. That a test application (fio) can read and write to the volume +// then Delete the PVC and verify that +// 1. The PVC is deleted +// 2. The associated PV is deleted +// 3. 
The associated MV is deleted +func testPVC(volName string, scName string) { + fmt.Printf("%s, %s\n", volName, scName) + // PVC create options + createOpts := &coreV1.PersistentVolumeClaim{ + ObjectMeta: metaV1.ObjectMeta{ + Name: volName, + Namespace: "default", + }, + Spec: coreV1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + AccessModes: []coreV1.PersistentVolumeAccessMode{coreV1.ReadWriteOnce}, + Resources: coreV1.ResourceRequirements{ + Requests: coreV1.ResourceList{ + coreV1.ResourceStorage: resource.MustParse("64Mi"), + }, + }, + }, + } + + // Create the PVC. + _, createErr := Cmn.CreatePVC(createOpts) + Expect(createErr).To(BeNil()) + + // Confirm the PVC has been created. + pvc, getPvcErr := Cmn.GetPVC(volName) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PVC to be bound. + Eventually(func() coreV1.PersistentVolumeClaimPhase { + return Cmn.GetPvcStatusPhase(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(coreV1.ClaimBound)) + + // Refresh the PVC contents, so that we can get the PV name. + pvc, getPvcErr = Cmn.GetPVC(volName) + Expect(getPvcErr).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + // Wait for the PV to be provisioned + Eventually(func() *coreV1.PersistentVolume { + pv, getPvErr := Cmn.GetPV(pvc.Spec.VolumeName) + if getPvErr != nil { + return nil + } + return pv + + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Not(BeNil())) + + // Wait for the PV to be bound. + Eventually(func() coreV1.PersistentVolumePhase { + return Cmn.GetPvStatusPhase(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(coreV1.VolumeBound)) + + // Wait for the MSV to be provisioned + Eventually(func() *Cmn.MayastorVolStatus { + return Cmn.GetMSV(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, //timeout + "1s", // polling interval + ).Should(Not(BeNil())) + + // Wait for the MSV to be healthy + Eventually(func() string { + return Cmn.GetMsvState(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal("healthy")) + + // Create the fio Pod + fioPodName := "fio-" + volName + pod, err := Cmn.CreateFioPod(fioPodName, volName) + Expect(err).ToNot(HaveOccurred()) + Expect(pod).ToNot(BeNil()) + + // Wait for the fio Pod to transition to running + Eventually(func() bool { + return Cmn.IsPodRunning(fioPodName) + }, + defTimeoutSecs, + "1s", + ).Should(Equal(true)) + + // Run the fio test + Cmn.RunFio(fioPodName, 5) + + // Delete the fio pod + err = Cmn.DeletePod(fioPodName) + Expect(err).ToNot(HaveOccurred()) + + // Delete the PVC + deleteErr := Cmn.DeletePVC(volName) + Expect(deleteErr).To(BeNil()) + + // Wait for the PVC to be deleted. + Eventually(func() bool { + return Cmn.IsPVCDeleted(volName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the PV to be deleted. + Eventually(func() bool { + return Cmn.IsPVDeleted(pvc.Spec.VolumeName) + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + // Wait for the MSV to be deleted. 
+	Eventually(func() bool {
+		return Cmn.IsMSVDeleted(string(pvc.ObjectMeta.UID))
+	},
+		defTimeoutSecs, // timeout
+		"1s",           // polling interval
+	).Should(Equal(true))
+}
+
+func stressTestPVC() {
+	for ix := 0; ix < 10; ix++ {
+		testPVC(fmt.Sprintf("stress-pvc-nvmf-%d", ix), "mayastor-nvmf")
+		testPVC(fmt.Sprintf("stress-pvc-iscsi-%d", ix), "mayastor-iscsi")
+	}
+}
+
+func TestPVCStress(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "PVC Stress Test Suite")
+}
+
+var _ = Describe("Mayastor PVC Stress test with fio", func() {
+	It("should stress test use of PVCs provisioned over iSCSI and NVMe-of", func() {
+		stressTestPVC()
+	})
+})
+
+var _ = BeforeSuite(func(done Done) {
+	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+
+	Cmn.SetupTestEnv()
+	close(done)
+}, 60)
+
+var _ = AfterSuite(func() {
+	// NB This only tears down the local structures for talking to the cluster,
+	// not the kubernetes cluster itself.
+	By("tearing down the test environment")
+	Cmn.TeardownTestEnv()
+})
diff --git a/mayastor-test/e2e/nightly/test.sh b/mayastor-test/e2e/nightly/test.sh
index 70eeca678..d0489eb8e 100755
--- a/mayastor-test/e2e/nightly/test.sh
+++ b/mayastor-test/e2e/nightly/test.sh
@@ -1,12 +1,19 @@
 #!/usr/bin/env bash
 
-# For stress tests the default go test timeout of 10 minutes may be
-# insufficient.
-# We start with a timeout value of 0 and bump up the value by addsing
-# the number of seconds for each test.
-timeout=0
-#pvc_stress run duration is around 7 minutes, add 10 minutes to handle
-#unexpected delays.
-timeout=$(( timeout + 600 ))
+# The default go test timeout of 10 minutes may be insufficient.
+# We start with a timeout value of 60 seconds and bump it up by
+# adding a number of seconds for each test.
+timeout=60
+
+# pvc_stress run duration is around 7 minutes for 100 iterations;
+# add 8 minutes to handle variations in timing.
+timeout=$(( timeout + 480 ))
+
+# pvc_stress_fio run duration is around 11 minutes for 10 iterations,
+# with the fio duration set to 5 seconds;
+# add 12 minutes to handle variations in timing.
+timeout=$(( timeout + 720 ))
+
+# FIXME: we want to run pvc_stress before pvc_stress_fio.
 go test ./...
--timeout "${timeout}s" diff --git a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go index 359815f2b..79e077a07 100644 --- a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go +++ b/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go @@ -16,7 +16,7 @@ var defTimeoutSecs = "90s" func nvmfTest() { fmt.Printf("running fio\n") - common.RunFio() + common.RunFio("fio", 20) } func TestNvmfVol(t *testing.T) { From 40d6a38cea2ca82c7f1bc28d504f1ca7bba1dfb3 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 30 Nov 2020 11:43:42 +0000 Subject: [PATCH 09/85] e2e tests for a disconnected replica in a 2-replica volume --- mayastor-test/e2e/common/io_connect_node.sh | 38 ++++++ mayastor-test/e2e/node_disconnect/README.md | 27 ++++ .../e2e/node_disconnect/deploy/fio_iscsi.yaml | 21 +++ .../e2e/node_disconnect/deploy/fio_nvmf.yaml | 21 +++ .../deploy/moac-deployment-refuge.yaml | 72 ++++++++++ .../deploy/storage-class-2-repl.yaml | 17 +++ .../node_disconnect_iscsi_test.go | 129 ++++++++++++++++++ .../node_disconnect_nvmf_test.go | 129 ++++++++++++++++++ 8 files changed, 454 insertions(+) create mode 100644 mayastor-test/e2e/common/io_connect_node.sh create mode 100644 mayastor-test/e2e/node_disconnect/README.md create mode 100644 mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml create mode 100644 mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml create mode 100644 mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml create mode 100644 mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go diff --git a/mayastor-test/e2e/common/io_connect_node.sh b/mayastor-test/e2e/common/io_connect_node.sh new file mode 100644 index 000000000..d6fcb9c7c --- /dev/null +++ b/mayastor-test/e2e/common/io_connect_node.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +set -e + +# Script to disconnect a node from another node using iptables +# $1 is the node-name to isolate/restore +# $2 is the other node-name +# $3 is "DISCONNECT" or "RECONNECT" +# assumes the nodes use a known fixed set of IP addresses and node names + +# edit the line below, if necessary, or set KUBESPRAY_REPO when calling +KUBESPRAY_REPO="${KUBESPRAY_REPO:-$HOME/work/kubespray}" + +if [ $# -ne 3 ]; + then echo "specify node-name, other node-name and action (DISCONNECT or RECONNECT)" + exit 1 +fi + +if [ "$3" = "DISCONNECT" ]; then + action="I" +elif [ "$3" = "RECONNECT" ]; then + action="D" +else + echo "specify action (DISCONNECT or RECONNECT)" + exit 1 +fi + +cd ${KUBESPRAY_REPO} + +nodename=$1 +other_nodename=$2 +other_node_suffix=${other_nodename: -1} +other_ip=172.18.8.10${other_node_suffix} + +# apply the rule to block/unblock it +vagrant ssh ${nodename} -c "sh -c 'sudo iptables -${action} INPUT -s ${other_ip} -j REJECT'" +vagrant ssh ${nodename} -c "sh -c 'sudo iptables -${action} OUTPUT -s ${other_ip} -j REJECT'" + diff --git a/mayastor-test/e2e/node_disconnect/README.md b/mayastor-test/e2e/node_disconnect/README.md new file mode 100644 index 000000000..a2105a61c --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/README.md @@ -0,0 +1,27 @@ +## Note +The tests in this folder are not currently deployable by the CI system +as the automated install does not provide the pre-requisites below. 
+
+## Pre-requisites for this test
+
+* A 3-node cluster with nodes k8s-1 k8s-2 and k8s-3 located at
+  172.18.8.101-3 respectively
+* k8s-1 is the master node; it does NOT have the label openebs.io/engine
+  (to avoid having to disconnect the master node), and is labelled
+  openebs.io/podrefuge=true
+* moac is deployed with the following selector to keep it on k8s-1:
+
+```
+  nodeSelector:
+    openebs.io/podrefuge: "true"
+```
+
+  see `deploy/moac-deployment-refuge.yaml`
+
+* k8s-2 and k8s-3 are labelled openebs.io/engine=mayastor, as usual
+* the cluster is deployed using vagrant via bringup_cluster.sh and
+  KUBESPRAY_REPO is correctly defined in ../common/io_connect_node.sh
+* mayastor is installed on the cluster, with mayastor instances on
+  k8s-2 and k8s-3 only (due to the node labels)
+* the storage classes defined in deploy/storage-class-2-repl.yaml
+  have been applied (replica count of 2).
diff --git a/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml b/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml
new file mode 100644
index 000000000..59b0f92b6
--- /dev/null
+++ b/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml
@@ -0,0 +1,21 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: fio
+spec:
+  volumes:
+    - name: ms-volume
+      persistentVolumeClaim:
+        claimName: loss-test-pvc-iscsi
+  containers:
+    - name: fio
+      image: nixery.dev/shell/fio/tini
+      command: [ "tini", "--" ]
+      args:
+        - sleep
+        - "1000000"
+      volumeMounts:
+        - mountPath: "/volume"
+          name: ms-volume
+  nodeSelector:
+    openebs.io/podrefuge: "true"
diff --git a/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml b/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml
new file mode 100644
index 000000000..69c35a570
--- /dev/null
+++ b/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml
@@ -0,0 +1,21 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: fio
+spec:
+  volumes:
+    - name: ms-volume
+      persistentVolumeClaim:
+        claimName: loss-test-pvc-nvmf
+  containers:
+    - name: fio
+      image: nixery.dev/shell/fio/tini
+      command: [ "tini", "--" ]
+      args:
+        - sleep
+        - "1000000"
+      volumeMounts:
+        - mountPath: "/volume"
+          name: ms-volume
+  nodeSelector:
+    openebs.io/podrefuge: "true"
diff --git a/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml b/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml
new file mode 100644
index 000000000..2aefd59ae
--- /dev/null
+++ b/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml
@@ -0,0 +1,72 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: moac
+  namespace: mayastor
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: moac
+  template:
+    metadata:
+      labels:
+        app: moac
+    spec:
+      nodeSelector:
+        openebs.io/podrefuge: "true"
+      serviceAccount: moac
+      containers:
+        - name: csi-provisioner
+          image: quay.io/k8scsi/csi-provisioner:v1.6.0
+          args:
+            - "--v=2"
+            - "--csi-address=$(ADDRESS)"
+            - "--feature-gates=Topology=true"
+          env:
+            - name: ADDRESS
+              value: /var/lib/csi/sockets/pluginproxy/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/csi/sockets/pluginproxy/
+
+        - name: csi-attacher
+          image: quay.io/k8scsi/csi-attacher:v2.2.0
+          args:
+            - "--v=2"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /var/lib/csi/sockets/pluginproxy/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/csi/sockets/pluginproxy/
+
+        - name: moac
+          image: 172.18.8.101:30291/mayadata/moac:ci
+          imagePullPolicy: Always
+          args:
+            - 
"--csi-address=$(CSI_ENDPOINT)" + - "--namespace=$(MY_POD_NAMESPACE)" + - "--port=4000" + - "--message-bus=nats" + - "-v" + env: + - name: CSI_ENDPOINT + value: /var/lib/csi/sockets/pluginproxy/csi.sock + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - containerPort: 4000 + protocol: TCP + name: "rest-api" + volumes: + - name: socket-dir + emptyDir: diff --git a/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml b/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml new file mode 100644 index 000000000..0c827ec72 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml @@ -0,0 +1,17 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: mayastor-iscsi +parameters: + repl: '2' + protocol: 'iscsi' +provisioner: io.openebs.csi-mayastor +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: mayastor-nvmf +parameters: + repl: '2' + protocol: 'nvmf' +provisioner: io.openebs.csi-mayastor diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go new file mode 100644 index 000000000..58bec1531 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go @@ -0,0 +1,129 @@ +package node_disconnect_iscsi_test + +// TODO factor out remaining code duplicated with node_disconnect_nvmf_test + +import ( + "e2e-basic/common" + "fmt" + "os/exec" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + defTimeoutSecs = "90s" + g_nodeToKill = "" + g_nexusNode = "" + g_uuid = "" +) + +func disconnectNode(vmname string, nexusNode string) { + cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "DISCONNECT") + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +func reconnectNode(vmname string, nexusNode string, checkError bool) { + cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "RECONNECT") + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + if checkError { + Expect(err).ToNot(HaveOccurred()) + } +} + +func lossTest() { + g_nexusNode = common.GetMsvNode(g_uuid) + fmt.Printf("nexus node is \"%s\"\n", g_nexusNode) + + if g_nexusNode == "k8s-2" { + g_nodeToKill = "k8s-3" + } else if g_nexusNode == "k8s-3" { + g_nodeToKill = "k8s-2" + } else { + fmt.Printf("Unexpected nexus node name\n") + Expect(false) + } + fmt.Printf("node to kill is \"%s\"\n", g_nodeToKill) + + fmt.Printf("running spawned fio\n") + go common.RunFio("fio", 20) + + time.Sleep(5 * time.Second) + fmt.Printf("disconnecting \"%s\"\n", g_nodeToKill) + disconnectNode(g_nodeToKill, g_nexusNode) + disconnectNode(g_nodeToKill, "k8s-1") + + fmt.Printf("waiting 60s for disconnection to affect the nexus\n") + time.Sleep(60 * time.Second) + + fmt.Printf("running fio while node is disconnected\n") + common.RunFio("fio", 20) + + //volumeState = getMsvState(g_uuid) + //fmt.Printf("Volume state is \"%s\"\n", volumeState) ///// FIXME - this reports an incorrect value + + fmt.Printf("reconnecting \"%s\"\n", g_nodeToKill) + reconnectNode(g_nodeToKill, g_nexusNode, true) + reconnectNode(g_nodeToKill, "k8s-1", true) + + fmt.Printf("running 
fio when node is reconnected\n") + common.RunFio("fio", 20) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss Test Suite") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + common.SetupTestEnv() + + g_uuid = common.MkPVC(fmt.Sprintf("loss-test-pvc-iscsi"), "mayastor-iscsi") + + common.ApplyDeployYaml("../deploy/fio_iscsi.yaml") + + fmt.Printf("waiting for fio\n") + Eventually(func() bool { + return common.FioReadyPod() + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + fmt.Printf("reconnecting %s\n", g_nodeToKill) + reconnectNode(g_nodeToKill, g_nexusNode, false) + reconnectNode(g_nodeToKill, "k8s-1", false) + + fmt.Printf("removing fio pod\n") + common.DeleteDeployYaml("../deploy/fio_iscsi.yaml") + + fmt.Printf("removing pvc\n") + common.RmPVC(fmt.Sprintf("loss-test-pvc-iscsi"), "mayastor-iscsi") + + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go new file mode 100644 index 000000000..5b2d80b05 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go @@ -0,0 +1,129 @@ +package node_disconnect_nvmf_test + +// TODO factor out remaining code duplicated with node_disconnect_iscsi_test + +import ( + "e2e-basic/common" + "fmt" + "os/exec" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + defTimeoutSecs = "90s" + g_nodeToKill = "" + g_nexusNode = "" + g_uuid = "" +) + +func disconnectNode(vmname string, nexusNode string) { + cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "DISCONNECT") + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +func reconnectNode(vmname string, nexusNode string, checkError bool) { + cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "RECONNECT") + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + if checkError { + Expect(err).ToNot(HaveOccurred()) + } +} + +func lossTest() { + g_nexusNode = common.GetMsvNode(g_uuid) + fmt.Printf("nexus node is \"%s\"\n", g_nexusNode) + + if g_nexusNode == "k8s-2" { + g_nodeToKill = "k8s-3" + } else if g_nexusNode == "k8s-3" { + g_nodeToKill = "k8s-2" + } else { + fmt.Printf("Unexpected nexus node name\n") + Expect(false) + } + fmt.Printf("node to kill is \"%s\"\n", g_nodeToKill) + + fmt.Printf("running spawned fio\n") + go common.RunFio("fio", 20) + + time.Sleep(5 * time.Second) + fmt.Printf("disconnecting \"%s\"\n", g_nodeToKill) + disconnectNode(g_nodeToKill, g_nexusNode) + disconnectNode(g_nodeToKill, "k8s-1") + + fmt.Printf("waiting 60s for disconnection to affect the nexus\n") + time.Sleep(60 * time.Second) + + fmt.Printf("running fio while node is disconnected\n") + common.RunFio("fio", 20) + + //volumeState = getMsvState(g_uuid) + //fmt.Printf("Volume state is \"%s\"\n", volumeState) ///// FIXME - this reports an incorrect value + + fmt.Printf("reconnecting \"%s\"\n", g_nodeToKill) + reconnectNode(g_nodeToKill, g_nexusNode, true) + reconnectNode(g_nodeToKill, "k8s-1", true) + + fmt.Printf("running fio when node is reconnected\n") + common.RunFio("fio", 20) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss Test Suite") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + common.SetupTestEnv() + + g_uuid = common.MkPVC(fmt.Sprintf("loss-test-pvc-nvmf"), "mayastor-nvmf") + + common.ApplyDeployYaml("../deploy/fio_nvmf.yaml") + + fmt.Printf("waiting for fio\n") + Eventually(func() bool { + return common.FioReadyPod() + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
+	By("tearing down the test environment")
+
+	// ensure node is reconnected in the event of a test failure
+	fmt.Printf("reconnecting %s\n", g_nodeToKill)
+	reconnectNode(g_nodeToKill, g_nexusNode, false)
+	reconnectNode(g_nodeToKill, "k8s-1", false)
+
+	fmt.Printf("removing fio pod\n")
+	common.DeleteDeployYaml("../deploy/fio_nvmf.yaml")
+
+	fmt.Printf("removing pvc\n")
+	common.RmPVC(fmt.Sprintf("loss-test-pvc-nvmf"), "mayastor-nvmf")
+
+	common.TeardownTestEnv()
+})

From 15d8de5685e28b2f9f6bf5bda9028624556882e1 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Thu, 3 Dec 2020 15:51:08 +0100
Subject: [PATCH 10/85] Fix for a recent regression to develop branch testing

The current code works, but when develop should not be tested and
error() is called in the init stage, the final build result is ERROR
instead of SUCCESS. This fix hasn't been tested, but according to Stack
Overflow it should work ;-)
---
 Jenkinsfile | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 7dfe51601..59da9eeb6 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -59,6 +59,10 @@ String cron_schedule = BRANCH_NAME == "develop" ? "0 2 * * *" : ""
 
 pipeline {
   agent none
+  options {
+    timeout(time: 2, unit: 'HOURS')
+    parallelsAlwaysFailFast()
+  }
   triggers {
     cron(cron_schedule)
   }
@@ -66,6 +70,7 @@ pipeline {
   stages {
     stage('init') {
       agent { label 'nixos-mayastor' }
+      options { skipDefaultCheckout true }
       steps {
         // TODO: We want to disable built-in github commit notifications.
         // skip-notifications-trait plugin in combination with checkout scm step
         // should do that but not sure how exactly.
         checkout scm
         script {
           // Will ABORT current job for cases when we don't want to build
           if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') &&
               BRANCH_NAME == "develop") {
             print "INFO: Branch Indexing, aborting job."
-            currentBuild.result = 'ABORTED'
-            error('Stopping early')
+            currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS)
+            sleep(3) // Interrupt is not blocking and does not take effect immediately.
+            return
           }
         }
         updateGithubCommitStatus(env.GIT_COMMIT, 'Test started', 'pending')

From b97f9fcae7053d6b9f9f9e2ed1c6e19f9f5fc17f Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Fri, 4 Dec 2020 15:47:11 +0100
Subject: [PATCH 11/85] The fix for commit status updates.

The fix is a combination of changes to the Jenkinsfile and changes to
the Jenkins configuration made through the UI.
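The UI part disables the built-in GitHub commit notifications (the TODO
removed below), leaving the pipeline to own the status context end to
end: the init stage marks the commit as pending, and the post section
publishes the final result. A condensed sketch of the two steps — this
only mirrors the change in the diff below; the context string is the one
Jenkins itself used to report under:

```groovy
// init stage: flag the commit as pending as soon as the pipeline starts
step([
    $class: 'GitHubSetCommitStatusBuilder',
    contextSource: [$class: 'ManuallyEnteredCommitContextSource',
                    context: 'continuous-integration/jenkins/branch'],
    statusMessage: [content: 'Pipeline started']
])

// post section: publish the final build result under the same context
step([
    $class: 'GitHubCommitStatusSetter',
    errorHandlers: [[$class: 'ChangingBuildStatusErrorHandler', result: 'UNSTABLE']],
    contextSource: [$class: 'ManuallyEnteredCommitContextSource',
                    context: 'continuous-integration/jenkins/branch'],
    statusResultSource: [$class: 'ConditionalStatusResultSource',
                         results: [[$class: 'AnyBuildResult',
                                    message: 'Pipeline result',
                                    state: currentBuild.getResult()]]]
])
```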
--- Jenkinsfile | 53 ++++++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 59da9eeb6..5ae70ca80 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,25 +1,5 @@ #!/usr/bin/env groovy -// Update status of a commit in github -def updateGithubCommitStatus(commit, msg, state) { - step([ - $class: 'GitHubCommitStatusSetter', - reposSource: [$class: "ManuallyEnteredRepositorySource", url: "https://github.com/openebs/Mayastor.git"], - commitShaSource: [$class: "ManuallyEnteredShaSource", sha: commit], - errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], - contextSource: [ - $class: 'ManuallyEnteredCommitContextSource', - context: 'continuous-integration/jenkins/branch' - ], - statusResultSource: [ - $class: 'ConditionalStatusResultSource', - results: [ - [$class: 'AnyBuildResult', message: msg, state: state] - ] - ] - ]) -} - // Searches previous builds to find first non aborted one def getLastNonAbortedBuild(build) { if (build == null) { @@ -70,12 +50,7 @@ pipeline { stages { stage('init') { agent { label 'nixos-mayastor' } - options { skipDefaultCheckout true } steps { - // TODO: We want to disable built-in github commit notifications. - // skip-notifications-trait plugin in combination with checkout scm step - // should do that but not sure how exactly. - checkout scm script { // Will ABORT current job for cases when we don't want to build if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && @@ -86,7 +61,14 @@ pipeline { return } } - updateGithubCommitStatus(env.GIT_COMMIT, 'Test started', 'pending') + step([ + $class: 'GitHubSetCommitStatusBuilder', + contextSource: [ + $class: 'ManuallyEnteredCommitContextSource', + context: 'continuous-integration/jenkins/branch' + ], + statusMessage: [ content: 'Pipeline started' ] + ]) } } stage('linter') { @@ -201,11 +183,20 @@ pipeline { // If no tests were run then we should neither be updating commit // status in github nor send any slack messages if (currentBuild.result != null) { - if (currentBuild.getResult() == 'SUCCESS') { - updateGithubCommitStatus(env.GIT_COMMIT, 'Looks good', 'success') - } else { - updateGithubCommitStatus(env.GIT_COMMIT, 'Test failed', 'failure') - } + step([ + $class: 'GitHubCommitStatusSetter', + errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], + contextSource: [ + $class: 'ManuallyEnteredCommitContextSource', + context: 'continuous-integration/jenkins/branch' + ], + statusResultSource: [ + $class: 'ConditionalStatusResultSource', + results: [ + [$class: 'AnyBuildResult', message: 'Pipeline result', state: currentBuild.getResult()] + ] + ] + ]) if (env.BRANCH_NAME == 'develop') { notifySlackUponStateChange(currentBuild) } From c7e475f507e12165f6ebdf1d8a800944e9a8e364 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Thu, 3 Dec 2020 08:53:54 +0000 Subject: [PATCH 12/85] Make disconnect e2e tests more independent of node name or IP addresses. More use of common code in disconnect tests. Added drop connection tests. 
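The DROP and REJECT modes exercise different failure paths: a REJECT
rule answers the blocked peer immediately with an error, so connections
fail fast, whereas a DROP rule silently discards packets and leaves the
initiator waiting on its own timeouts. A minimal sketch of what the
helper applies on the isolated node (the real invocation wraps this in
vagrant ssh, see lib/io_connect_node.sh below):

```bash
# isolate: insert (-I) rules for both directions of traffic to the peer
iptables -I INPUT  -s "$other_ip" -j REJECT   # peer gets an error at once
iptables -I OUTPUT -s "$other_ip" -j REJECT
# the DROP variant discards packets silently instead, forcing timeouts:
#   iptables -I INPUT -s "$other_ip" -j DROP
# reconnect: delete (-D) the matching rules again
iptables -D INPUT  -s "$other_ip" -j REJECT
iptables -D OUTPUT -s "$other_ip" -j REJECT
```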
--- mayastor-test/e2e/common/io_connect_node.sh | 38 ------ mayastor-test/e2e/common/util.go | 43 +++++- .../node_disconnect/lib/io_connect_node.sh | 42 ++++++ .../lib/node_disconnect_lib.go | 116 ++++++++++++++++ .../node_disconnect_iscsi_test.go | 129 ------------------ .../node_disconnect_iscsi_drop_test.go | 56 ++++++++ .../node_disconnect_iscsi_reject_test.go | 56 ++++++++ .../node_disconnect_nvmf_test.go | 129 ------------------ .../node_disconnect_nvmf_drop_test.go | 56 ++++++++ .../node_disconnect_nvmf_reject_test.go | 56 ++++++++ mayastor-test/e2e/node_disconnect/test.sh | 13 ++ 11 files changed, 437 insertions(+), 297 deletions(-) delete mode 100644 mayastor-test/e2e/common/io_connect_node.sh create mode 100755 mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh create mode 100644 mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go delete mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go delete mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go create mode 100755 mayastor-test/e2e/node_disconnect/test.sh diff --git a/mayastor-test/e2e/common/io_connect_node.sh b/mayastor-test/e2e/common/io_connect_node.sh deleted file mode 100644 index d6fcb9c7c..000000000 --- a/mayastor-test/e2e/common/io_connect_node.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# Script to disconnect a node from another node using iptables -# $1 is the node-name to isolate/restore -# $2 is the other node-name -# $3 is "DISCONNECT" or "RECONNECT" -# assumes the nodes use a known fixed set of IP addresses and node names - -# edit the line below, if necessary, or set KUBESPRAY_REPO when calling -KUBESPRAY_REPO="${KUBESPRAY_REPO:-$HOME/work/kubespray}" - -if [ $# -ne 3 ]; - then echo "specify node-name, other node-name and action (DISCONNECT or RECONNECT)" - exit 1 -fi - -if [ "$3" = "DISCONNECT" ]; then - action="I" -elif [ "$3" = "RECONNECT" ]; then - action="D" -else - echo "specify action (DISCONNECT or RECONNECT)" - exit 1 -fi - -cd ${KUBESPRAY_REPO} - -nodename=$1 -other_nodename=$2 -other_node_suffix=${other_nodename: -1} -other_ip=172.18.8.10${other_node_suffix} - -# apply the rule to block/unblock it -vagrant ssh ${nodename} -c "sh -c 'sudo iptables -${action} INPUT -s ${other_ip} -j REJECT'" -vagrant ssh ${nodename} -c "sh -c 'sudo iptables -${action} OUTPUT -s ${other_ip} -j REJECT'" - diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go index 26be22824..11e128120 100644 --- a/mayastor-test/e2e/common/util.go +++ b/mayastor-test/e2e/common/util.go @@ -2,12 +2,14 @@ package common import ( "context" + "errors" "fmt" "os/exec" "strings" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -426,6 +428,45 @@ func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { }, }, } - return CreatePod(&podDef) } + +type NodeLocation struct { + NodeName string + IPAddress 
string + MayastorNode bool +} + +// returns vector of populated NodeLocation structs +func GetNodeLocs() ([]NodeLocation, error) { + nodeList := corev1.NodeList{} + + if gTestEnv.K8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil { + return nil, errors.New("failed to list nodes") + } + NodeLocs := make([]NodeLocation, 0, len(nodeList.Items)) + for _, k8snode := range nodeList.Items { + addrstr := "" + namestr := "" + mayastorNode := false + for label, value := range k8snode.Labels { + if label == "openebs.io/engine" && value == "mayastor" { + mayastorNode = true + } + } + for _, addr := range k8snode.Status.Addresses { + if addr.Type == corev1.NodeInternalIP { + addrstr = addr.Address + } + if addr.Type == corev1.NodeHostName { + namestr = addr.Address + } + } + if namestr != "" && addrstr != "" { + NodeLocs = append(NodeLocs, NodeLocation{NodeName: namestr, IPAddress: addrstr, MayastorNode: mayastorNode}) + } else { + return nil, errors.New("node lacks expected fields") + } + } + return NodeLocs, nil +} diff --git a/mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh b/mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh new file mode 100755 index 000000000..05e26f94e --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -e + +# Script to disconnect a node from another node using iptables +# $1 is the hostname of the node to change +# $2 is the target IP address of the connection to change +# $3 is "DISCONNECT" or "RECONNECT" +# $4 is "DROP" or "REJECT" + +# edit the line below, if necessary, or set KUBESPRAY_REPO when calling +KUBESPRAY_REPO="${KUBESPRAY_REPO:-$HOME/work/kubespray}" + +if [ $# -ne 4 ]; + then echo "specify node-name, target node-ip-address, action (DISCONNECT or RECONNECT), and (DROP or REJECT)" + exit 1 +fi + +if [ "$3" = "DISCONNECT" ]; then + action="I" +elif [ "$3" = "RECONNECT" ]; then + action="D" +else + echo "specify action (DISCONNECT or RECONNECT)" + exit 1 +fi + +if [ "$4" != "DROP" ] && [ "$4" != "REJECT" ]; then + echo "specify DROP or REJECT" + exit 1 +fi + + +cd ${KUBESPRAY_REPO} + +node_name=$1 +other_ip=$2 + +# apply the rule to block/unblock it +vagrant ssh ${node_name} -c "sh -c 'sudo iptables -${action} INPUT -s ${other_ip} -j $4'" +vagrant ssh ${node_name} -c "sh -c 'sudo iptables -${action} OUTPUT -s ${other_ip} -j $4'" + diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go new file mode 100644 index 000000000..ae7d1f992 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -0,0 +1,116 @@ +package node_disconnect_lib + +import ( + "e2e-basic/common" + "fmt" + "os/exec" + "time" + + . 
"github.com/onsi/gomega" +) + +var ( + defTimeoutSecs = "90s" +) + +func DisconnectNode(vmname string, otherNodes []string, method string) { + for _, targetIP := range otherNodes { + cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "DISCONNECT", method) + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) + } +} + +func ReconnectNode(vmname string, otherNodes []string, checkError bool, method string) { + for _, targetIP := range otherNodes { + cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "RECONNECT", method) + cmd.Dir = "./" + _, err := cmd.CombinedOutput() + if checkError { + Expect(err).ToNot(HaveOccurred()) + } + } +} + +// return the node name to isolate and a vector of IP addresses to isolate +func GetNodes(uuid string) (string, []string) { + nodeList, err := common.GetNodeLocs() + Expect(err).ToNot(HaveOccurred()) + + var nodeToIsolate = "" + nexusNode := common.GetMsvNode(uuid) + fmt.Printf("nexus node is \"%s\"\n", nexusNode) + + var otherAddresses []string + + // find a node which is not the nexus + for _, node := range nodeList { + if node.NodeName != nexusNode && node.MayastorNode == true { + nodeToIsolate = node.NodeName + break + } + } + Expect(nodeToIsolate != "") + + // get a list of the other ip addresses in the cluster + for _, node := range nodeList { + if node.NodeName != nodeToIsolate { + otherAddresses = append(otherAddresses, node.IPAddress) + } + } + Expect(len(otherAddresses) != 0) + + fmt.Printf("node to isolate is \"%s\"\n", nodeToIsolate) + return nodeToIsolate, otherAddresses +} + +func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { + fmt.Printf("running spawned fio\n") + go common.RunFio("fio", 20) + + time.Sleep(5 * time.Second) + fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) + + DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + + fmt.Printf("waiting 60s for disconnection to affect the nexus\n") + time.Sleep(60 * time.Second) + + fmt.Printf("running fio while node is disconnected\n") + common.RunFio("fio", 20) + + volumeState := common.GetMsvState(uuid) + fmt.Printf("Volume state is \"%s\"\n", volumeState) + Expect(volumeState == "degraded") + + fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) + ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) + + fmt.Printf("running fio when node is reconnected\n") + common.RunFio("fio", 20) +} + +func Setup(pvc_name string, storage_class_name string, fio_yaml_path string) string { + uuid := common.MkPVC(fmt.Sprintf(pvc_name), storage_class_name) + + common.ApplyDeployYaml(fio_yaml_path) + + fmt.Printf("waiting for fio\n") + Eventually(func() bool { + return common.FioReadyPod() + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + return uuid +} + +func Teardown(pvc_name string, storage_class_name string, fio_yaml_path string) { + + fmt.Printf("removing fio pod\n") + common.DeleteDeployYaml(fio_yaml_path) + + fmt.Printf("removing pvc\n") + common.RmPVC(fmt.Sprintf(pvc_name), storage_class_name) +} diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go deleted file mode 100644 index 58bec1531..000000000 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi/node_disconnect_iscsi_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package node_disconnect_iscsi_test - -// TODO factor out 
remaining code duplicated with node_disconnect_nvmf_test - -import ( - "e2e-basic/common" - "fmt" - "os/exec" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - defTimeoutSecs = "90s" - g_nodeToKill = "" - g_nexusNode = "" - g_uuid = "" -) - -func disconnectNode(vmname string, nexusNode string) { - cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "DISCONNECT") - cmd.Dir = "./" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - -func reconnectNode(vmname string, nexusNode string, checkError bool) { - cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "RECONNECT") - cmd.Dir = "./" - _, err := cmd.CombinedOutput() - if checkError { - Expect(err).ToNot(HaveOccurred()) - } -} - -func lossTest() { - g_nexusNode = common.GetMsvNode(g_uuid) - fmt.Printf("nexus node is \"%s\"\n", g_nexusNode) - - if g_nexusNode == "k8s-2" { - g_nodeToKill = "k8s-3" - } else if g_nexusNode == "k8s-3" { - g_nodeToKill = "k8s-2" - } else { - fmt.Printf("Unexpected nexus node name\n") - Expect(false) - } - fmt.Printf("node to kill is \"%s\"\n", g_nodeToKill) - - fmt.Printf("running spawned fio\n") - go common.RunFio("fio", 20) - - time.Sleep(5 * time.Second) - fmt.Printf("disconnecting \"%s\"\n", g_nodeToKill) - disconnectNode(g_nodeToKill, g_nexusNode) - disconnectNode(g_nodeToKill, "k8s-1") - - fmt.Printf("waiting 60s for disconnection to affect the nexus\n") - time.Sleep(60 * time.Second) - - fmt.Printf("running fio while node is disconnected\n") - common.RunFio("fio", 20) - - //volumeState = getMsvState(g_uuid) - //fmt.Printf("Volume state is \"%s\"\n", volumeState) ///// FIXME - this reports an incorrect value - - fmt.Printf("reconnecting \"%s\"\n", g_nodeToKill) - reconnectNode(g_nodeToKill, g_nexusNode, true) - reconnectNode(g_nodeToKill, "k8s-1", true) - - fmt.Printf("running fio when node is reconnected\n") - common.RunFio("fio", 20) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss Test Suite") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - common.SetupTestEnv() - - g_uuid = common.MkPVC(fmt.Sprintf("loss-test-pvc-iscsi"), "mayastor-iscsi") - - common.ApplyDeployYaml("../deploy/fio_iscsi.yaml") - - fmt.Printf("waiting for fio\n") - Eventually(func() bool { - return common.FioReadyPod() - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. 
- By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToKill) - reconnectNode(g_nodeToKill, g_nexusNode, false) - reconnectNode(g_nodeToKill, "k8s-1", false) - - fmt.Printf("removing fio pod\n") - common.DeleteDeployYaml("../deploy/fio_iscsi.yaml") - - fmt.Printf("removing pvc\n") - common.RmPVC(fmt.Sprintf("loss-test-pvc-iscsi"), "mayastor-iscsi") - - common.TeardownTestEnv() -}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go new file mode 100644 index 000000000..aaa1bda12 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go @@ -0,0 +1,56 @@ +package node_disconnect_iscsi_drop_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "DROP" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss iSCSI drop") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + fmt.Printf("reconnecting %s\n", g_nodeToIsolate) + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go new file mode 100644 index 000000000..10954d2b3 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go @@ -0,0 +1,56 @@ +package node_disconnect_iscsi_reject_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "REJECT" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss iSCSI reject") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + fmt.Printf("reconnecting %s\n", g_nodeToIsolate) + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go deleted file mode 100644 index 5b2d80b05..000000000 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf/node_disconnect_nvmf_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package node_disconnect_nvmf_test - -// TODO factor out remaining code duplicated with node_disconnect_iscsi_test - -import ( - "e2e-basic/common" - "fmt" - "os/exec" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - defTimeoutSecs = "90s" - g_nodeToKill = "" - g_nexusNode = "" - g_uuid = "" -) - -func disconnectNode(vmname string, nexusNode string) { - cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "DISCONNECT") - cmd.Dir = "./" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - -func reconnectNode(vmname string, nexusNode string, checkError bool) { - cmd := exec.Command("bash", "../../common/io_connect_node.sh", vmname, nexusNode, "RECONNECT") - cmd.Dir = "./" - _, err := cmd.CombinedOutput() - if checkError { - Expect(err).ToNot(HaveOccurred()) - } -} - -func lossTest() { - g_nexusNode = common.GetMsvNode(g_uuid) - fmt.Printf("nexus node is \"%s\"\n", g_nexusNode) - - if g_nexusNode == "k8s-2" { - g_nodeToKill = "k8s-3" - } else if g_nexusNode == "k8s-3" { - g_nodeToKill = "k8s-2" - } else { - fmt.Printf("Unexpected nexus node name\n") - Expect(false) - } - fmt.Printf("node to kill is \"%s\"\n", g_nodeToKill) - - fmt.Printf("running spawned fio\n") - go common.RunFio("fio", 20) - - time.Sleep(5 * time.Second) - fmt.Printf("disconnecting \"%s\"\n", g_nodeToKill) - disconnectNode(g_nodeToKill, g_nexusNode) - disconnectNode(g_nodeToKill, "k8s-1") - - fmt.Printf("waiting 60s for disconnection to affect the nexus\n") - time.Sleep(60 * time.Second) - - fmt.Printf("running fio while node is disconnected\n") - common.RunFio("fio", 20) - - //volumeState = getMsvState(g_uuid) - //fmt.Printf("Volume state is \"%s\"\n", volumeState) ///// FIXME - this reports an incorrect value - - fmt.Printf("reconnecting \"%s\"\n", g_nodeToKill) - reconnectNode(g_nodeToKill, g_nexusNode, true) - reconnectNode(g_nodeToKill, "k8s-1", true) - - fmt.Printf("running fio when node is reconnected\n") - common.RunFio("fio", 20) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss Test Suite") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - common.SetupTestEnv() - - g_uuid = common.MkPVC(fmt.Sprintf("loss-test-pvc-nvmf"), "mayastor-nvmf") - - common.ApplyDeployYaml("../deploy/fio_nvmf.yaml") - - fmt.Printf("waiting for fio\n") - Eventually(func() bool { - return common.FioReadyPod() - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. 
- By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToKill) - reconnectNode(g_nodeToKill, g_nexusNode, false) - reconnectNode(g_nodeToKill, "k8s-1", false) - - fmt.Printf("removing fio pod\n") - common.DeleteDeployYaml("../deploy/fio_nvmf.yaml") - - fmt.Printf("removing pvc\n") - common.RmPVC(fmt.Sprintf("loss-test-pvc-nvmf"), "mayastor-nvmf") - - common.TeardownTestEnv() -}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go new file mode 100644 index 000000000..a586b226c --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go @@ -0,0 +1,56 @@ +package node_disconnect_nvmf_drop_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "DROP" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss NVMF drop") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + fmt.Printf("reconnecting %s\n", g_nodeToIsolate) + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go new file mode 100644 index 000000000..d4e896e78 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go @@ -0,0 +1,56 @@ +package node_disconnect_nvmf_reject_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "REJECT" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss NVMF reject") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + fmt.Printf("reconnecting %s\n", g_nodeToIsolate) + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/mayastor-test/e2e/node_disconnect/test.sh new file mode 100755 index 000000000..eeba06c8c --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/test.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e +timeout=200 + +# TODO run setup test here +(cd node_disconnect_nvmf_reject && go test --timeout "${timeout}s") +(cd node_disconnect_iscsi_reject && go test --timeout "${timeout}s") + +# These tests currently fail +# (cd node_disconnect_nvmf_drop && go test --timeout "${timeout}s") +# (cd node_disconnect_iscsi_drop && go test --timeout "${timeout}s") + From 68408e1ed05b17e409a42a5d6e83b4834b8c800d Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Mon, 7 Dec 2020 09:01:07 +0100 Subject: [PATCH 13/85] Move pipeline abort to global scope again. Now when default github notifications are disabled in jenkins configuration we can abort the job at global level. Another reason is that we don't have to enable execution of unsafe code in the pipeline then. We have already enabled raw job object access but we would need more to get rid of the latest error message: "Scripts not permitted to use method hudson.model.Run getExecutor" The init stage remains there because it avoids duplicating the code for setting pending state on a github commit. --- Jenkinsfile | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 5ae70ca80..b171d26a8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -34,6 +34,14 @@ def notifySlackUponStateChange(build) { } } +// Will ABORT current job for cases when we don't want to build +if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && + BRANCH_NAME == "develop") { + print "INFO: Branch Indexing, aborting job." + currentBuild.result = 'ABORTED' + return +} + // Only schedule regular builds on develop branch, so we don't need to guard against it String cron_schedule = BRANCH_NAME == "develop" ? 
"0 2 * * *" : "" @@ -51,16 +59,6 @@ pipeline { stage('init') { agent { label 'nixos-mayastor' } steps { - script { - // Will ABORT current job for cases when we don't want to build - if (currentBuild.getBuildCauses('jenkins.branch.BranchIndexingCause') && - BRANCH_NAME == "develop") { - print "INFO: Branch Indexing, aborting job." - currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) - sleep(3) // Interrupt is not blocking and does not take effect immediately. - return - } - } step([ $class: 'GitHubSetCommitStatusBuilder', contextSource: [ From cd6f8bef02c615c26a909494dde5f8e1fe40044e Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 7 Dec 2020 07:23:31 +0000 Subject: [PATCH 14/85] Update and re-enable error retry rust test. --- mayastor/tests/error_count_retry.rs | 234 +++++++++++++--------------- 1 file changed, 108 insertions(+), 126 deletions(-) diff --git a/mayastor/tests/error_count_retry.rs b/mayastor/tests/error_count_retry.rs index fc4eaedf8..1ff035b22 100644 --- a/mayastor/tests/error_count_retry.rs +++ b/mayastor/tests/error_count_retry.rs @@ -1,139 +1,141 @@ -extern crate log; - pub use common::error_bdev::{ create_error_bdev, inject_error, - SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_TYPE_WRITE, VBDEV_IO_FAILURE, }; use mayastor::{ - bdev::{nexus_create, nexus_lookup, ActionType, NexusErrStore, QueryType}, + bdev::{nexus_create, nexus_lookup, NexusStatus}, core::{Bdev, MayastorCliArgs}, subsys::Config, }; -pub mod common; - -static ERROR_COUNT_TEST_NEXUS: &str = "error_count_retry_nexus"; +use common::MayastorTest; +use once_cell::sync::OnceCell; -static DISKNAME1: &str = "/tmp/disk1.img"; +pub mod common; -static ERROR_DEVICE: &str = "error_retry_device"; -static EE_ERROR_DEVICE: &str = "EE_error_retry_device"; // The prefix is added by the vbdev_error module -static BDEV_EE_ERROR_DEVICE: &str = "bdev:///EE_error_retry_device"; +static YAML_CONFIG_FILE: &str = "/tmp/error_retry_test.yaml"; +static MS: OnceCell = OnceCell::new(); -static YAML_CONFIG_FILE: &str = "/tmp/error_count_retry_nexus.yaml"; -#[ignore] -#[tokio::test] -async fn nexus_error_count_retry_test() { - common::truncate_file(DISKNAME1, 64 * 1024); +static NON_ERROR_DISK: &str = "/tmp/non_error.img"; +static ERROR_DISK: &str = "/tmp/error.img"; +static NON_ERROR_BASE_BDEV: &str = "aio:///tmp/non_error.img?blk_size=512"; +fn mayastor() -> &'static MayastorTest<'static> { let mut config = Config::default(); - config.err_store_opts.enable_err_store = true; - config.err_store_opts.action = ActionType::Ignore; - config.err_store_opts.err_store_size = 256; config.err_store_opts.max_io_attempts = 2; - config.write(YAML_CONFIG_FILE).unwrap(); - let ms = common::MayastorTest::new(MayastorCliArgs { - mayastor_config: Some(YAML_CONFIG_FILE.to_string()), - reactor_mask: "0x3".to_string(), - ..Default::default() + + let ms = MS.get_or_init(|| { + MayastorTest::new(MayastorCliArgs { + mayastor_config: Some(YAML_CONFIG_FILE.to_string()), + reactor_mask: "0x3".to_string(), + ..Default::default() + }) }); + &ms +} - // baseline test with no errors injected - ms.spawn(async { - create_error_bdev(ERROR_DEVICE, DISKNAME1); - create_nexus().await; - err_write_nexus(true).await; - err_read_nexus(true).await; - }) - .await; - - ms.spawn(nexus_err_query_and_test( - BDEV_EE_ERROR_DEVICE, - NexusErrStore::READ_FLAG | NexusErrStore::WRITE_FLAG, - 0, - Some(1_000_000_000), - )) - .await; - - // 1 write error injected, 2 attempts allowed, 1 write error should be - // logged and the IO should succeed - ms.spawn(async { - 
inject_error( - EE_ERROR_DEVICE, - SPDK_BDEV_IO_TYPE_WRITE, - VBDEV_IO_FAILURE, - 1, - ); - err_write_nexus(true).await; - }) - .await; - - ms.spawn(nexus_err_query_and_test( - BDEV_EE_ERROR_DEVICE, - NexusErrStore::WRITE_FLAG, - 1, - Some(1_000_000_000), - )) - .await; - - // 2 errors injected, 2 attempts allowed, 1 read attempt, 2 read errors - // should be logged and the IO should fail - ms.spawn(async { - inject_error( - EE_ERROR_DEVICE, - SPDK_BDEV_IO_TYPE_READ, - VBDEV_IO_FAILURE, - 2, - ); - err_read_nexus(false).await; - }) - .await; - - // IO should now succeed - ms.spawn(async { - err_read_nexus(true).await; - }) - .await; - - common::delete_file(&[DISKNAME1.to_string()]); +#[tokio::test] +async fn nexus_retry_child_write_succeed_test() { + let nexus_name = "error_retry_write_succeed"; + let error_device = "error_device_write_succeed"; + let ee_error_device = format!("EE_{}", error_device); + let bdev_ee_error_device = format!("bdev:///{}", ee_error_device); + + common::truncate_file(ERROR_DISK, 64 * 1024); + common::truncate_file(NON_ERROR_DISK, 64 * 1024); + + mayastor() + .spawn(async move { + create_error_bdev(error_device, ERROR_DISK); + create_nexus( + nexus_name, + &bdev_ee_error_device, + &NON_ERROR_BASE_BDEV, + ) + .await; + + check_nexus_state_is(nexus_name, NexusStatus::Online); + + inject_error( + &ee_error_device, + SPDK_BDEV_IO_TYPE_WRITE, + VBDEV_IO_FAILURE, + 1, + ); + + err_write_nexus(nexus_name, true).await; //should succeed, 2 attempts vs 1 error + check_nexus_state_is(nexus_name, NexusStatus::Degraded); + delete_nexus(nexus_name).await; + }) + .await; + + common::delete_file(&[ERROR_DISK.to_string()]); + common::delete_file(&[NON_ERROR_DISK.to_string()]); common::delete_file(&[YAML_CONFIG_FILE.to_string()]); } -async fn create_nexus() { - let ch = vec![BDEV_EE_ERROR_DEVICE.to_string()]; +#[tokio::test] +async fn nexus_retry_child_write_fail_test() { + let nexus_name = "error_retry_write_fail"; + let error_device = "error_device_write_fail"; + let ee_error_device = format!("EE_{}", error_device); + let bdev_ee_error_device = format!("bdev:///{}", ee_error_device); + + common::truncate_file(ERROR_DISK, 64 * 1024); + common::truncate_file(NON_ERROR_DISK, 64 * 1024); + + mayastor() + .spawn(async move { + create_error_bdev(error_device, ERROR_DISK); + create_nexus( + nexus_name, + &bdev_ee_error_device, + &NON_ERROR_BASE_BDEV, + ) + .await; + check_nexus_state_is(nexus_name, NexusStatus::Online); + + inject_error( + &ee_error_device, + SPDK_BDEV_IO_TYPE_WRITE, + VBDEV_IO_FAILURE, + 2, + ); + + err_write_nexus(nexus_name, false).await; //should fail, 2 attempts vs 2 errors + check_nexus_state_is(nexus_name, NexusStatus::Degraded); + delete_nexus(nexus_name).await; + }) + .await; + + common::delete_file(&[ERROR_DISK.to_string()]); + common::delete_file(&[NON_ERROR_DISK.to_string()]); + common::delete_file(&[YAML_CONFIG_FILE.to_string()]); +} + +fn check_nexus_state_is(name: &str, expected_status: NexusStatus) { + let nexus = nexus_lookup(name).unwrap(); + assert_eq!(nexus.status(), expected_status); +} + +async fn create_nexus(name: &str, err_dev: &str, dev: &str) { + let ch = vec![err_dev.to_string(), dev.to_string()]; - nexus_create(ERROR_COUNT_TEST_NEXUS, 64 * 1024 * 1024, None, &ch) + nexus_create(&name.to_string(), 64 * 1024 * 1024, None, &ch) .await .unwrap(); } -async fn nexus_err_query_and_test( - child_bdev: &str, - io_type_flags: u32, - expected_count: u32, - age_nano: Option, -) { - let nexus = nexus_lookup(ERROR_COUNT_TEST_NEXUS).unwrap(); - let count = 
nexus - .error_record_query( - child_bdev, - io_type_flags, - NexusErrStore::IO_FAILED_FLAG, - age_nano, - QueryType::Total, - ) - .expect("failed to query child"); - assert!(count.is_some()); // true if the error_store is enabled - assert_eq!(count.unwrap(), expected_count); +async fn delete_nexus(name: &str) { + let n = nexus_lookup(name).unwrap(); + n.destroy().await.unwrap(); } -async fn err_write_nexus(succeed: bool) { - let bdev = Bdev::lookup_by_name(ERROR_COUNT_TEST_NEXUS) - .expect("failed to lookup nexus"); +async fn err_write_nexus(name: &str, succeed: bool) { + let bdev = Bdev::lookup_by_name(name).expect("failed to lookup nexus"); let d = bdev .open(true) .expect("failed open bdev") @@ -150,23 +152,3 @@ async fn err_write_nexus(succeed: bool) { } }; } - -async fn err_read_nexus(succeed: bool) { - let bdev = Bdev::lookup_by_name(ERROR_COUNT_TEST_NEXUS) - .expect("failed to lookup nexus"); - let d = bdev - .open(true) - .expect("failed open bdev") - .into_handle() - .unwrap(); - let mut buf = d.dma_malloc(512).expect("failed to allocate buffer"); - - match d.read_at(0, &mut buf).await { - Ok(_) => { - assert_eq!(succeed, true); - } - Err(_) => { - assert_eq!(succeed, false); - } - }; -} From 6db2625b53ed6842a5eec070d1b25541d4f4eed8 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Fri, 4 Dec 2020 16:04:10 +0000 Subject: [PATCH 15/85] Convert rebuild tests to Docker Compose tests Rebuild test cases in nexus_rebuild.rs have been converted into Docker Compose tests in rebuild.rs. The reason for this change is to make the tests more representative of real world applications. As an example, the new tests share replicas over NVMf. One test (rebuild_lookup) remains in nexus_rebuild.rs. This test cannot be converted to a Compose test as the functionality is not exposed over RPC. However, the test is still useful and hence remains. --- mayastor/tests/nexus_rebuild.rs | 504 +-------------- mayastor/tests/rebuild.rs | 1040 +++++++++++++++++++++++++++++++ 2 files changed, 1042 insertions(+), 502 deletions(-) create mode 100644 mayastor/tests/rebuild.rs diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index b507af714..bd930060e 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -4,11 +4,10 @@ use crossbeam::channel::unbounded; use once_cell::sync::Lazy; use tracing::error; -use common::error_bdev; use mayastor::{ - bdev::{nexus_lookup, ChildState, Reason, VerboseError}, + bdev::nexus_lookup, core::{MayastorCliArgs, MayastorEnvironment, Mthread, Reactor}, - rebuild::{RebuildJob, RebuildState, SEGMENT_SIZE}, + rebuild::RebuildJob, }; use rpc::mayastor::ShareProtocolNexus; @@ -23,7 +22,6 @@ pub fn nexus_name() -> &'static str { } static NEXUS_SIZE: u64 = 5 * 1024 * 1024; // 5MiB -static LARGE_NEXUS_SIZE: u64 = 100 * 1024 * 1024; // 100MiB // approximate on-disk metadata that will be written to the child by the nexus const META_SIZE: u64 = 5 * 1024 * 1024; // 5MiB @@ -39,16 +37,7 @@ fn test_ini(name: &'static str) { common::truncate_file_bytes(&get_disk(i), NEXUS_SIZE + META_SIZE); } } -fn test_ini_large_nexus(name: &'static str) { - *NEXUS_NAME.lock().unwrap() = name; - get_err_bdev().clear(); - test_init!(); - for i in 0 .. MAX_CHILDREN { - common::delete_file(&[get_disk(i)]); - common::truncate_file_bytes(&get_disk(i), LARGE_NEXUS_SIZE + META_SIZE); - } -} fn test_fini() { //mayastor_env_stop(0); for i in 0 .. 
MAX_CHILDREN { @@ -62,16 +51,6 @@ fn get_err_bdev() -> &'static mut Vec { &mut ERROR_DEVICE_INDEXES } } -fn get_err_dev(index: u64) -> String { - format!("EE_error_device{}", index) -} -fn set_err_dev(index: u64) { - if !get_err_bdev().contains(&index) { - let backing = get_disk(index); - get_err_bdev().push(index); - error_bdev::create_error_bdev(&get_disk(index), &backing); - } -} fn get_disk(number: u64) -> String { if get_err_bdev().contains(&number) { format!("error_device{}", number) @@ -87,183 +66,6 @@ fn get_dev(number: u64) -> String { } } -#[test] -fn rebuild_test_basic() { - test_ini("rebuild_test_basic"); - - Reactor::block_on(async { - nexus_create(NEXUS_SIZE, 1, false).await; - nexus_add_child(1, true).await; - nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -// test the rebuild flag of the add_child operation -fn rebuild_test_add() { - test_ini("rebuild_test_add"); - - Reactor::block_on(async { - nexus_create(NEXUS_SIZE, 1, true).await; - let nexus = nexus_lookup(nexus_name()).unwrap(); - - nexus.add_child(&get_dev(1), false).await.unwrap(); - nexus - .start_rebuild(&get_dev(1)) - .await - .expect_err("rebuild expected to be present"); - nexus_test_child(1).await; - - nexus.add_child(&get_dev(2), true).await.unwrap(); - let _ = nexus - .start_rebuild(&get_dev(2)) - .await - .expect("rebuild not expected to be present"); - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_progress() { - test_ini_large_nexus("rebuild_progress"); - - async fn test_progress(polls: u64, progress: u32) -> u32 { - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus.resume_rebuild(&get_dev(1)).await.unwrap(); - // { polls } to poll with an expr rather than an ident - reactor_poll!({ polls }); - nexus.pause_rebuild(&get_dev(1)).await.unwrap(); - common::wait_for_rebuild( - get_dev(1), - RebuildState::Paused, - std::time::Duration::from_millis(1000), - ); - let p = nexus.get_rebuild_progress(&get_dev(1)).unwrap(); - assert!(p.progress >= progress); - p.progress - }; - - Reactor::block_on(async { - nexus_create(LARGE_NEXUS_SIZE, 1, false).await; - nexus_add_child(1, false).await; - // naive check to see if progress is being made - let mut progress = 0; - for _ in 0 .. 
10 { - progress = test_progress(50, progress).await; - } - nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_child_faulted() { - test_ini("rebuild_child_faulted"); - - Reactor::block_on(async move { - nexus_create(NEXUS_SIZE, 2, false).await; - - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus - .start_rebuild(&get_dev(1)) - .await - .expect_err("Rebuild only degraded children!"); - - nexus.remove_child(&get_dev(1)).await.unwrap(); - assert_eq!(nexus.children.len(), 1); - nexus - .start_rebuild(&get_dev(0)) - .await - .expect_err("Cannot rebuild from the same child"); - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_dst_removal() { - test_ini("rebuild_dst_removal"); - - Reactor::block_on(async move { - let new_child = 2; - nexus_create(NEXUS_SIZE, new_child, false).await; - nexus_add_child(new_child, false).await; - - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus.pause_rebuild(&get_dev(new_child)).await.unwrap(); - nexus.remove_child(&get_dev(new_child)).await.unwrap(); - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_src_removal() { - test_ini("rebuild_src_removal"); - - Reactor::block_on(async move { - let new_child = 2; - assert!(new_child > 1); - nexus_create(NEXUS_SIZE, new_child, true).await; - nexus_add_child(new_child, false).await; - - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus.pause_rebuild(&get_dev(new_child)).await.unwrap(); - nexus.remove_child(&get_dev(0)).await.unwrap(); - - // tests if new_child which had its original rebuild src removed - // ended up being rebuilt successfully - nexus_test_child(new_child).await; - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_with_load() { - test_ini("rebuild_with_load"); - - Reactor::block_on(async { - nexus_create(NEXUS_SIZE, 1, false).await; - let nexus = nexus_lookup(nexus_name()).unwrap(); - let nexus_device = nexus_share().await; - - let (s, r1) = unbounded::(); - Mthread::spawn_unaffinitized(move || { - s.send(common::fio_verify_size(&nexus_device, NEXUS_SIZE * 2)) - }); - let (s, r2) = unbounded::<()>(); - Mthread::spawn_unaffinitized(move || { - std::thread::sleep(std::time::Duration::from_millis(1500)); - s.send(()) - }); - // warm up fio with a single child first - reactor_poll!(r2); - nexus_add_child(1, false).await; - let fio_result: i32; - reactor_poll!(r1, fio_result); - assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); - - nexus_test_child(1).await; - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - async fn nexus_create(size: u64, children: u64, fill_random: bool) { let mut ch = Vec::new(); for i in 0 .. 
children { @@ -305,136 +107,6 @@ async fn nexus_share() -> String { device } -async fn nexus_add_child(new_child: u64, wait: bool) { - let nexus = nexus_lookup(nexus_name()).unwrap(); - - nexus.add_child(&get_dev(new_child), false).await.unwrap(); - - if wait { - common::wait_for_rebuild( - get_dev(new_child), - RebuildState::Completed, - std::time::Duration::from_secs(10), - ); - - nexus_test_child(new_child).await; - } else { - // allows for the rebuild to start running (future run by the reactor) - reactor_poll!(2); - } -} - -async fn nexus_test_child(child: u64) { - common::wait_for_rebuild( - get_dev(child), - RebuildState::Completed, - std::time::Duration::from_secs(10), - ); - - let nexus = nexus_lookup(nexus_name()).unwrap(); - - let (s, r) = unbounded::(); - Mthread::spawn_unaffinitized(move || { - s.send(common::compare_devices( - &get_disk(0), - &get_disk(child), - nexus.size(), - true, - )) - }); - reactor_poll!(r); -} - -#[test] -// test rebuild with different combinations of sizes for src and dst children -fn rebuild_sizes() { - test_ini("rebuild_sizes"); - - let nexus_size = 10 * 1024 * 1024; // 10MiB - let child_size = nexus_size + META_SIZE; - let mut test_cases = vec![ - // size of (first child, second, third) - // first child size is same as the nexus size to set it as the minimum - // otherwise a child bigger than the nexus but smaller than the - // smallest child would not be allowed - (nexus_size, child_size, child_size), - (nexus_size, child_size * 2, child_size), - (nexus_size, child_size, child_size * 2), - (nexus_size, child_size * 2, child_size * 2), - ]; - // now for completeness sake we also test the cases where the actual - // nexus_size will be lower due to the on-disk metadata - let child_size = nexus_size; - test_cases.extend(vec![ - (nexus_size, child_size, child_size), - (nexus_size, child_size * 2, child_size), - (nexus_size, child_size, child_size * 2), - (nexus_size, child_size * 2, child_size * 2), - ]); - - for (test_case_index, test_case) in test_cases.iter().enumerate() { - common::delete_file(&[get_disk(0), get_disk(1), get_disk(1)]); - // first healthy child in the list is used as the rebuild source - common::truncate_file_bytes(&get_disk(0), test_case.1); - common::truncate_file_bytes(&get_disk(1), test_case.0); - common::truncate_file_bytes(&get_disk(2), test_case.2); - - let nexus_size = test_case.0; - Reactor::block_on(async move { - // add an extra child so that the minimum size is set to - // match the nexus size - nexus_create(nexus_size, 2, false).await; - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus.add_child(&get_dev(2), true).await.unwrap(); - // within start_rebuild the size should be validated - let _ = nexus.start_rebuild(&get_dev(2)).await.unwrap_or_else(|e| { - error!( "Case {} - Child should have started to rebuild but got error:\n {:}", - test_case_index, e.verbose()); - panic!( - "Case {} - Child should have started to rebuild but got error:\n {}", - test_case_index, e.verbose() - ) - }); - // sanity check that the rebuild does succeed - common::wait_for_rebuild( - get_dev(2), - RebuildState::Completed, - std::time::Duration::from_secs(20), - ); - - nexus.destroy().await.unwrap(); - }); - } - - test_fini(); -} - -#[test] -// tests the rebuild with multiple size and a non-multiple size of the segment -fn rebuild_segment_sizes() { - test_ini("rebuild_segment_sizes"); - - assert!(SEGMENT_SIZE > 512 && SEGMENT_SIZE < NEXUS_SIZE); - - let test_cases = vec![ - // multiple of SEGMENT_SIZE - SEGMENT_SIZE * 10, - // not 
multiple of SEGMENT_SIZE - (SEGMENT_SIZE * 10) + 512, - ]; - - for test_case in test_cases.iter() { - let nexus_size = *test_case; - Reactor::block_on(async move { - nexus_create(nexus_size, 1, false).await; - nexus_add_child(1, true).await; - nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); - }); - } - - test_fini(); -} - #[test] fn rebuild_lookup() { test_ini("rebuild_lookup"); @@ -504,175 +176,3 @@ fn rebuild_lookup() { test_fini(); } - -#[test] -// todo: decide whether to keep the idempotence on the operations or to -// create a RPC version which achieves the idempotence -fn rebuild_operations() { - test_ini("rebuild_operations"); - - Reactor::block_on(async { - nexus_create(NEXUS_SIZE, 1, false).await; - let nexus = nexus_lookup(nexus_name()).unwrap(); - - nexus - .resume_rebuild(&get_dev(1)) - .await - .expect_err("no rebuild to resume"); - - nexus_add_child(1, false).await; - - nexus - .resume_rebuild(&get_dev(1)) - .await - .expect("already running"); - - nexus.pause_rebuild(&get_dev(1)).await.unwrap(); - reactor_poll!(10); - // already pausing so no problem - nexus.pause_rebuild(&get_dev(1)).await.unwrap(); - reactor_poll!(10); - - let _ = nexus - .start_rebuild(&get_dev(1)) - .await - .expect_err("a rebuild already exists"); - - nexus.stop_rebuild(&get_dev(1)).await.unwrap(); - common::wait_for_rebuild( - get_dev(1), - RebuildState::Stopped, - // already stopping, should be enough - std::time::Duration::from_millis(250), - ); - // already stopped - nexus.stop_rebuild(&get_dev(1)).await.unwrap(); - - nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -// rebuilds N children at the same time -// creates the nexus with 1 healthy and then adds N children which -// have to be rebuilt - this means we have N active rebuilds jobs -fn rebuild_multiple() { - test_ini("rebuild_multiple"); - - let active_rebuilds = 4; - Reactor::block_on(async move { - nexus_create(NEXUS_SIZE, 1, false).await; - let nexus = nexus_lookup(nexus_name()).unwrap(); - - for child in 1 ..= active_rebuilds { - nexus_add_child(child, false).await; - nexus.pause_rebuild(&get_dev(child)).await.unwrap(); - } - - assert_eq!(RebuildJob::count(), active_rebuilds as usize); - - for child in 1 ..= active_rebuilds { - nexus.resume_rebuild(&get_dev(child)).await.unwrap(); - common::wait_for_rebuild( - get_dev(child), - RebuildState::Completed, - std::time::Duration::from_secs(20), - ); - nexus.remove_child(&get_dev(child)).await.unwrap(); - } - - // make sure we can recreate the jobs again (as they - // will have the same URI) - - for child in 1 ..= active_rebuilds { - nexus_add_child(child, false).await; - } - - for child in 1 ..= active_rebuilds { - common::wait_for_rebuild( - get_dev(child), - RebuildState::Running, - std::time::Duration::from_millis(100), - ); - nexus.remove_child(&get_dev(child)).await.unwrap(); - } - - nexus.destroy().await.unwrap(); - }); - - test_fini(); -} - -#[test] -fn rebuild_fault_src() { - test_ini("rebuild_fault_src"); - set_err_dev(0); - - Reactor::block_on(async { - nexus_create(NEXUS_SIZE, 1, false).await; - - let nexus = nexus_lookup(nexus_name()).unwrap(); - nexus.add_child(&get_dev(1), false).await.unwrap(); - - error_bdev::inject_error( - &get_err_dev(0), - error_bdev::SPDK_BDEV_IO_TYPE_READ, - error_bdev::VBDEV_IO_FAILURE, - 88, - ); - - common::wait_for_rebuild( - get_dev(1), - RebuildState::Failed, - std::time::Duration::from_secs(20), - ); - // allow the nexus futures to run - reactor_poll!(10); - assert_eq!( - 
nexus.children[1].state(),
-            ChildState::Faulted(Reason::RebuildFailed)
-        );
-
-        nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap();
-    });
-
-    test_fini();
-}
-
-#[test]
-fn rebuild_fault_dst() {
-    test_ini("rebuild_fault_dst");
-    set_err_dev(1);
-
-    Reactor::block_on(async {
-        nexus_create(NEXUS_SIZE, 1, false).await;
-
-        let nexus = nexus_lookup(nexus_name()).unwrap();
-        nexus.add_child(&get_dev(1), false).await.unwrap();
-
-        error_bdev::inject_error(
-            &get_err_dev(1),
-            error_bdev::SPDK_BDEV_IO_TYPE_WRITE,
-            error_bdev::VBDEV_IO_FAILURE,
-            88,
-        );
-
-        common::wait_for_rebuild(
-            get_dev(1),
-            RebuildState::Failed,
-            std::time::Duration::from_secs(20),
-        );
-        // allow the nexus futures to run
-        reactor_poll!(10);
-        assert_eq!(
-            nexus.children[1].state(),
-            ChildState::Faulted(Reason::RebuildFailed)
-        );
-
-        nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap();
-    });
-
-    test_fini();
-}
diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs
new file mode 100644
index 000000000..a467bbfb4
--- /dev/null
+++ b/mayastor/tests/rebuild.rs
@@ -0,0 +1,1040 @@
+use composer::{Builder, ComposeTest, RpcHandle};
+
+use rpc::mayastor::{
+    AddChildNexusRequest,
+    BdevShareRequest,
+    BdevUri,
+    Child,
+    ChildState,
+    CreateNexusRequest,
+    CreateReply,
+    DestroyNexusRequest,
+    Nexus,
+    NexusState,
+    Null,
+    PauseRebuildRequest,
+    PublishNexusRequest,
+    RebuildProgressRequest,
+    RebuildStateRequest,
+    RemoveChildNexusRequest,
+    ResumeRebuildRequest,
+    ShareProtocolNexus,
+    StartRebuildRequest,
+    StopRebuildRequest,
+};
+
+use std::time::Duration;
+
+use crossbeam::channel::unbounded;
+use spdk_sys::SPDK_BDEV_LARGE_BUF_MAX_SIZE;
+use std::convert::TryFrom;
+
+pub mod common;
+
+const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001";
+const NEXUS_SIZE: u64 = 50 * 1024 * 1024; // 50MiB
+
+/// Test that a child added to a nexus can be successfully rebuilt.
+#[tokio::test]
+async fn rebuild_basic() {
+    let test = start_infrastructure("rebuild_basic").await;
+    let (mut ms1, _, ms3) = setup_test(&test, 1).await;
+    let nexus_hdl = &mut ms1;
+    let child = &get_share_uri(&ms3);
+
+    // Check a rebuild is started for a newly added child.
+    add_child(nexus_hdl, child, true).await;
+    assert!(wait_for_rebuild_state(
+        nexus_hdl,
+        child,
+        "running",
+        Duration::from_secs(1),
+    )
+    .await
+    .unwrap());
+
+    // Check nexus is healthy after rebuild completion.
+    assert!(
+        wait_for_rebuild_completion(nexus_hdl, child, Duration::from_secs(20))
+            .await
+    );
+    check_nexus_state(nexus_hdl, NexusState::NexusOnline).await;
+}
+
+/// Test the "norebuild" flag when adding a child.
+#[tokio::test]
+async fn rebuild_add_flag() {
+    let test = start_infrastructure("rebuild_add_flag").await;
+    let (mut ms1, _, ms3) = setup_test(&test, 1).await;
+    let nexus_hdl = &mut ms1;
+    let child = &get_share_uri(&ms3);
+
+    // Add child but don't rebuild.
+    add_child(nexus_hdl, child, false).await;
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 0);
+    check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await;
+
+    // Start rebuild.
+    start_rebuild(nexus_hdl, child).await.unwrap();
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+    assert!(wait_for_rebuild_state(
+        nexus_hdl,
+        child,
+        "running",
+        Duration::from_secs(1),
+    )
+    .await
+    .unwrap());
+}
+
+/// Test the rebuild progress gets updated.
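+///
+/// The pause/read/resume sampling below is, presumably, what keeps the two
+/// progress readings from racing with a still-running job: progress is only
+/// read while the rebuild is paused, and the test then asserts that the
+/// second reading is strictly greater than the first.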
+#[tokio::test] +async fn rebuild_progress() { + let test = start_infrastructure("rebuild_progress").await; + let (mut ms1, _, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Start a rebuild and give it some time to run. + add_child(nexus_hdl, child, true).await; + std::thread::sleep(Duration::from_millis(100)); + + // Pause rebuild and get current progress. + pause_rebuild(nexus_hdl, child).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + child, + "paused", + Duration::from_secs(1), + ) + .await + .unwrap()); + let progress1 = get_rebuild_progress(nexus_hdl, child).await; + + // Resume rebuild and give it some time to run. + resume_rebuild(nexus_hdl, child).await.unwrap(); + std::thread::sleep(Duration::from_millis(100)); + + // Pause rebuild and check for further progress. + pause_rebuild(nexus_hdl, child).await; + let progress2 = get_rebuild_progress(nexus_hdl, child).await; + assert!(progress2 > progress1); +} + +/// Test cases where a rebuild should not be started. +#[tokio::test] +async fn rebuild_not_required() { + let test = start_infrastructure("rebuild_not_required").await; + let (mut ms1, ms2, ms3) = setup_test(&test, 2).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Attempt to rebuild a healthy child. + start_rebuild(nexus_hdl, child) + .await + .expect_err("Shouldn't rebuild"); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + + // Remove one of the healthy children. + remove_child(nexus_hdl, child).await; + + // Can't rebuild a single child which is healthy. + let last_child = &get_share_uri(&ms2); + start_rebuild(nexus_hdl, last_child) + .await + .expect_err("Shouldn't rebuild"); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); +} + +/// Test removing the source of a rebuild. +#[tokio::test] +async fn rebuild_src_removal() { + let test = start_infrastructure("rebuild_src_removal").await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Pause rebuild for added child. + add_child(nexus_hdl, child, true).await; + pause_rebuild(nexus_hdl, child).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + child, + "paused", + Duration::from_secs(1), + ) + .await + .unwrap()); + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + + // Remove the rebuild source. + let src_child = &get_share_uri(&ms2); + remove_child(nexus_hdl, src_child).await; + // Give a little time for the rebuild to fail. + std::thread::sleep(Duration::from_secs(1)); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + // Nexus must be faulted because it doesn't have any healthy children. + check_nexus_state(nexus_hdl, NexusState::NexusFaulted).await; +} + +/// Test removing the destination of a rebuild. +#[tokio::test] +async fn rebuild_dst_removal() { + let test = start_infrastructure("rebuild_dst_removal").await; + let (mut ms1, _, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Pause rebuild for added child. + add_child(nexus_hdl, child, true).await; + pause_rebuild(nexus_hdl, child).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + child, + "paused", + Duration::from_secs(1), + ) + .await + .unwrap()); + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + + // Remove the child that is being rebuilt. + remove_child(nexus_hdl, child).await; + // Give a little time for the rebuild to fail. 
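+    // (A fixed sleep keeps the test simple; polling get_num_rebuilds() with
+    // a deadline, as wait_for_rebuild_state() does for states, would be more
+    // robust on slow machines.)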
+ std::thread::sleep(Duration::from_secs(1)); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + // Nexus must be online because it has a single healthy child. + check_nexus_state(nexus_hdl, NexusState::NexusOnline).await; +} + +/// Test faulting the source of a rebuild. +#[tokio::test] +async fn rebuild_fault_src() { + let test = start_infrastructure("rebuild_fault_src").await; + let (mut ms1, mut ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Check a rebuild is started for the added child. + add_child(nexus_hdl, child, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + child, + "running", + Duration::from_millis(500), + ) + .await + .unwrap()); + + // Fault the rebuild source by unsharing the bdev. + bdev_unshare(&mut ms2).await; + + // The rebuild failed so the destination should be faulted. + assert!( + wait_for_child_state( + nexus_hdl, + child, + ChildState::ChildFaulted, + Duration::from_millis(500), + ) + .await + ); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); +} + +/// Test faulting the destination of a rebuild. +#[tokio::test] +async fn rebuild_fault_dst() { + let test = start_infrastructure("rebuild_fault_dst").await; + let (mut ms1, _, mut ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let child = &get_share_uri(&ms3); + + // Check a rebuild is started for the added child. + add_child(nexus_hdl, child, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + child, + "running", + Duration::from_millis(500), + ) + .await + .unwrap()); + + // Fault the rebuild destination by unsharing the bdev. + bdev_unshare(&mut ms3).await; + + // Check the state of the destination child. + // Give a sufficiently high timeout time as unsharing an NVMf bdev can take + // some time to propagate up as an error from the rebuild job. + assert!( + wait_for_child_state( + nexus_hdl, + child, + ChildState::ChildFaulted, + Duration::from_secs(20), + ) + .await + ); + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); +} + +/// Test rebuild with different sizes of source and destination children. +#[tokio::test] +async fn rebuild_sizes() { + struct TestCase { + child1_size: u64, + child2_size: u64, + child3_size: u64, + } + + // Test cases where the child sizes include space for the metadata. + + const META_SIZE_MB: u64 = 5; + let default_size: u64 = 50 + META_SIZE_MB; + + let mut test_cases = vec![]; + // Children with same size. + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size, + child3_size: default_size, + }); + // 2nd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size * 2, + child3_size: default_size, + }); + // 3rd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size, + child3_size: default_size * 2, + }); + // 2nd and 3rd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size * 2, + child3_size: default_size * 2, + }); + + // Test cases where the metadata size is not included. This will result in + // the nexus size being smaller than requested in order to accommodate the + // metadata on the children. + + let default_size: u64 = 50; + + // Children with same size. 
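+    // (With default_size = 50 the children leave no headroom for the ~5 MiB
+    // of metadata, so the resulting nexus is expected to end up slightly
+    // smaller than the requested 50 MiB.)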
+ test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size, + child3_size: default_size, + }); + // 2nd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size * 2, + child3_size: default_size, + }); + // 3rd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size, + child3_size: default_size * 2, + }); + // 2nd and 3rd child larger + test_cases.push(TestCase { + child1_size: default_size, + child2_size: default_size * 2, + child3_size: default_size * 2, + }); + + let test = start_infrastructure("rebuild_sizes").await; + let ms1 = &mut test.grpc_handle("ms1").await.unwrap(); + let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); + let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); + let nexus_hdl = ms1; + + // Run the tests. + for test in test_cases { + let child1 = + bdev_create_and_share(ms2, Some(test.child1_size), None).await; + let child2 = + bdev_create_and_share(ms3, Some(test.child2_size), None).await; + let local_child = + format!("malloc:///disk0?size_mb={}", test.child3_size.to_string()); + + // Create a nexus with 2 remote children. + create_nexus(nexus_hdl, vec![child1.clone(), child2.clone()]).await; + + // Add the local child and wait for rebuild. + add_child(nexus_hdl, &local_child, true).await; + assert!( + wait_for_rebuild_completion( + nexus_hdl, + &local_child, + Duration::from_secs(2), + ) + .await + ); + + // Teardown + destroy_nexus(nexus_hdl).await; + bdev_unshare(ms2).await; + bdev_destroy(ms2, "malloc:///disk0".into()).await; + bdev_unshare(ms3).await; + bdev_destroy(ms3, "malloc:///disk0".into()).await; + } +} + +/// Tests the rebuild with different nexus sizes. +#[tokio::test] +async fn rebuild_segment_sizes() { + let test = start_infrastructure("rebuild_segment_sizes").await; + let ms1 = &mut test.grpc_handle("ms1").await.unwrap(); + let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); + let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); + let nexus_hdl = ms1; + + const SEGMENT_SIZE: u64 = SPDK_BDEV_LARGE_BUF_MAX_SIZE as u64; + let test_cases = vec![ + // multiple of SEGMENT_SIZE + SEGMENT_SIZE * 10, + // not multiple of SEGMENT_SIZE + (SEGMENT_SIZE * 10) + 512, + ]; + + // Run the tests. + for test_case in test_cases.iter() { + let child1 = bdev_create_and_share(ms2, None, None).await; + let child2 = bdev_create_and_share(ms3, None, None).await; + + let nexus_size = *test_case; + nexus_hdl + .mayastor + .create_nexus(CreateNexusRequest { + uuid: NEXUS_UUID.into(), + size: nexus_size, + children: vec![child1], + }) + .await + .unwrap(); + + // Wait for rebuild to complete. + add_child(nexus_hdl, &child2, true).await; + assert!( + wait_for_rebuild_completion( + nexus_hdl, + &child2, + Duration::from_secs(5) + ) + .await + ); + + // Teardown + destroy_nexus(nexus_hdl).await; + bdev_unshare(ms2).await; + bdev_destroy(ms2, "malloc:///disk0".into()).await; + bdev_unshare(ms3).await; + bdev_destroy(ms3, "malloc:///disk0".into()).await; + } +} + +/// Test the various rebuild operations. +#[tokio::test] +async fn rebuild_operations() { + let test = start_infrastructure("rebuild_operations").await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + + // Rebuilding a healthy child should do nothing. + let child1 = &get_share_uri(&ms2); + resume_rebuild(nexus_hdl, child1) + .await + .expect_err("Should be nothing to rebuild"); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + + // Start a rebuild. 
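+    // (add_child() with rebuild = true is what kicks the rebuild off: the
+    // helper maps this to `norebuild: !rebuild` in AddChildNexusRequest.)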
+    let child2 = &get_share_uri(&ms3);
+    add_child(nexus_hdl, child2, true).await;
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+
+    // Resuming a running rebuild should do nothing.
+    resume_rebuild(nexus_hdl, child2).await.unwrap();
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+
+    // Pause a running rebuild.
+    pause_rebuild(nexus_hdl, child2).await;
+    assert!(wait_for_rebuild_state(
+        nexus_hdl,
+        child2,
+        "paused",
+        Duration::from_secs(1),
+    )
+    .await
+    .unwrap());
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+
+    // Pause the paused rebuild.
+    pause_rebuild(nexus_hdl, child2).await;
+    assert!(wait_for_rebuild_state(
+        nexus_hdl,
+        child2,
+        "paused",
+        Duration::from_secs(1),
+    )
+    .await
+    .unwrap());
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+
+    // Start another rebuild for the same child.
+    start_rebuild(nexus_hdl, child2)
+        .await
+        .expect_err("Should already be rebuilding child");
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 1);
+
+    // Stop rebuild - this will cause the rebuild job to be removed
+    stop_rebuild(nexus_hdl, child2).await;
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 0);
+}
+
+/// Test multiple rebuilds running at the same time.
+#[tokio::test]
+async fn rebuild_multiple() {
+    let child_names = vec!["ms1", "ms2", "ms3", "ms4", "ms5"];
+    let test = Builder::new()
+        .name("rebuild_multiple")
+        .network("10.1.0.0/16")
+        .add_container(child_names[0])
+        .add_container(child_names[1])
+        .add_container(child_names[2])
+        .add_container(child_names[3])
+        .add_container(child_names[4])
+        .with_clean(true)
+        .with_prune(true)
+        .build()
+        .await
+        .unwrap();
+
+    #[derive(Clone)]
+    struct Child {
+        hdl: RpcHandle,
+        share_uri: String,
+    }
+
+    let mut children = vec![];
+    for name in child_names {
+        let share_uri = bdev_create_and_share(
+            &mut test.grpc_handle(name).await.unwrap(),
+            None,
+            None,
+        )
+        .await;
+        children.push(Child {
+            hdl: test.grpc_handle(name).await.unwrap(),
+            share_uri,
+        });
+    }
+
+    // Create a nexus with a single healthy child.
+    let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap();
+    create_nexus(nexus_hdl, vec![children[1].share_uri.clone()]).await;
+
+    let degraded_children = children[2 ..= 4].to_vec();
+    // Add children and pause rebuilds.
+    for child in &degraded_children {
+        add_child(nexus_hdl, &child.share_uri, true).await;
+        pause_rebuild(nexus_hdl, &child.share_uri).await;
+    }
+    assert_eq!(
+        get_num_rebuilds(nexus_hdl).await as usize,
+        degraded_children.len()
+    );
+
+    // Resume rebuilds and wait for completion then remove the children.
+    for child in &degraded_children {
+        resume_rebuild(nexus_hdl, &child.share_uri)
+            .await
+            .expect("Failed to resume rebuild");
+        assert!(
+            wait_for_rebuild_completion(
+                nexus_hdl,
+                &child.share_uri,
+                Duration::from_secs(10),
+            )
+            .await
+        );
+        remove_child(nexus_hdl, &child.share_uri).await;
+    }
+    assert_eq!(get_num_rebuilds(nexus_hdl).await, 0);
+
+    // Add the children back again
+    for child in &degraded_children {
+        add_child(nexus_hdl, &child.share_uri, true).await;
+    }
+
+    // Wait for rebuilds to complete
+    for child in &degraded_children {
+        assert!(
+            wait_for_rebuild_completion(
+                nexus_hdl,
+                &child.share_uri,
+                Duration::from_secs(10),
+            )
+            .await
+        );
+    }
+}
+
+/// Test rebuild while running front-end I/O.
+/// Note: This test can take some time to complete because it is running fio
+/// and then comparing the contents of the children to make sure they are
+/// in-sync.
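+/// Rough flow: create a nexus with one remote child, publish it over NVMf,
+/// run fio verify against the nexus device, hot-add a second child while fio
+/// is still running, wait for the rebuild to finish, then byte-compare the
+/// two children.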
+#[tokio::test] +async fn rebuild_with_load() { + init_tracing(); + let test = start_infrastructure("rebuild_with_load").await; + let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap(); + let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); + let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); + + const CHILD_SIZE_MB: u64 = 100; + + // Create a nexus with 1 child. + let child1 = + bdev_create_and_share(ms2, Some(CHILD_SIZE_MB), Some("disk1".into())) + .await; + create_nexus(nexus_hdl, vec![child1.clone()]).await; + + // Connect to nexus over NVMf. + let nexus_uri = publish_nexus(nexus_hdl).await; + let nexus_tgt = nvmf_connect(nexus_uri.clone()); + + // Run fio against nexus. + let (s, r) = unbounded::(); + let nvmf_tgt = nexus_tgt.clone(); + std::thread::spawn(move || { + if let Err(e) = s.send(common::fio_verify_size(&nvmf_tgt, NEXUS_SIZE)) { + tracing::error!("Failed to send fio complete with error {}", e); + } + }); + + // Let fio run for a bit. + std::thread::sleep(Duration::from_secs(2)); + + // Add a child and rebuild. + let child2 = + bdev_create_and_share(ms3, Some(CHILD_SIZE_MB), Some("disk2".into())) + .await; + add_child(nexus_hdl, &child2, true).await; + + // Wait for fio to complete + let fio_result = r.recv().unwrap(); + assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); + + // Wait for rebuild to complete. + assert!( + wait_for_rebuild_completion(nexus_hdl, &child2, Duration::from_secs(1)) + .await + ); + + // Disconnect and destroy nexus + nvmf_disconnect(nexus_uri); + destroy_nexus(nexus_hdl).await; + + // Check children are in-sync. + let child1_tgt = nvmf_connect(child1.clone()); + let child2_tgt = nvmf_connect(child2.clone()); + common::compare_devices(&child1_tgt, &child2_tgt, CHILD_SIZE_MB, true); + nvmf_disconnect(child1); + nvmf_disconnect(child2); +} + +/// Build the infrastructure required to run the tests. +async fn start_infrastructure(test_name: &str) -> ComposeTest { + Builder::new() + .name(test_name) + .network("10.1.0.0/16") + .add_container("ms1") + .add_container("ms2") + .add_container("ms3") + .with_clean(true) + .with_prune(true) + .build() + .await + .unwrap() +} + +/// Set up the prerequisites for the tests. +/// Create a nexus on ms1 and create NVMf shares from ms2 & ms3. +/// The number of children to be added to the nexus is passed in as a parameter. +async fn setup_test( + test: &ComposeTest, + num_nexus_children: usize, +) -> (RpcHandle, RpcHandle, RpcHandle) { + // Currently only support creating a nexus with up to 2 children. + assert!(num_nexus_children < 3); + + let mut ms1 = test.grpc_handle("ms1").await.unwrap(); + let mut ms2 = test.grpc_handle("ms2").await.unwrap(); + let mut ms3 = test.grpc_handle("ms3").await.unwrap(); + + let mut replicas = vec![]; + replicas.push(bdev_create_and_share(&mut ms2, None, None).await); + replicas.push(bdev_create_and_share(&mut ms3, None, None).await); + create_nexus(&mut ms1, replicas[0 .. num_nexus_children].to_vec()).await; + (ms1, ms2, ms3) +} + +/// Publish the nexus and return the share uri. +async fn publish_nexus(hdl: &mut RpcHandle) -> String { + let reply = hdl + .mayastor + .publish_nexus(PublishNexusRequest { + uuid: NEXUS_UUID.into(), + key: "".to_string(), + share: ShareProtocolNexus::NexusNvmf as i32, + }) + .await + .unwrap() + .into_inner(); + reply.device_uri +} + +/// Create and share a bdev and return the share uri. 
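+/// Defaults to a 100 MiB bdev named "disk0"; note that get_share_uri()
+/// assumes the "disk0" name, so callers that override disk_name must build
+/// the share uri themselves.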
+async fn bdev_create_and_share(
+    hdl: &mut RpcHandle,
+    child_size_mb: Option<u64>,
+    disk_name: Option<String>,
+) -> String {
+    let size_mb = child_size_mb.unwrap_or(100);
+    let disk_name = match disk_name {
+        Some(n) => n,
+        None => "disk0".to_string(),
+    };
+    bdev_create(hdl, size_mb, disk_name.clone()).await;
+    bdev_share(hdl, disk_name).await
+}
+
+/// Create a bdev and return the uri.
+async fn bdev_create(
+    hdl: &mut RpcHandle,
+    size_mb: u64,
+    disk_name: String,
+) -> String {
+    let uri = format!("malloc:///{}?size_mb={}", disk_name, size_mb,);
+    hdl.bdev
+        .create(BdevUri {
+            uri: uri.clone(),
+        })
+        .await
+        .unwrap();
+    uri
+}
+
+/// Destroy a bdev.
+async fn bdev_destroy(hdl: &mut RpcHandle, uri: String) {
+    hdl.bdev
+        .destroy(BdevUri {
+            uri,
+        })
+        .await
+        .expect("Failed to destroy bdev");
+}
+
+/// Share a bdev and return the share uri.
+async fn bdev_share(hdl: &mut RpcHandle, name: String) -> String {
+    let result = hdl
+        .bdev
+        .share(BdevShareRequest {
+            name,
+            proto: "nvmf".into(),
+        })
+        .await
+        .expect("Failed to share bdev")
+        .into_inner();
+    result.uri
+}
+
+/// Get a bdev share uri.
+fn get_share_uri(hdl: &RpcHandle) -> String {
+    format!(
+        "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0",
+        hdl.endpoint.ip()
+    )
+}
+
+/// Unshare a bdev.
+async fn bdev_unshare(hdl: &mut RpcHandle) {
+    hdl.bdev
+        .unshare(CreateReply {
+            name: "disk0".to_string(),
+        })
+        .await
+        .unwrap();
+}
+
+/// Create a nexus.
+async fn create_nexus(hdl: &mut RpcHandle, children: Vec<String>) {
+    hdl.mayastor
+        .create_nexus(CreateNexusRequest {
+            uuid: NEXUS_UUID.into(),
+            size: NEXUS_SIZE,
+            children,
+        })
+        .await
+        .unwrap();
+}
+
+/// Delete a nexus.
+async fn destroy_nexus(hdl: &mut RpcHandle) {
+    hdl.mayastor
+        .destroy_nexus(DestroyNexusRequest {
+            uuid: NEXUS_UUID.into(),
+        })
+        .await
+        .expect("Failed to destroy nexus");
+}
+
+/// Add a child to the nexus.
+async fn add_child(hdl: &mut RpcHandle, child: &str, rebuild: bool) {
+    hdl.mayastor
+        .add_child_nexus(AddChildNexusRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+            norebuild: !rebuild,
+        })
+        .await
+        .unwrap();
+}
+
+/// Remove a child from the nexus.
+async fn remove_child(hdl: &mut RpcHandle, child: &str) {
+    hdl.mayastor
+        .remove_child_nexus(RemoveChildNexusRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+        .expect("Failed to remove child");
+}
+
+/// Start a rebuild for the given child.
+async fn start_rebuild(hdl: &mut RpcHandle, child: &str) -> Result<(), ()> {
+    match hdl
+        .mayastor
+        .start_rebuild(StartRebuildRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+    {
+        Ok(_) => Ok(()),
+        Err(_) => Err(()),
+    }
+}
+
+/// Stop a rebuild for the given child.
+async fn stop_rebuild(hdl: &mut RpcHandle, child: &str) {
+    hdl.mayastor
+        .stop_rebuild(StopRebuildRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+        .expect("Failed to stop rebuild");
+}
+
+/// Pause a rebuild for the given child.
+async fn pause_rebuild(hdl: &mut RpcHandle, child: &str) {
+    hdl.mayastor
+        .pause_rebuild(PauseRebuildRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+        .expect("Failed to pause rebuild");
+}
+
+/// Resume a rebuild for the given child.
+async fn resume_rebuild(hdl: &mut RpcHandle, child: &str) -> Result<(), ()> {
+    match hdl
+        .mayastor
+        .resume_rebuild(ResumeRebuildRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+    {
+        Ok(_) => Ok(()),
+        Err(_) => Err(()),
+    }
+}
+
+/// Get the number of rebuilds.
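+/// (Read from the `rebuilds` counter that list_nexus reports for NEXUS_UUID,
+/// rather than by querying the rebuild jobs directly.)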
+async fn get_num_rebuilds(hdl: &mut RpcHandle) -> u32 {
+    let n = get_nexus(hdl, NEXUS_UUID).await;
+    n.rebuilds
+}
+
+/// Get the rebuild progress for the given child.
+async fn get_rebuild_progress(hdl: &mut RpcHandle, child: &str) -> u32 {
+    let reply = hdl
+        .mayastor
+        .get_rebuild_progress(RebuildProgressRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+        .expect("Failed to get rebuild progress");
+    reply.into_inner().progress
+}
+
+/// Waits on the given rebuild state or times out.
+/// Returns false if a timeout occurs.
+async fn wait_for_rebuild_state(
+    hdl: &mut RpcHandle,
+    child: &str,
+    state: &str,
+    timeout: Duration,
+) -> Option<bool> {
+    let time = std::time::Instant::now();
+    while time.elapsed().as_millis() < timeout.as_millis() {
+        match get_rebuild_state(hdl, child).await {
+            Some(rebuild_state) => {
+                if rebuild_state == state {
+                    return Some(true);
+                }
+            }
+            None => return None,
+        }
+        std::thread::sleep(Duration::from_millis(10));
+    }
+    Some(false)
+}
+
+/// Get the current state of the rebuild for the given child uri.
+/// Returns None if the rebuild job isn't found.
+async fn get_rebuild_state(
+    hdl: &mut RpcHandle,
+    child: &str,
+) -> Option<String> {
+    match hdl
+        .mayastor
+        .get_rebuild_state(RebuildStateRequest {
+            uuid: NEXUS_UUID.into(),
+            uri: child.into(),
+        })
+        .await
+    {
+        Ok(rebuild_state) => Some(rebuild_state.into_inner().state),
+        Err(_) => None,
+    }
+}
+
+/// Returns true if the rebuild has completed.
+/// A rebuild is deemed to be complete if the destination child is online.
+async fn wait_for_rebuild_completion(
+    hdl: &mut RpcHandle,
+    child: &str,
+    timeout: Duration,
+) -> bool {
+    wait_for_child_state(hdl, child, ChildState::ChildOnline, timeout).await
+}
+
+/// Waits on the given child state or times out.
+/// Returns false if a timeout occurs.
+async fn wait_for_child_state(
+    hdl: &mut RpcHandle,
+    child: &str,
+    state: ChildState,
+    timeout: Duration,
+) -> bool {
+    let time = std::time::Instant::now();
+    while time.elapsed().as_millis() < timeout.as_millis() {
+        let c = get_child(hdl, NEXUS_UUID, child).await;
+        if c.state == state as i32 {
+            return true;
+        }
+        std::thread::sleep(Duration::from_millis(10));
+    }
+    false
+}
+
+/// Returns the state of the nexus with the given uuid.
+async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> Option<i32> {
+    let list = hdl
+        .mayastor
+        .list_nexus(Null {})
+        .await
+        .unwrap()
+        .into_inner()
+        .nexus_list;
+    for nexus in list {
+        if nexus.uuid == uuid {
+            return Some(nexus.state);
+        }
+    }
+    None
+}
+
+/// Returns the nexus with the given uuid.
+async fn get_nexus(hdl: &mut RpcHandle, uuid: &str) -> Nexus {
+    let nexus_list = hdl
+        .mayastor
+        .list_nexus(Null {})
+        .await
+        .unwrap()
+        .into_inner()
+        .nexus_list;
+    let n = nexus_list
+        .iter()
+        .filter(|n| n.uuid == uuid)
+        .collect::<Vec<&Nexus>>();
+    assert_eq!(n.len(), 1);
+    n[0].clone()
+}
+
+/// Returns a child with the given URI.
+async fn get_child(
+    hdl: &mut RpcHandle,
+    nexus_uuid: &str,
+    child_uri: &str,
+) -> Child {
+    let n = get_nexus(hdl, nexus_uuid).await;
+    let c = n
+        .children
+        .iter()
+        .filter(|c| c.uri == child_uri)
+        .collect::<Vec<&Child>>();
+    assert_eq!(c.len(), 1);
+    c[0].clone()
+}
+
+/// Connect to the NVMf target and return the device name.
+fn nvmf_connect(uri: String) -> String {
+    let target = nvmeadm::NvmeTarget::try_from(uri).unwrap();
+    let devices = target.connect().unwrap();
+    devices[0].path.to_string()
+}
+
+/// Disconnect from the NVMf target.
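+/// Expects the same share/publish uri that was passed to nvmf_connect();
+/// both helpers resolve the target from the uri via NvmeTarget::try_from().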
+fn nvmf_disconnect(uri: String) {
+    let target = nvmeadm::NvmeTarget::try_from(uri).unwrap();
+    target.disconnect().unwrap();
+}
+
+/// Initialise tracing.
+fn init_tracing() {
+    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
+        tracing_subscriber::fmt().with_env_filter(filter).init();
+    } else {
+        tracing_subscriber::fmt().with_env_filter("info").init();
+    }
+}
+
+/// Checks if the nexus state matches the expected state.
+async fn check_nexus_state(nexus_hdl: &mut RpcHandle, state: NexusState) {
+    assert_eq!(
+        get_nexus_state(nexus_hdl, NEXUS_UUID).await.unwrap(),
+        state as i32
+    );
+}

From a9c1adba3787a07113d9761e90590984241ee73e Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Thu, 19 Nov 2020 11:37:15 +0000
Subject: [PATCH 16/85] Defer nexus creation until the volume is published.

This addresses github issue #508: "Bringing down a node with nexus
brings down the whole volume".

Previously the nexus was created when the volume was created and it
could not be moved between nodes until the volume was destroyed. With
these changes the nexus is not created until the volume is actually
published, and it is destroyed when the volume is unpublished.

Why?

* A nexus for an unused volume was occasionally causing problems in the
  data plane and generated unnecessary traffic (keep-alive requests)
  between the nexus and its replicas.

* We could not recover a volume whose nexus and app were running on a
  node that went offline, because the nexus could not move between
  nodes even if the volume was unpublished.

* We could not recover the nexus of a published volume that came online
  after being offline for some time. Once the nexus was lost, it was
  lost forever.

So this is a significant improvement over the existing situation,
though we still cannot recover a published volume whose nexus has
crashed. For that we need multipathing support in the nvmf initiator,
which will be done at some point in the future.
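A minimal sketch of the intended lifecycle (illustrative only; the
names below are simplified stand-ins rather than the actual moac code):

    // Hypothetical sketch: the nexus exists only between publish()
    // and unpublish().
    interface Nexus {
      publish(protocol: string): Promise<string>; // returns deviceUri
      unpublish(): Promise<void>;
      destroy(): Promise<void>;
    }

    class Volume {
      private nexus?: Nexus;
      constructor(private createNexus: () => Promise<Nexus>) {}

      async publish(protocol: string): Promise<string> {
        if (!this.nexus) {
          // Created lazily, so it can be placed on whatever node is
          // most suitable at publish time.
          this.nexus = await this.createNexus();
        }
        return this.nexus.publish(protocol);
      }

      async unpublish(): Promise<void> {
        if (this.nexus) {
          await this.nexus.unpublish();
          await this.nexus.destroy(); // freed so it can move next time
          this.nexus = undefined;
        }
      }
    }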
--- csi/moac/.gitignore | 3 + csi/moac/README.md | 119 ++-- csi/moac/crds/mayastorvolume.yaml | 22 +- csi/moac/csi.js | 50 +- csi/moac/event_stream.js | 4 +- csi/moac/index.js | 2 +- csi/moac/nexus.ts | 88 ++- csi/moac/{node.js => node.ts} | 113 ++-- csi/moac/package.json | 2 +- csi/moac/pool.ts | 13 +- csi/moac/registry.js | 7 +- csi/moac/replica.ts | 14 +- csi/moac/test/csi_test.js | 89 +-- csi/moac/test/event_stream_test.js | 10 +- csi/moac/test/nexus_test.js | 8 +- csi/moac/test/node_stub.js | 11 +- csi/moac/test/node_test.js | 28 +- csi/moac/test/pool_test.js | 18 +- csi/moac/test/replica_test.js | 31 +- csi/moac/test/rest_api_test.js | 6 +- csi/moac/test/volume_operator_test.js | 84 ++- csi/moac/test/volume_test.js | 158 ++++- csi/moac/test/volumes_test.js | 599 +++++++++++------- csi/moac/tsconfig.json | 3 + csi/moac/volume.js | 700 -------------------- csi/moac/volume.ts | 877 ++++++++++++++++++++++++++ csi/moac/volume_operator.ts | 89 +-- csi/moac/{volumes.js => volumes.ts} | 149 ++--- csi/moac/watcher.ts | 2 +- 29 files changed, 1879 insertions(+), 1420 deletions(-) rename csi/moac/{node.js => node.ts} (81%) delete mode 100644 csi/moac/volume.js create mode 100644 csi/moac/volume.ts rename csi/moac/{volumes.js => volumes.ts} (57%) diff --git a/csi/moac/.gitignore b/csi/moac/.gitignore index 1ca28f6cc..f9efbe2ac 100644 --- a/csi/moac/.gitignore +++ b/csi/moac/.gitignore @@ -3,9 +3,12 @@ /result /watcher.js /nexus.js +/node.js /node_operator.js /pool.js /pool_operator.js /replica.js +/volume.js +/volumes.js /volume_operator.js /*.js.map diff --git a/csi/moac/README.md b/csi/moac/README.md index d2094949e..730f51d17 100644 --- a/csi/moac/README.md +++ b/csi/moac/README.md @@ -229,58 +229,73 @@ crucial for understanding what and when can happen with the volume. Imperfect approximation of FSA diagram for the volume follows: ```text - new volume - + - | - +----v-----+ - | | - | pending <--+ nexus deleted - | | - +----+-----+ - | -nexus modified+--| new nexus -replica events | +----------+ - v yes | | - nexus offline? +-------------> offline | - + | | - | +----------+ - | no - | +----------+ - v no | | - any replica online? +-------> faulted | - + | | - | +----------+ - | yes - | - v - insufficient # of online yes - and rebuild replicas? +--->create a new - + replica (async) - | + - | | - | no | - | +----v-----+ - v yes | | - any replica in rebuild? +------> degraded | - + | | - | +----------+ - | no - | - v - +----+------+ - | | - | healthy | - | | - +----+------+ - | - v yes - any replica faulty? +--> remove it - + - | no - v - more online replicas yes - than needed? +---> remove the least - preferred replica + new volume + + + | + +----v-----+ + | | + | pending | + | | + +----+-----+ + | + | set up the volume + | + +----v-----+ + | | + | healthy | + | | + +----+-----+ + | + | + yes v no + +--+is volume published?+--+ + | | + | | +----------+ + v v no | | + yes is any replica any replica online? +-------> faulted | +reshare <---+ unreachable from + | | +replica nexus? | +----------+ + + | yes + | no | + yes v v +----------+ +recreate <---+ is nexus missing? insufficient # of sound yes | | create new +nexus + replicas? +-----> degraded +----> replica + | no | | + no v +-----^----+ +share <---+ is nexus exposed? | +nexus + | + | yes | + v | + insufficient # of yes | + sound replicas? +--------------------------------------------+ + + + | no + v +------------+ + volume under yes | | + rebuild? 
+-----> degraded | + + | | + | no +------------+ + v + +---+-----+ + | | + | healthy | + | | + +---+-----+ + | + v yes + any replica faulty? +--> remove it + + + | no + v + more online replicas yes + than needed? +---> remove the least + + preferred replica + | no + v + should move + volume to yes + different +---> create new replica + node(s)? ``` ## Troubleshooting diff --git a/csi/moac/crds/mayastorvolume.yaml b/csi/moac/crds/mayastorvolume.yaml index a92ab1b8c..280d5e9b2 100644 --- a/csi/moac/crds/mayastorvolume.yaml +++ b/csi/moac/crds/mayastorvolume.yaml @@ -12,7 +12,7 @@ spec: # The status part is updated by the controller and spec part by the user # usually. Well, not in this case. The mayastor's control plane updates both # parts and user is allowed to update some of the properties in the spec - # too. The status part is read-only for the user as it is usually done. + # too. Though status part remains read-only for the user. status: {} schema: openAPIV3Schema: @@ -70,13 +70,18 @@ spec: reason: description: Further explanation of the state if applicable. type: string - node: - description: Name of the k8s node with the nexus. - type: string + targetNodes: + description: k8s node(s) with storage targets for the volume. + type: array + items: + type: string nexus: description: Frontend of the volume. type: object properties: + node: + description: Name of the k8s node with the nexus. + type: string deviceUri: description: URI of a block device for IO. type: string @@ -115,10 +120,11 @@ spec: description: Is replica reachable by control plane. type: boolean additionalPrinterColumns: - - name: Node - type: string - description: Node where the volume is located - jsonPath: .status.node + - name: Targets + type: array + description: k8s node(s) with storage targets for the volume. + jsonPath: .status.targetNodes + items: string - name: Size type: integer format: int64 diff --git a/csi/moac/csi.js b/csi/moac/csi.js index cc3602c00..964a7cb03 100644 --- a/csi/moac/csi.js +++ b/csi/moac/csi.js @@ -77,15 +77,17 @@ function checkCapabilities (caps) { // @param {object} volume Volume object. // @returns {object} K8s CSI volume object. function createK8sVolumeObject (volume) { - return { + const obj = { volumeId: volume.uuid, capacityBytes: volume.getSize(), - accessibleTopology: [ - { - segments: { 'kubernetes.io/hostname': volume.getNodeName() } - } - ] + accessibleTopology: [] }; + if (volume.protocol.toLowerCase() === 'nbd') { + obj.accessibleTopology.push({ + segments: { 'kubernetes.io/hostname': volume.getNodeName() } + }); + } + return obj; } // CSI Controller implementation. @@ -117,9 +119,9 @@ class CsiServer { // Wrap all controller methods by a check for readiness of the csi server // and request/response logging to avoid repeating code. - var self = this; - var controllerMethods = {}; - var methodNames = [ + const self = this; + const controllerMethods = {}; + let methodNames = [ 'createVolume', 'deleteVolume', 'controllerPublishVolume', @@ -197,7 +199,7 @@ class CsiServer { // Stop the grpc server. 
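   // Note: grpc's tryShutdown() is graceful - the server stops accepting new
   // calls and invokes the callback once in-flight calls have drained.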
async stop () { - var self = this; + const self = this; return new Promise((resolve, reject) => { log.info('Shutting down grpc server'); self.server.tryShutdown(resolve); @@ -237,7 +239,7 @@ class CsiServer { } getPluginCapabilities (_, cb) { - var caps = ['CONTROLLER_SERVICE', 'VOLUME_ACCESSIBILITY_CONSTRAINTS']; + const caps = ['CONTROLLER_SERVICE', 'VOLUME_ACCESSIBILITY_CONSTRAINTS']; log.debug('getPluginCapabilities request: ' + caps.join(', ')); cb(null, { capabilities: caps.map((c) => { @@ -256,7 +258,7 @@ class CsiServer { // async controllerGetCapabilities (_, cb) { - var caps = [ + const caps = [ 'CREATE_DELETE_VOLUME', 'PUBLISH_UNPUBLISH_VOLUME', 'LIST_VOLUMES', @@ -271,7 +273,7 @@ class CsiServer { } async createVolume (call, cb) { - var args = call.request; + const args = call.request; log.debug( `Request to create volume "${args.name}" with size ` + @@ -366,7 +368,7 @@ class CsiServer { } // create the volume - var volume; + let volume; try { volume = await this.volumes.createVolume(uuid, { replicaCount: count, @@ -402,7 +404,7 @@ class CsiServer { } async deleteVolume (call, cb) { - var args = call.request; + const args = call.request; log.debug(`Request to destroy volume "${args.volumeId}"`); @@ -416,8 +418,8 @@ class CsiServer { } async listVolumes (call, cb) { - var args = call.request; - var ctx = {}; + const args = call.request; + let ctx = {}; if (args.startingToken) { ctx = this.listContexts[args.startingToken]; @@ -434,7 +436,7 @@ class CsiServer { log.debug('Request to list volumes'); ctx = { volumes: this.volumes - .get() + .list() .map(createK8sVolumeObject) .map((v) => { return { volume: v }; @@ -446,7 +448,7 @@ class CsiServer { args.maxEntries = 1000; } - var entries = ctx.volumes.splice(0, args.maxEntries); + const entries = ctx.volumes.splice(0, args.maxEntries); // TODO: purge list contexts older than .. (1 min) if (ctx.volumes.length > 0) { @@ -462,7 +464,7 @@ class CsiServer { } async controllerPublishVolume (call, cb) { - var args = call.request; + const args = call.request; log.debug( `Request to publish volume "${args.volumeId}" on "${args.nodeId}"` @@ -543,7 +545,7 @@ class CsiServer { } async controllerUnpublishVolume (call, cb) { - var args = call.request; + const args = call.request; log.debug(`Request to unpublish volume "${args.volumeId}"`); @@ -569,7 +571,7 @@ class CsiServer { } async validateVolumeCapabilities (call, cb) { - var args = call.request; + const args = call.request; log.debug(`Request to validate volume capabilities for "${args.volumeId}"`); @@ -600,8 +602,8 @@ class CsiServer { // XXX Is the caller interested in total capacity (sum of all pools) or // a capacity usable by a single volume? async getCapacity (call, cb) { - var nodeName; - var args = call.request; + let nodeName; + const args = call.request; if (args.volumeCapabilities) { try { diff --git a/csi/moac/event_stream.js b/csi/moac/event_stream.js index 6193cebdf..465b84bc9 100644 --- a/csi/moac/event_stream.js +++ b/csi/moac/event_stream.js @@ -83,7 +83,7 @@ class EventStream extends Readable { } // Populate stream with objects which already exist but for consumer // they appear as new. 
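    // (On creation the stream emits every known node/pool/replica/nexus/volume
    // once with eventType "new" before any live events follow.)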
- var self = this; + const self = this; if (self.registry) { self.registry.getNode().forEach((node) => { self.events.push({ @@ -122,7 +122,7 @@ class EventStream extends Readable { }); } if (self.volumes) { - self.volumes.get().forEach((volume) => { + self.volumes.list().forEach((volume) => { self.events.push({ kind: 'volume', eventType: 'new', diff --git a/csi/moac/index.js b/csi/moac/index.js index 58d081fc8..4b794846e 100755 --- a/csi/moac/index.js +++ b/csi/moac/index.js @@ -11,7 +11,7 @@ const logger = require('./logger'); const Registry = require('./registry'); const { NodeOperator } = require('./node_operator'); const { PoolOperator } = require('./pool_operator'); -const Volumes = require('./volumes'); +const { Volumes } = require('./volumes'); const { VolumeOperator } = require('./volume_operator'); const ApiServer = require('./rest_api'); const CsiServer = require('./csi').CsiServer; diff --git a/csi/moac/nexus.ts b/csi/moac/nexus.ts index b5dffde75..58a5fb2be 100644 --- a/csi/moac/nexus.ts +++ b/csi/moac/nexus.ts @@ -7,9 +7,40 @@ const log = require('./logger').Logger('nexus'); import { Replica } from './replica'; -function compareChildren(a: any, b: any) { - assert(a.uri); - assert(b.uri); +// Protocol used to export nexus (volume) +export enum Protocol { + Unknown = 'unknown', + Nbd = 'nbd', + Iscsi = 'iscsi', + Nvmf = 'nvmf', +} + +export function protocolFromString(val: string): Protocol { + if (val == Protocol.Nbd) { + return Protocol.Nbd; + } else if (val == Protocol.Iscsi) { + return Protocol.Iscsi; + } else if (val == Protocol.Nvmf) { + return Protocol.Nvmf; + } else { + return Protocol.Unknown; + } +} + +// Represents a child with uri and state properties. +// TODO: define state as enum. +export class Child { + constructor(public uri: string, public state: string) { + assert(uri); + assert(state); + } + isEqual(ch: Child) { + return (ch.uri === this.uri && ch.state === this.state); + } +} + +// Used with .sort() method to enforce deterministic order of children. +function compareChildren(a: Child, b: Child) { return a.uri.localeCompare(b.uri); } @@ -19,7 +50,7 @@ export class Nexus { size: number; deviceUri: string; state: string; - children: any[]; + children: Child[]; // Construct new nexus object. // @@ -37,7 +68,9 @@ export class Nexus { this.deviceUri = props.deviceUri; this.state = props.state; // children of the nexus (replica URIs and their state) - this.children = [].concat(props.children || []).sort(compareChildren); + this.children = (props.children || []) + .map((ch: any) => new Child(ch.uri, ch.state)) + .sort(compareChildren); } // Stringify the nexus @@ -69,8 +102,21 @@ export class Nexus { this.state = props.state; changed = true; } - const children = [].concat(props.children).sort(compareChildren); - if (!_.isEqual(this.children, children)) { + const children = props.children + .map((ch: any) => new Child(ch.uri, ch.state)) + .sort(compareChildren); + let childrenChanged = false; + if (this.children.length !== children.length) { + childrenChanged = true; + } else { + for (let i = 0; i < this.children.length; i++) { + if (!this.children[i].isEqual(children[i])) { + childrenChanged = true; + break; + } + } + } + if (childrenChanged) { this.children = children; changed = true; } @@ -122,10 +168,10 @@ export class Nexus { } // Publish the nexus to make accessible for IO. - // @params {string} protocol The nexus share protocol. - // @returns {string} The device path of nexus block device. + // @params protocol The nexus share protocol. 
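The `Protocol` enum and `Child` class introduced above replace loosely typed strings and plain `{ uri, state }` objects. The child-list comparison that `merge()` now performs, restated in isolation purely for illustration:

```
// The children comparison in Nexus.merge(), restated standalone: both
// lists are sorted deterministically, then compared element-wise with
// Child.isEqual instead of generic deep equality.
class Child {
  constructor (public uri: string, public state: string) {}
  isEqual (ch: Child): boolean {
    return ch.uri === this.uri && ch.state === this.state;
  }
}

function childrenChanged (current: Child[], incoming: Child[]): boolean {
  if (current.length !== incoming.length) return true;
  return current.some((ch, i) => !ch.isEqual(incoming[i]));
}

const a = [new Child('bdev:///uuid1', 'CHILD_ONLINE')];
const b = [new Child('bdev:///uuid1', 'CHILD_DEGRADED')];
console.log(childrenChanged(a, b)); // true - the state differs
```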
+ // @returns The device path of nexus block device. // - async publish(protocol: string) { + async publish(protocol: Protocol): Promise { var res; if (this.deviceUri) { @@ -181,6 +227,12 @@ export class Nexus { this._emitMod(); } + // Get URI under which the nexus is published or "undefined" if it hasn't been + // published. + getUri(): string | undefined { + return this.deviceUri || undefined; + } + // Add replica to the nexus. // // @param {object} replica Replica object to add to the nexus. @@ -208,7 +260,7 @@ export class Nexus { } // The child will need to be rebuilt when added, but until we get // confirmation back from the nexus, set it as pending - this.children.push(childInfo); + this.children.push(new Child(childInfo.uri, childInfo.state)); this.children.sort(compareChildren); log.info(`Replica uri "${uri}" added to the nexus "${this}"`); this._emitMod(); @@ -248,18 +300,8 @@ export class Nexus { // Destroy nexus on storage node. async destroy() { log.debug(`Destroying nexus "${this}" ...`); - - try { - await this.node.call('destroyNexus', { uuid: this.uuid }); - log.info(`Destroyed nexus "${this}"`); - } catch (err) { - // TODO: make destroyNexus idempotent - if (err.code !== GrpcCode.NOT_FOUND) { - throw err; - } - log.warn(`Destroyed nexus "${this}" does not exist`); - } - + await this.node.call('destroyNexus', { uuid: this.uuid }); + log.info(`Destroyed nexus "${this}"`); this.unbind(); } } \ No newline at end of file diff --git a/csi/moac/node.js b/csi/moac/node.ts similarity index 81% rename from csi/moac/node.js rename to csi/moac/node.ts index 1f2263359..ecc2b6139 100644 --- a/csi/moac/node.js +++ b/csi/moac/node.ts @@ -2,14 +2,13 @@ // replicas). Consumers can use it to receive information about the storage // objects and notifications about the changes. -'use strict'; +import assert from 'assert'; +import { Pool } from './pool'; +import { Nexus } from './nexus'; +import { Replica } from './replica'; -const assert = require('assert'); const EventEmitter = require('events'); const Workq = require('./workq'); -const { Nexus } = require('./nexus'); -const { Pool } = require('./pool'); -const { Replica } = require('./replica'); const log = require('./logger').Logger('node'); const { GrpcClient, GrpcCode, GrpcError } = require('./grpc_client'); @@ -19,7 +18,19 @@ const { GrpcClient, GrpcCode, GrpcError } = require('./grpc_client'); // "node": node related events with payload { eventType: "sync", object: node } // when the node is sync'd after previous sync failure(s). // "pool", "replica", "nexus": with eventType "new", "mod", "del". -class Node extends EventEmitter { +export class Node extends EventEmitter { + name: string; + syncPeriod: number; + syncRetry: number; + syncBadLimit: number; + endpoint: string | null; + client: any; + workq: any; + syncFailed: number; + syncTimer: NodeJS.Timeout | null; + nexus: Nexus[]; + pools: Pool[]; + // Create a storage node object. // // @param {string} name Node name. @@ -27,7 +38,7 @@ class Node extends EventEmitter { // @param {number} opts.syncPeriod How often to sync healthy node (in ms). // @param {number} opts.syncRetry How often to retry sync if it failed (in ms). // @param {number} opts.syncBadLimit Flip the node to offline state after this many retries have failed. - constructor (name, opts) { + constructor (name: string, opts: any) { opts = opts || {}; super(); @@ -51,12 +62,12 @@ class Node extends EventEmitter { } // Stringify node object. 
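Note that `destroy()` above no longer tolerates NOT_FOUND itself (the removed "TODO: make destroyNexus idempotent" workaround), presumably because the destroy calls are now idempotent on the mayastor side; the same simplification appears for pools and replicas later in this series. If a caller still needs to be defensive, the tolerance has to move to the call site; a hedged sketch using the numeric gRPC status code:

```
// Illustrative call-site tolerance for NOT_FOUND now that the destroy
// methods propagate it. 5 is the gRPC NOT_FOUND status code
// (GrpcCode.NOT_FOUND in moac's grpc_client).
async function destroyIgnoringNotFound (destroy: () => Promise<void>): Promise<void> {
  try {
    await destroy();
  } catch (err: any) {
    if (err.code !== 5) throw err;
    // the object was already gone - treat as success
  }
}
```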
- toString () { + toString(): string { return this.name; } // Create grpc connection to the mayastor server - connect (endpoint) { + connect(endpoint: string) { if (this.client) { if (this.endpoint === endpoint) { // nothing changed @@ -66,7 +77,10 @@ class Node extends EventEmitter { `mayastor endpoint on node "${this.name}" changed from "${this.endpoint}" to "${endpoint}"` ); this.client.close(); - clearTimeout(this.syncTimer); + if (this.syncTimer) { + clearTimeout(this.syncTimer); + this.syncTimer = null; + } } } else { log.info(`new mayastor node "${this.name}" with endpoint "${endpoint}"`); @@ -77,18 +91,20 @@ class Node extends EventEmitter { } // Close the grpc connection - disconnect () { + disconnect() { log.info(`mayastor on node "${this.name}" is gone`); assert(this.client); this.client.close(); this.client = null; - clearTimeout(this.syncTimer); - this.syncTimer = null; + if (this.syncTimer) { + clearTimeout(this.syncTimer); + this.syncTimer = null; + } this.syncFailed = this.syncBadLimit + 1; this._offline(); } - unbind () { + unbind() { // todo: on user explicit removal should we destroy the pools as well? this.pools.forEach((pool) => pool.unbind()); this.nexus.forEach((nexus) => nexus.unbind()); @@ -96,7 +112,7 @@ class Node extends EventEmitter { // The node is considered broken, emit offline events on all objects // that are present on the node. - _offline () { + _offline() { this.emit('node', { eventType: 'mod', object: this @@ -108,15 +124,15 @@ class Node extends EventEmitter { // Call grpc method on storage node. The calls are serialized in order // to prevent race conditions and inconsistencies. // - // @param {string} method gRPC method name. - // @param {object} args Arguments for gRPC method. - // @returns {object} A promise that evals to return value of gRPC method. + // @param method gRPC method name. + // @param args Arguments for gRPC method. + // @returns A promise that evals to return value of gRPC method. // - async call (method, args) { + async call(method: string, args: any): Promise { return this.workq.push({ method, args }, this._call.bind(this)); } - async _call (ctx) { + async _call(ctx: any) { if (!this.client) { throw new GrpcError( GrpcCode.INTERNAL, @@ -128,7 +144,7 @@ class Node extends EventEmitter { // Sync triggered by the timer. It ensures that the sync does run in // parallel with any other rpc call or another sync. - async sync () { + async sync() { let nextSync; this.syncTimer = null; @@ -160,7 +176,7 @@ class Node extends EventEmitter { // Synchronize nexus, replicas and pools. Called from work queue so it cannot // interfere with other grpc calls. - async _sync () { + async _sync() { log.debug(`Syncing the node "${this.name}"`); // TODO: Harden checking of outputs of the methods below @@ -208,7 +224,7 @@ class Node extends EventEmitter { // @param {object[]} pools New pools with properties. // @param {object[]} replicas New replicas with properties. // - _mergePoolsAndReplicas (pools, replicas) { + _mergePoolsAndReplicas(pools: any[], replicas: any[]) { // detect modified and new pools pools.forEach((props) => { const poolReplicas = replicas.filter((r) => r.pool === props.name); @@ -239,7 +255,7 @@ class Node extends EventEmitter { // // @param {object[]} nexusList List of nexus obtained from storage node. 
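`call()` above funnels every gRPC invocation through the node's work queue, so periodic syncs and ad-hoc calls are strictly serialized and cannot race, exactly as the comment states. moac uses its own `Workq` for this; a minimal promise-chain equivalent, just to illustrate the serialization property:

```
// Minimal serializing queue: each pushed task starts only after the
// previous one settles, so calls against the gRPC client never overlap.
class SerialQueue {
  private tail: Promise<unknown> = Promise.resolve();

  push<T> (task: () => Promise<T>): Promise<T> {
    // run the task whether the predecessor fulfilled or rejected
    const next = this.tail.then(task, task);
    // keep the chain alive even if this task rejects
    this.tail = next.catch(() => undefined);
    return next;
  }
}

// usage sketch: queue.push(() => client.call('listPools', {}))
```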
// - _mergeNexus (nexusList) { + _mergeNexus(nexusList: any[]) { // detect modified and new pools nexusList.forEach((props) => { const nexus = this.nexus.find((n) => n.uuid === props.uuid); @@ -248,7 +264,7 @@ class Node extends EventEmitter { nexus.merge(props); } else { // it is a new nexus - this._registerNexus(new Nexus(props, [])); + this._registerNexus(new Nexus(props)); } }); // remove nexus that no longer exist @@ -263,19 +279,19 @@ class Node extends EventEmitter { // @param {object} pool New pool object. // @param {object[]} [replicas] New replicas on the pool. // - _registerPool (pool, replicas) { + _registerPool(pool: Pool, replicas: any) { assert(!this.pools.find((p) => p.name === pool.name)); this.pools.push(pool); pool.bind(this); replicas = replicas || []; - replicas.forEach((r) => pool.registerReplica(new Replica(r))); + replicas.forEach((r: any) => pool.registerReplica(new Replica(r))); } // Remove the pool from list of pools of this node. // // @param {object} pool The pool to be deregistered from the node. // - unregisterPool (pool) { + unregisterPool(pool: Pool) { const idx = this.pools.indexOf(pool); if (idx >= 0) { this.pools.splice(idx, 1); @@ -290,7 +306,7 @@ class Node extends EventEmitter { // // @param {object} nexus New nexus object. // - _registerNexus (nexus) { + _registerNexus(nexus: Nexus) { assert(!this.nexus.find((p) => p.uuid === nexus.uuid)); this.nexus.push(nexus); nexus.bind(this); @@ -300,7 +316,7 @@ class Node extends EventEmitter { // // @param {object} nexus The nexus to be deregistered from the node. // - unregisterNexus (nexus) { + unregisterNexus(nexus: Nexus) { const idx = this.nexus.indexOf(nexus); if (idx >= 0) { this.nexus.splice(idx, 1); @@ -313,44 +329,45 @@ class Node extends EventEmitter { // Get all replicas across all pools on this node. // - // @returns {object[]} All replicas on this node. - getReplicas () { - return this.pools.reduce((acc, pool) => acc.concat(pool.replicas), []); + // @returns All replicas on this node. + getReplicas(): Replica[] { + return this.pools.reduce( + (acc: Replica[], pool: Pool) => acc.concat(pool.replicas), []); } // Return true if the node is considered healthy which means that its state // is synchronized with the state maintained on behalf of this node object. // - // @returns {boolean} True if the node is healthy, false otherwise. + // @returns True if the node is healthy, false otherwise. // - isSynced () { + isSynced(): boolean { return this.syncFailed <= this.syncBadLimit; } // Create storage pool on this node. // - // @param {string} name Name of the new pool. - // @param {string[]} disks List of disk devices for the pool. - // @returns {object} New pool object. + // @param name Name of the new pool. + // @param disks List of disk devices for the pool. + // @returns New pool object. // - async createPool (name, disks) { + async createPool(name: string, disks: string[]): Promise { log.debug(`Creating pool "${name}@${this.name}" ...`); const poolInfo = await this.call('createPool', { name, disks }); log.info(`Created pool "${name}@${this.name}"`); const newPool = new Pool(poolInfo); - this._registerPool(newPool); + this._registerPool(newPool, []); return newPool; } // Create nexus on this node. // - // @param {string} uuid ID of the new nexus. - // @param {number} size Size of nexus in bytes. - // @param {object[]} replicas Replica objects comprising the nexus. - // @returns {object} New nexus object. - async createNexus (uuid, size, replicas) { + // @param uuid ID of the new nexus. 
+ // @param size Size of nexus in bytes. + // @param replicas Replica objects comprising the nexus. + // @returns New nexus object. + async createNexus(uuid: string, size: number, replicas: Replica[]): Promise { const children = replicas.map((r) => r.uri); log.debug(`Creating nexus "${uuid}@${this.name}"`); @@ -364,12 +381,10 @@ class Node extends EventEmitter { // Get IO statistics for all replicas on the node. // - // @returns {object[]} Array of stats where each object is for a different replica and keys are stats names and values stats values. - async getStats () { + // @returns Array of stats where each object is for a different replica and keys are stats names and values stats values. + async getStats(): Promise { log.debug(`Retrieving volume stats from node "${this}"`); const reply = await this.call('statReplicas', {}); return reply.replicas; } } - -module.exports = Node; diff --git a/csi/moac/package.json b/csi/moac/package.json index c6c9fc6de..b05bf62f6 100644 --- a/csi/moac/package.json +++ b/csi/moac/package.json @@ -15,7 +15,7 @@ "scripts": { "prepare": "./bundle_protos.sh", "clean": "rm -f replica.js pool.js nexus.js", - "purge": "rm -rf node_modules proto replica.js pool.js nexus.js watcher.js node_operator.js pool_operator.js volume_operator.js", + "purge": "rm -rf node_modules proto node.js replica.js pool.js nexus.js watcher.js node_operator.js pool_operator.js volume.js volumes.js volume_operator.js *.js.map", "compile": "tsc --pretty", "start": "./index.js", "test": "mocha test/index.js", diff --git a/csi/moac/pool.ts b/csi/moac/pool.ts index 730c0d1fe..959c5d58b 100644 --- a/csi/moac/pool.ts +++ b/csi/moac/pool.ts @@ -187,17 +187,8 @@ export class Pool { // Destroy the pool and remove it from the list of pools on the node. async destroy() { log.debug(`Destroying pool "${this}" ...`); - - try { - await this.node.call('destroyPool', { name: this.name }); - log.info(`Destroyed pool "${this}"`); - } catch (err) { - // TODO: make destroyPool idempotent - if (err.code !== GrpcCode.NOT_FOUND) { - throw err; - } - log.warn(`Removed pool "${this}" does not exist`); - } + await this.node.call('destroyPool', { name: this.name }); + log.info(`Destroyed pool "${this}"`); this.unbind(); } diff --git a/csi/moac/registry.js b/csi/moac/registry.js index 07854945f..dcacf396f 100644 --- a/csi/moac/registry.js +++ b/csi/moac/registry.js @@ -8,7 +8,7 @@ const assert = require('assert'); const EventEmitter = require('events'); const log = require('./logger').Logger('registry'); -const Node = require('./node'); +const { Node } = require('./node'); // List of events emitted by the registry. // @@ -45,7 +45,7 @@ class Registry extends EventEmitter { // @param {string} name Name of the node. // @param {string} endpoint Endpoint for gRPC communication. 
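`createNexus` above derives the nexus children directly from the replicas' share URIs. Restated in isolation, with the gRPC transport and error handling elided, this is only how the request is assembled:

```
// How a createNexus request is assembled from replica objects: the
// children of the nexus are simply the replicas' share URIs.
interface ReplicaLike { uri: string }

function createNexusArgs (uuid: string, size: number, replicas: ReplicaLike[]) {
  return {
    uuid,                                 // ID of the new nexus
    size,                                 // size in bytes
    children: replicas.map((r) => r.uri)  // e.g. 'bdev:///<uuid>', 'nvmf://...'
  };
}
```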
addNode (name, endpoint) { - var node = this.nodes[name]; + let node = this.nodes[name]; if (node) { // if grpc endpoint has not changed, then this will not do anything if (node.endpoint !== endpoint) { @@ -81,9 +81,8 @@ class Registry extends EventEmitter { `mayastor on node "${node.name}" and endpoint "${node.endpoint}" just joined` ); - var self = this; eventObjects.forEach((objType) => { - node.on(objType, (ev) => self.emit(objType, ev)); + node.on(objType, (ev) => this.emit(objType, ev)); }); } diff --git a/csi/moac/replica.ts b/csi/moac/replica.ts index 6859a2288..713d28aed 100644 --- a/csi/moac/replica.ts +++ b/csi/moac/replica.ts @@ -136,18 +136,8 @@ export class Replica { if (!this.pool) { throw new Error('Cannot offline a replica that has not been bound'); } - - try { - await this.pool.node.call('destroyReplica', { uuid: this.uuid }); - log.info(`Destroyed replica "${this}"`); - } catch (err) { - // TODO: make destroyReplica idempotent - if (err.code !== GrpcCode.NOT_FOUND) { - throw err; - } - log.warn(`Destroyed replica "${this}" does not exist`); - } - + await this.pool.node.call('destroyReplica', { uuid: this.uuid }); + log.info(`Destroyed replica "${this}"`); this.unbind(); } diff --git a/csi/moac/test/csi_test.js b/csi/moac/test/csi_test.js index 7019e1ccf..27c42a109 100644 --- a/csi/moac/test/csi_test.js +++ b/csi/moac/test/csi_test.js @@ -10,8 +10,8 @@ const sinon = require('sinon'); const { CsiServer, csi } = require('../csi'); const { GrpcError, GrpcCode } = require('../grpc_client'); const Registry = require('../registry'); -const Volume = require('../volume'); -const Volumes = require('../volumes'); +const { Volume } = require('../volume'); +const { Volumes } = require('../volumes'); const { shouldFailWith } = require('./utils'); const SOCKPATH = '/tmp/csi_controller_test.sock'; @@ -28,7 +28,7 @@ function getCsiClient (svc) { module.exports = function () { it('should start even if there is stale socket file', async () => { await fs.writeFile(SOCKPATH, 'blabla'); - var server = new CsiServer(SOCKPATH); + const server = new CsiServer(SOCKPATH); await server.start(); await server.stop(); try { @@ -43,8 +43,8 @@ module.exports = function () { }); describe('identity', function () { - var server; - var client; + let server; + let client; // create csi server and client before(async () => { @@ -95,18 +95,19 @@ module.exports = function () { }); describe('controller', function () { - var client; - var registry, volumes; - var getCapacityStub, createVolumeStub, getVolumesStub, destroyVolumeStub; + let client; + let registry, volumes; + let getCapacityStub, createVolumeStub, listVolumesStub, getVolumesStub, destroyVolumeStub; async function mockedServer (pools, replicas, nexus) { - var server = new CsiServer(SOCKPATH); + const server = new CsiServer(SOCKPATH); await server.start(); registry = new Registry(); volumes = new Volumes(registry); server.makeReady(registry, volumes); getCapacityStub = sinon.stub(registry, 'getCapacity'); createVolumeStub = sinon.stub(volumes, 'createVolume'); + listVolumesStub = sinon.stub(volumes, 'list'); getVolumesStub = sinon.stub(volumes, 'get'); destroyVolumeStub = sinon.stub(volumes, 'destroyVolume'); return server; @@ -125,7 +126,7 @@ module.exports = function () { }); describe('generic', function () { - var server; + let server; afterEach(async () => { if (server) { @@ -192,10 +193,10 @@ module.exports = function () { }); describe('CreateVolume', function () { - var server; + let server; // place-holder for return value from createVolume when 
we don't care // about the data (i.e. when testing error cases). - var returnedVolume = new Volume(UUID, registry, { + const returnedVolume = new Volume(UUID, registry, () => {}, { replicaCount: 1, preferredNodes: [], requiredNodes: [], @@ -219,7 +220,7 @@ module.exports = function () { it('should create a volume and return parameters in volume context', async () => { createVolumeStub.resolves(returnedVolume); - var parameters = { protocol: 'iscsi', repl: 3, blah: 'again' }; + const parameters = { protocol: 'iscsi', repl: 3, blah: 'again' }; const result = await client.createVolume().sendMessage({ name: 'pvc-' + UUID, capacityRange: { @@ -235,7 +236,7 @@ module.exports = function () { parameters: parameters }); // volume context is a of type map - var expected = {}; + const expected = {}; for (const key in parameters) { expected[key] = parameters[key].toString(); } @@ -247,7 +248,7 @@ module.exports = function () { it('should create a volume that can be accessed only locally', async () => { createVolumeStub.resolves(returnedVolume); - var parameters = { protocol: 'nbd', repl: 3, blah: 'again' }; + const parameters = { protocol: 'nbd', repl: 3, blah: 'again' }; const result = await client.createVolume().sendMessage({ name: 'pvc-' + UUID, capacityRange: { @@ -491,7 +492,7 @@ module.exports = function () { }); describe('DeleteVolume', function () { - var server; + let server; beforeEach(async () => { server = await mockedServer(); @@ -527,16 +528,20 @@ module.exports = function () { }); describe('ListVolumes', function () { - var server; + let server; // uuid except the last two digits - var uuidBase = '4334cc8a-2fed-45ed-866f-3716639db5'; + const uuidBase = '4334cc8a-2fed-45ed-866f-3716639db5'; // Create army of volumes (100) before(async () => { - var vols = []; + const vols = []; for (let i = 0; i < 10; i++) { for (let j = 0; j < 10; j++) { - const vol = new Volume(uuidBase + i + j, registry, {}); + const vol = new Volume(uuidBase + i + j, registry, () => {}, { + replicaCount: 3, + requiredBytes: 100, + protocol: 'nbd' + }); const getSizeStub = sinon.stub(vol, 'getSize'); getSizeStub.returns(100); const getNodeName = sinon.stub(vol, 'getNodeName'); @@ -545,7 +550,7 @@ module.exports = function () { } } server = await mockedServer(); - getVolumesStub.returns(vols); + listVolumesStub.returns(vols); }); after(async () => { @@ -603,7 +608,7 @@ module.exports = function () { }); describe('ControllerPublishVolume', function () { - var server; + let server; before(async () => { server = await mockedServer(); @@ -621,7 +626,7 @@ module.exports = function () { }); it('should publish volume', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); publishStub.resolves('/dev/sdb'); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -671,7 +676,7 @@ module.exports = function () { }); it('should not publish volume over nbd on a different node', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); publishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -699,7 +704,7 @@ module.exports = function () { }); it('should not publish readonly volume', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); 
publishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -724,7 +729,7 @@ module.exports = function () { }); it('should not publish volume with unsupported capability', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); publishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -749,7 +754,7 @@ module.exports = function () { }); it('should not publish volume on node with invalid ID', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); publishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -774,7 +779,7 @@ module.exports = function () { }); it('should not publish volume if share protocol is not specified', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const publishStub = sinon.stub(volume, 'publish'); publishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -799,7 +804,7 @@ module.exports = function () { }); describe('ControllerUnpublishVolume', function () { - var server; + let server; before(async () => { server = await mockedServer(); @@ -828,7 +833,7 @@ module.exports = function () { }); it('should not unpublish volume on pool with invalid ID', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const unpublishStub = sinon.stub(volume, 'unpublish'); unpublishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -844,7 +849,7 @@ module.exports = function () { }); it('should unpublish volume', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const unpublishStub = sinon.stub(volume, 'unpublish'); unpublishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -862,7 +867,7 @@ module.exports = function () { }); it('should unpublish volume even if on a different node', async () => { - const volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); const unpublishStub = sinon.stub(volume, 'unpublish'); unpublishStub.resolves(); const getNodeNameStub = sinon.stub(volume, 'getNodeName'); @@ -881,7 +886,7 @@ module.exports = function () { }); describe('ValidateVolumeCapabilities', function () { - var server; + let server; before(async () => { server = await mockedServer(); @@ -895,16 +900,16 @@ module.exports = function () { }); it('should report SINGLE_NODE_WRITER cap as valid', async () => { - var volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); getVolumesStub.returns(volume); - var caps = [ + const caps = [ 'SINGLE_NODE_WRITER', 'SINGLE_NODE_READER_ONLY', 'MULTI_NODE_READER_ONLY', 'MULTI_NODE_SINGLE_WRITER', 'MULTI_NODE_MULTI_WRITER' ]; - var resp = await client.validateVolumeCapabilities().sendMessage({ + const resp = await client.validateVolumeCapabilities().sendMessage({ volumeId: UUID, volumeCapabilities: caps.map((c) => { return { @@ -921,15 +926,15 @@ module.exports = function () { }); it('should report other caps than SINGLE_NODE_WRITER as invalid', async () => { - var volume = new Volume(UUID, registry, {}); + const volume = new Volume(UUID, registry, () => {}, {}); 
getVolumesStub.returns(volume); - var caps = [ + const caps = [ 'SINGLE_NODE_READER_ONLY', 'MULTI_NODE_READER_ONLY', 'MULTI_NODE_SINGLE_WRITER', 'MULTI_NODE_MULTI_WRITER' ]; - var resp = await client.validateVolumeCapabilities().sendMessage({ + const resp = await client.validateVolumeCapabilities().sendMessage({ volumeId: UUID, volumeCapabilities: caps.map((c) => { return { @@ -959,7 +964,7 @@ module.exports = function () { }); describe('GetCapacity', function () { - var server; + let server; before(async () => { server = await mockedServer(); @@ -978,7 +983,7 @@ module.exports = function () { it('should get capacity of a single node with multiple pools', async () => { getCapacityStub.returns(75); - var resp = await client.getCapacity().sendMessage({ + const resp = await client.getCapacity().sendMessage({ accessibleTopology: { segments: { 'kubernetes.io/hostname': 'node1' @@ -992,7 +997,7 @@ module.exports = function () { it('should get capacity of all pools on all nodes', async () => { getCapacityStub.returns(80); - var resp = await client.getCapacity().sendMessage({}); + const resp = await client.getCapacity().sendMessage({}); expect(resp.availableCapacity).to.equal(80); sinon.assert.calledOnce(getCapacityStub); sinon.assert.calledWith(getCapacityStub, undefined); diff --git a/csi/moac/test/event_stream_test.js b/csi/moac/test/event_stream_test.js index c45adeb1a..0cb4a8d89 100644 --- a/csi/moac/test/event_stream_test.js +++ b/csi/moac/test/event_stream_test.js @@ -9,8 +9,8 @@ const { Pool } = require('../pool'); const { Replica } = require('../replica'); const { Nexus } = require('../nexus'); const Registry = require('../registry'); -const Volume = require('../volume'); -const Volumes = require('../volumes'); +const { Volume } = require('../volume'); +const { Volumes } = require('../volumes'); const EventStream = require('../event_stream'); module.exports = function () { @@ -35,7 +35,7 @@ module.exports = function () { const registry = new Registry(); const volumes = new Volumes(registry); const getNodeStub = sinon.stub(registry, 'getNode'); - const getVolumeStub = sinon.stub(volumes, 'get'); + const getVolumeStub = sinon.stub(volumes, 'list'); // The initial state of the nodes. "new" event should be written to the // stream for all these objects and one "sync" event for each node meaning // that the reader has caught up with the initial state. 
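The two `validateVolumeCapabilities` tests above pin down the access-mode contract: only `SINGLE_NODE_WRITER` comes back confirmed, every other mode is reported invalid. The filter they imply, restated standalone (mode names are from the CSI spec):

```
// Access-mode filtering implied by the tests above: moac volumes are
// single-writer, so only SINGLE_NODE_WRITER validates.
const SUPPORTED_MODES = new Set(['SINGLE_NODE_WRITER']);

function confirmedCapabilities (requested: string[]): string[] {
  return requested.filter((mode) => SUPPORTED_MODES.has(mode));
}

console.log(confirmedCapabilities([
  'SINGLE_NODE_WRITER',
  'MULTI_NODE_MULTI_WRITER'
])); // [ 'SINGLE_NODE_WRITER' ]
```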
@@ -66,8 +66,8 @@ module.exports = function () { ) ]); getVolumeStub.returns([ - new Volume('volume1', registry, {}), - new Volume('volume2', registry, {}) + new Volume('volume1', registry, () => {}, {}), + new Volume('volume2', registry, () => {}, {}) ]); // set low high water mark to test buffered reads diff --git a/csi/moac/test/nexus_test.js b/csi/moac/test/nexus_test.js index ebd5891b6..a099c6d8c 100644 --- a/csi/moac/test/nexus_test.js +++ b/csi/moac/test/nexus_test.js @@ -5,7 +5,7 @@ const _ = require('lodash'); const expect = require('chai').expect; const sinon = require('sinon'); -const Node = require('../node'); +const { Node } = require('../node'); const { Replica } = require('../replica'); const { Nexus } = require('../nexus'); const { shouldFailWith } = require('./utils'); @@ -14,7 +14,7 @@ const { GrpcCode, GrpcError } = require('../grpc_client'); const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; module.exports = function () { - var props = { + const props = { uuid: UUID, size: 100, deviceUri: '', @@ -66,7 +66,7 @@ module.exports = function () { }); describe('mod event', () => { - var node, eventSpy, nexus, newProps; + let node, eventSpy, nexus, newProps; beforeEach(() => { node = new Node('node'); @@ -162,7 +162,7 @@ module.exports = function () { }); describe('grpc', () => { - var node, nexus, eventSpy, callStub; + let node, nexus, eventSpy, callStub; // Create a sample nexus bound to a node beforeEach((done) => { diff --git a/csi/moac/test/node_stub.js b/csi/moac/test/node_stub.js index ef9d97bb9..01f344e24 100644 --- a/csi/moac/test/node_stub.js +++ b/csi/moac/test/node_stub.js @@ -3,7 +3,7 @@ 'use strict'; -const Node = require('../node'); +const { Node } = require('../node'); // It can be used instead of real node object in tests of components that // depend on the Node. 
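A pattern worth calling out across these test diffs: `Volume` now takes an extra callback argument between the registry and the spec, hence the ubiquitous `new Volume(UUID, registry, () => {}, ...)` rewrites. Reconstructed from the call sites only, since the authoritative declaration lives in `volume.ts` outside these hunks:

```
// Volume constructor signature as reconstructed from the test call sites,
// e.g. new Volume(UUID, registry, () => {}, defaultSpec, 'pending', 95, 'node1').
// The third argument appears to notify the owner about state changes so
// that 'new'/'mod'/'del' volume events can be emitted.
type EmitCb = (eventType: string) => void;

declare class Volume {
  constructor (
    uuid: string,
    registry: object,
    emitEvent: EmitCb,
    spec: object,
    state?: string,       // e.g. 'pending', 'healthy', 'degraded', 'faulted'
    size?: number,
    publishedOn?: string  // node name when the volume is published
  );
}
```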
@@ -14,16 +14,15 @@ class NodeStub extends Node { constructor (name, opts, pools, nexus) { super(name, opts); - var self = this; if (pools) { - self.pools = pools.map((p) => { - p.node = self; + this.pools = pools.map((p) => { + p.node = this; return p; }); } if (nexus) { - self.nexus = nexus.map((n) => { - n.node = self; + this.nexus = nexus.map((n) => { + n.node = this; return n; }); } diff --git a/csi/moac/test/node_test.js b/csi/moac/test/node_test.js index d0f0ceedd..778933033 100644 --- a/csi/moac/test/node_test.js +++ b/csi/moac/test/node_test.js @@ -4,7 +4,7 @@ const _ = require('lodash'); const expect = require('chai').expect; -const Node = require('../node'); +const { Node } = require('../node'); const { Nexus } = require('../nexus'); const { Pool } = require('../pool'); const { Replica } = require('../replica'); @@ -15,9 +15,9 @@ const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; const MS_ENDPOINT = '127.0.0.1:12345'; module.exports = function () { - var srv; - var node; - var pools = [ + let srv; + let node; + const pools = [ { name: 'pool', disks: ['aio:///dev/sdb', 'aio:///dev/sdc'], @@ -26,7 +26,7 @@ module.exports = function () { used: 14 } ]; - var replicas = [ + const replicas = [ { uuid: UUID, pool: 'pool', @@ -36,7 +36,7 @@ module.exports = function () { uri: 'bdev:///' + UUID } ]; - var nexus = [ + const nexus = [ { uuid: UUID, size: 10, @@ -262,7 +262,7 @@ module.exports = function () { }); it('should emit event when a pool is deleted', (done) => { - var replicaRemoved = false; + let replicaRemoved = false; node.once('replica', (ev) => { expect(ev.eventType).to.equal('del'); @@ -283,7 +283,7 @@ module.exports = function () { it('should emit event when a pool with replica is created', (done) => { const newUuid = 'f04015e1-3689-4e34-9bed-e2dbba1e4a29'; - var poolAdded = false; + let poolAdded = false; node.once('pool', (ev) => { expect(ev.eventType).to.equal('new'); @@ -498,7 +498,7 @@ module.exports = function () { // pool/replica/nexus event should be emitted before node event and // node should be online when emitting those events. 
- var poolEvent; + let poolEvent; node.once('pool', (ev) => { expect(node.isSynced()).to.be.true(); poolEvent = ev; @@ -517,9 +517,9 @@ module.exports = function () { }); describe('object create', function () { - var replica; - var pool; - var nexus; + let replica; + let pool; + let nexus; this.timeout(100); @@ -606,7 +606,7 @@ module.exports = function () { // start a fake mayastor server before((done) => { - var pools = [ + const pools = [ { name: 'pool1', disks: ['/dev/sdb', '/dev/sdc'], @@ -622,7 +622,7 @@ module.exports = function () { used: 14 } ]; - var replicas = [ + const replicas = [ { uuid: UUID1, pool: 'pool1', diff --git a/csi/moac/test/pool_test.js b/csi/moac/test/pool_test.js index 22fb81c83..5863aa6a7 100644 --- a/csi/moac/test/pool_test.js +++ b/csi/moac/test/pool_test.js @@ -5,7 +5,7 @@ const _ = require('lodash'); const expect = require('chai').expect; const sinon = require('sinon'); -const Node = require('../node'); +const { Node } = require('../node'); const { Pool } = require('../pool'); const { Replica } = require('../replica'); const { shouldFailWith } = require('./utils'); @@ -21,7 +21,7 @@ module.exports = function () { }; describe('should emit event upon change of volatile property', () => { - var node, eventSpy, pool, newProps; + let node, eventSpy, pool, newProps; beforeEach(() => { node = new Node('node'); @@ -217,20 +217,6 @@ module.exports = function () { }); }); - it('should ignore NOT_FOUND error when destroying the pool', async () => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.rejects({ code: 5 }); - const pool = new Pool(props); - node._registerPool(pool); - - await pool.destroy(); - - sinon.assert.calledOnce(stub); - sinon.assert.calledWithMatch(stub, 'destroyPool', { name: 'pool' }); - expect(node.pools).to.be.empty(); - }); - it('should offline the pool with replica', () => { const node = new Node('node'); const eventSpy = sinon.spy(node, 'emit'); diff --git a/csi/moac/test/replica_test.js b/csi/moac/test/replica_test.js index b53a5519c..1ffed8a8b 100644 --- a/csi/moac/test/replica_test.js +++ b/csi/moac/test/replica_test.js @@ -5,7 +5,7 @@ const _ = require('lodash'); const expect = require('chai').expect; const sinon = require('sinon'); -const Node = require('../node'); +const { Node } = require('../node'); const { Pool } = require('../pool'); const { Replica } = require('../replica'); const { shouldFailWith } = require('./utils'); @@ -14,14 +14,14 @@ const { GrpcCode, GrpcError } = require('../grpc_client'); const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; module.exports = function () { - var poolProps = { + const poolProps = { name: 'pool', disks: ['/dev/sda'], state: 'POOL_ONLINE', capacity: 100, used: 4 }; - var props = { + const props = { uuid: UUID, pool: 'pool', size: 100, @@ -30,7 +30,7 @@ module.exports = function () { }; describe('mod event', () => { - var node, eventSpy, replica, pool, newProps; + let node, eventSpy, replica, pool, newProps; beforeEach(() => { node = new Node('node'); @@ -217,27 +217,4 @@ module.exports = function () { expect(replica.pool).to.equal(pool); expect(pool.replicas).to.have.lengthOf(1); }); - - it('should ignore NOT_FOUND error when destroying the replica', (done) => { - const node = new Node('node'); - const stub = sinon.stub(node, 'call'); - stub.rejects(new GrpcError(GrpcCode.NOT_FOUND, 'not found test failure')); - const pool = new Pool(poolProps); - node._registerPool(pool); - const replica = new Replica(props); - pool.registerReplica(replica); - - 
node.once('replica', (ev) => { - expect(ev.eventType).to.equal('del'); - expect(ev.object).to.equal(replica); - sinon.assert.calledOnce(stub); - sinon.assert.calledWith(stub, 'destroyReplica', { uuid: UUID }); - setTimeout(() => { - expect(replica.pool).to.be.undefined(); - expect(pool.replicas).to.have.lengthOf(0); - done(); - }, 0); - }); - replica.destroy(); - }); }; diff --git a/csi/moac/test/rest_api_test.js b/csi/moac/test/rest_api_test.js index 7d6086510..3893d4d05 100644 --- a/csi/moac/test/rest_api_test.js +++ b/csi/moac/test/rest_api_test.js @@ -6,7 +6,7 @@ const expect = require('chai').expect; const http = require('http'); const sinon = require('sinon'); const Registry = require('../registry'); -const Node = require('../node'); +const { Node } = require('../node'); const { GrpcError, GrpcCode } = require('../grpc_client'); const ApiServer = require('../rest_api'); @@ -17,8 +17,8 @@ const UUID2 = '02de3df9-ce18-4164-89e1-b1cbf7a88e52'; const UUID3 = '02de3df9-ce18-4164-89e1-b1cbf7a88e53'; module.exports = function () { - var apiServer; - var call1, call2, call3, call4; + let apiServer; + let call1, call2, call3, call4; before(() => { const node1 = new Node('node1'); diff --git a/csi/moac/test/volume_operator_test.js b/csi/moac/test/volume_operator_test.js index 9bdbb0f43..9947f3bba 100644 --- a/csi/moac/test/volume_operator_test.js +++ b/csi/moac/test/volume_operator_test.js @@ -8,8 +8,8 @@ const sinon = require('sinon'); const sleep = require('sleep-promise'); const { KubeConfig } = require('client-node-fixed-watcher'); const Registry = require('../registry'); -const Volume = require('../volume'); -const Volumes = require('../volumes'); +const { Volume } = require('../volume'); +const { Volumes } = require('../volumes'); const { VolumeOperator, VolumeResource } = require('../volume_operator'); const { GrpcError, GrpcCode } = require('../grpc_client'); const { mockCache } = require('./watcher_stub'); @@ -57,11 +57,12 @@ const defaultSpec = { const defaultStatus = { size: 110, - node: 'node2', + targetNodes: ['node2'], state: 'healthy', nexus: { deviceUri: 'file:///dev/nbd0', state: 'NEXUS_ONLINE', + node: 'node2', children: [ { uri: 'bdev:///' + UUID, @@ -130,6 +131,7 @@ module.exports = function () { nexus: { deviceUri: 'file:///dev/nbd0', state: 'NEXUS_ONLINE', + node: 'node2', children: [ { uri: 'bdev:///' + UUID, @@ -157,10 +159,10 @@ module.exports = function () { expect(res.spec.requiredBytes).to.equal(100); expect(res.spec.limitBytes).to.equal(120); expect(res.status.size).to.equal(110); - expect(res.status.node).to.equal('node2'); expect(res.status.state).to.equal('healthy'); expect(res.status.nexus.deviceUri).to.equal('file:///dev/nbd0'); expect(res.status.nexus.state).to.equal('NEXUS_ONLINE'); + expect(res.status.nexus.node).to.equal('node2'); expect(res.status.nexus.children).to.have.length(1); expect(res.status.nexus.children[0].uri).to.equal('bdev:///' + UUID); expect(res.status.nexus.children[0].state).to.equal('CHILD_ONLINE'); @@ -180,14 +182,14 @@ module.exports = function () { }, { size: 100, - node: 'node2', + targetNodes: ['node2'], state: 'online' // "online" is not a valid volume state } ); expect(res.metadata.name).to.equal(UUID); expect(res.spec.replicaCount).to.equal(1); expect(res.status.size).to.equal(100); - expect(res.status.node).to.equal('node2'); + expect(res.status.targetNodes).to.deep.equal(['node2']); expect(res.status.state).to.equal('unknown'); }); @@ -203,7 +205,7 @@ module.exports = function () { }, { size: 110, - node: 'node2', + 
targetNodes: ['node2'], state: 'healthy', replicas: [] } @@ -219,7 +221,7 @@ module.exports = function () { expect(res.spec.requiredBytes).to.equal(100); expect(res.spec.limitBytes).to.equal(120); expect(res.status.size).to.equal(110); - expect(res.status.node).to.equal('node2'); + expect(res.status.targetNodes).to.deep.equal(['node2']); expect(res.status.state).to.equal('healthy'); expect(res.status.nexus).is.undefined(); expect(res.status.replicas).to.have.lengthOf(0); @@ -335,7 +337,7 @@ module.exports = function () { const volumes = new Volumes(registry); const importVolumeStub = sinon.stub(volumes, 'importVolume'); // return value is not used so just return something - importVolumeStub.resolves({ uuid: UUID }); + importVolumeStub.returns({ uuid: UUID }); const volumeResource = createVolumeResource(UUID, defaultSpec, defaultStatus); oper = await createVolumeOperator(volumes, (arg) => { @@ -356,7 +358,7 @@ module.exports = function () { const registry = new Registry(); const volumes = new Volumes(registry); const importVolumeStub = sinon.stub(volumes, 'importVolume'); - importVolumeStub.rejects( + importVolumeStub.throws( new GrpcError(GrpcCode.INTERNAL, 'create failed') ); @@ -428,16 +430,18 @@ module.exports = function () { let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volume.size = 110; const fsaStub = sinon.stub(volume, 'fsa'); fsaStub.returns(); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() - .returns([]); + .returns([volume]); const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); // new changed specification of the object const newObj = createVolumeResource( @@ -474,16 +478,18 @@ module.exports = function () { let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volume.size = 110; const fsaStub = sinon.stub(volume, 'fsa'); fsaStub.resolves(); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() - .returns([]); + .returns([volume]); const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); // new changed specification of the object const newObj = createVolumeResource( @@ -518,14 +524,16 @@ module.exports = function () { let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volume.size = 110; const fsaStub = sinon.stub(volume, 'fsa'); fsaStub.returns(); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() .returns([]); const oldObj = createVolumeResource(UUID, defaultSpec, defaultStatus); @@ -558,12 +566,14 @@ module.exports = function () { it('should create a resource upon "new" volume event', async () => { let stubs; const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultSpec, 100); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); const volumes = new Volumes(registry); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() 
.returns([volume]); @@ -582,18 +592,18 @@ module.exports = function () { expect(stubs.create.args[0][4].spec).to.deep.equal(defaultSpec); sinon.assert.calledOnce(stubs.updateStatus); expect(stubs.updateStatus.args[0][5].status).to.deep.equal({ - node: '', replicas: [], - size: 100, + size: 0, state: 'pending' }); + expect(stubs.updateStatus.args[0][5].status.targetNodes).to.be.undefined(); }); it('should not crash if POST fails upon "new" volume event', async () => { let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); sinon.stub(volumes, 'get').returns([]); const volumeResource = createVolumeResource(UUID, defaultSpec); @@ -620,11 +630,13 @@ module.exports = function () { const volumes = new Volumes(registry); const newSpec = _.cloneDeep(defaultSpec); newSpec.replicaCount += 1; - const volume = new Volume(UUID, registry, newSpec); + const volume = new Volume(UUID, registry, () => {}, newSpec); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() .returns([volume]); @@ -646,17 +658,19 @@ module.exports = function () { let stubs; const registry = new Registry(); const volumes = new Volumes(registry); - const volume = new Volume(UUID, registry, defaultSpec, 100); + const volume = new Volume(UUID, registry, () => {}, defaultSpec, 'pending', 100, 'node2'); sinon .stub(volumes, 'get') .withArgs(UUID) - .returns(volume) + .returns(volume); + sinon + .stub(volumes, 'list') .withArgs() .returns([volume]); const volumeResource = createVolumeResource(UUID, defaultSpec, { size: 100, - node: '', + targetNodes: ['node2'], state: 'pending', replicas: [] }); @@ -694,7 +708,7 @@ module.exports = function () { limitBytes: 130, protocol: 'nvmf' }; - const volume = new Volume(UUID, registry, newSpec); + const volume = new Volume(UUID, registry, () => {}, newSpec); volumes.emit('volume', { eventType: 'mod', object: volume @@ -720,7 +734,7 @@ module.exports = function () { stubs.updateStatus.resolves(); }); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volumes.emit('volume', { eventType: 'mod', object: volume @@ -753,7 +767,7 @@ module.exports = function () { limitBytes: 130, protocol: 'nbd' }; - const volume = new Volume(UUID, registry, newSpec); + const volume = new Volume(UUID, registry, () => {}, newSpec); volumes.emit('volume', { eventType: 'mod', object: volume @@ -783,7 +797,7 @@ module.exports = function () { limitBytes: 130, protocol: 'nbd' }; - const volume = new Volume(UUID, registry, newSpec); + const volume = new Volume(UUID, registry, () => {}, newSpec); volumes.emit('volume', { eventType: 'mod', object: volume @@ -808,7 +822,7 @@ module.exports = function () { stubs.delete.resolves(); }); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume @@ -831,7 +845,7 @@ module.exports = function () { stubs.delete.rejects(new Error('delete failed')); }); - const volume = new Volume(UUID, registry, defaultSpec); + const volume = new Volume(UUID, registry, () => {}, defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume @@ -853,7 +867,7 @@ module.exports = function () { stubs.delete.resolves(); }); - const volume = new Volume(UUID, registry, defaultSpec); + 
const volume = new Volume(UUID, registry, () => {}, defaultSpec); volumes.emit('volume', { eventType: 'del', object: volume diff --git a/csi/moac/test/volume_test.js b/csi/moac/test/volume_test.js index 5e5b0d3c7..6400cac08 100644 --- a/csi/moac/test/volume_test.js +++ b/csi/moac/test/volume_test.js @@ -9,9 +9,13 @@ const expect = require('chai').expect; const sinon = require('sinon'); const { Nexus } = require('../nexus'); -const Node = require('../node'); +const { Node } = require('../node'); +const { Pool } = require('../pool'); const Registry = require('../registry'); -const Volume = require('../volume'); +const { Replica } = require('../replica'); +const { Volume } = require('../volume'); +const { shouldFailWith } = require('./utils'); +const { GrpcCode } = require('../grpc_client'); const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; @@ -23,62 +27,176 @@ const defaultOpts = { limitBytes: 100 }; +// Repeating code that is extracted to a function. +function createFakeVolume (state) { + const registry = new Registry(); + const volume = new Volume(UUID, registry, () => {}, defaultOpts, state, 100); + const fsaStub = sinon.stub(volume, 'fsa'); + fsaStub.returns(); + const node = new Node('node'); + const replica = new Replica({ uuid: UUID, size: 100, share: 'REPLICA_NONE', uri: `bdev:///${UUID}` }); + const pool = new Pool({ name: 'pool', disks: [] }); + pool.bind(node); + replica.bind(pool); + volume.newReplica(replica); + return [volume, node]; +} + module.exports = function () { it('should stringify volume name', () => { const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultOpts); + const volume = new Volume(UUID, registry, () => {}, defaultOpts); expect(volume.toString()).to.equal(UUID); }); - it('should get name of the node where the volume is accessible from', () => { + it('should get name of the node where the volume has been published', () => { const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultOpts); - const node = new Node('node'); - const nexus = new Nexus({ uuid: UUID }); - nexus.bind(node); - volume.newNexus(nexus); + const volume = new Volume(UUID, registry, () => {}, defaultOpts, 'degraded', 100, 'node'); expect(volume.getNodeName()).to.equal('node'); + expect(volume.state).to.equal('degraded'); }); it('should get zero size of a volume that has not been created yet', () => { const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultOpts); + const volume = new Volume(UUID, registry, () => {}, defaultOpts); expect(volume.getSize()).to.equal(0); }); + it('should get the right size of a volume that has been imported', () => { + const registry = new Registry(); + const volume = new Volume(UUID, registry, () => {}, defaultOpts, 'healthy', 100); + expect(volume.getSize()).to.equal(100); + expect(volume.state).to.equal('healthy'); + }); + it('should set the preferred nodes for the volume', () => { + let modified = false; const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultOpts); + const volume = new Volume(UUID, registry, () => { + modified = true; + }, defaultOpts); + const fsaStub = sinon.stub(volume, 'fsa'); + fsaStub.returns(); expect(volume.preferredNodes).to.have.lengthOf(0); - const updated = volume.update({ preferredNodes: ['node1', 'node2'] }); - expect(updated).to.equal(true); + volume.update({ preferredNodes: ['node1', 'node2'] }); + expect(modified).to.equal(true); expect(volume.preferredNodes).to.have.lengthOf(2); }); - it('should publish and 
unpublish the volume', async () => { + it('should not publish volume that is known to be broken', async () => { const registry = new Registry(); - const volume = new Volume(UUID, registry, defaultOpts); + const volume = new Volume(UUID, registry, () => {}, defaultOpts, 'faulted', 100); + const fsaStub = sinon.stub(volume, 'fsa'); + fsaStub.returns(); const node = new Node('node'); - const nexus = new Nexus({ uuid: UUID }); const stub = sinon.stub(node, 'call'); + stub.onCall(0).resolves({}); + stub.onCall(1).resolves({ deviceUri: 'file:///dev/nbd0' }); + + shouldFailWith(GrpcCode.INTERNAL, async () => { + await volume.publish('nbd'); + }); + sinon.assert.notCalled(stub); + }); + + it('should publish a volume', async () => { + const [volume, node] = createFakeVolume('healthy'); + const stub = sinon.stub(node, 'call'); + stub.onCall(0).resolves({ uuid: UUID, size: 100, state: 'NEXUS_ONLINE', children: [{ uri: `bdev:///${UUID}`, state: 'CHILD_ONLINE' }] }); + stub.onCall(1).resolves({ deviceUri: 'file:///dev/nbd0' }); + + const uri = await volume.publish('nbd'); + expect(uri).to.equal('file:///dev/nbd0'); + sinon.assert.calledTwice(stub); + sinon.assert.calledWithMatch(stub.firstCall, 'createNexus', { + uuid: UUID, + size: 100, + children: [`bdev:///${UUID}`] + }); + sinon.assert.calledWithMatch(stub.secondCall, 'publishNexus', { + uuid: UUID, + key: '' + }); + }); + + it('should publish a volume that already has a nexus', async () => { + const [volume, node] = createFakeVolume('healthy'); + const stub = sinon.stub(node, 'call'); + const nexus = new Nexus({ uuid: UUID }); nexus.bind(node); volume.newNexus(nexus); stub.resolves({ deviceUri: 'file:///dev/nbd0' }); - await volume.publish('nbd'); + const uri = await volume.publish('nbd'); + expect(uri).to.equal('file:///dev/nbd0'); expect(nexus.deviceUri).to.equal('file:///dev/nbd0'); sinon.assert.calledOnce(stub); sinon.assert.calledWithMatch(stub, 'publishNexus', { uuid: UUID, key: '' }); + }); + it('should publish a volume that has been already published', async () => { + const [volume, node] = createFakeVolume('degraded'); + const stub = sinon.stub(node, 'call'); + const nexus = new Nexus({ uuid: UUID }); + const getUriStub = sinon.stub(nexus, 'getUri'); + nexus.bind(node); + volume.newNexus(nexus); + getUriStub.returns('file:///dev/nbd0'); + + const uri = await volume.publish('nbd'); + expect(uri).to.equal('file:///dev/nbd0'); + sinon.assert.notCalled(stub); + sinon.assert.calledOnce(getUriStub); + }); + + it('should unpublish a volume', async () => { + const [volume, node] = createFakeVolume('faulted'); + const stub = sinon.stub(node, 'call'); + const nexus = new Nexus({ uuid: UUID }); + const getUriStub = sinon.stub(nexus, 'getUri'); + nexus.bind(node); + volume.newNexus(nexus); + volume.publishedOn = node.name; + getUriStub.returns('file:///dev/nbd0'); + stub.onCall(0).resolves({}); + + await volume.unpublish(); + expect(volume.getNodeName()).to.be.undefined(); + sinon.assert.calledOnce(stub); + sinon.assert.calledWithMatch(stub, 'destroyNexus', { + uuid: UUID + }); + }); + + it('should unpublish volume that has not been published', async () => { + const [volume, node] = createFakeVolume('faulted'); + const stub = sinon.stub(node, 'call'); + const nexus = new Nexus({ uuid: UUID }); + const getUriStub = sinon.stub(nexus, 'getUri'); + nexus.bind(node); + volume.newNexus(nexus); + volume.publishedOn = node.name; + getUriStub.returns(); stub.resolves({}); + await volume.unpublish(); - expect(nexus.deviceUri).to.equal(''); - 
sinon.assert.calledTwice(stub); - sinon.assert.calledWithMatch(stub.secondCall, 'unpublishNexus', { + expect(volume.getNodeName()).to.be.undefined(); + sinon.assert.calledOnce(stub); + sinon.assert.calledWithMatch(stub, 'destroyNexus', { uuid: UUID }); }); + + it('should unpublish volume without nexus', async () => { + const [volume, node] = createFakeVolume('healthy'); + const stub = sinon.stub(node, 'call'); + stub.resolves({}); + + await volume.unpublish(); + expect(volume.getNodeName()).to.be.undefined(); + sinon.assert.notCalled(stub); + }); }; diff --git a/csi/moac/test/volumes_test.js b/csi/moac/test/volumes_test.js index 3cdca8f97..4ccb179da 100644 --- a/csi/moac/test/volumes_test.js +++ b/csi/moac/test/volumes_test.js @@ -6,28 +6,30 @@ 'use strict'; +const _ = require('lodash'); const expect = require('chai').expect; const sinon = require('sinon'); const { Nexus } = require('../nexus'); -const Node = require('../node'); +const { Node } = require('../node'); const { Pool } = require('../pool'); const Registry = require('../registry'); const { Replica } = require('../replica'); -const Volume = require('../volume'); -const Volumes = require('../volumes'); +const { Volume } = require('../volume'); +const { Volumes } = require('../volumes'); const { GrpcCode } = require('../grpc_client'); const { shouldFailWith, waitUntil } = require('./utils'); +const enums = require('./grpc_enums'); const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; module.exports = function () { - var registry, volumes; - var pool1, pool2, pool3; - var node1, node2, node3; - var stub1, stub2, stub3; - var nexus, replica1, replica2; - var volume; - var volEvents; + let registry, volumes; + let pool1, pool2, pool3; + let node1, node2, node3; + let stub1, stub2, stub3; + let nexus, replica1, replica2; + let volume; + let volEvents; // Create pristine test env with 3 pools on 3 nodes function createTestEnv () { @@ -78,14 +80,14 @@ module.exports = function () { volEvents = []; volumes.on('volume', (ev) => { - volEvents.push(ev); + volEvents.push(_.cloneDeep(ev)); }); } // Create a setup with standard env (from createTestEnv()) and on top of that - // a nexus on node1 with two replicas on node1 and node2 and a volume that is - // in healthy state. - async function setUpReferenceEnv () { + // a volume with two replicas on node1 and node2 and nexus on node1 if the + // volume should be created in published state. 
+ async function setUpReferenceEnv (published) { createTestEnv(); replica1 = new Replica({ @@ -104,39 +106,49 @@ module.exports = function () { }); pool2.registerReplica(replica2); - nexus = new Nexus({ - uuid: UUID, - size: 95, - deviceUri: '', - state: 'NEXUS_ONLINE', - children: [ - { - uri: `bdev:///${UUID}`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - }, - { - uri: `nvmf://remote/${UUID}`, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); - node1._registerNexus(nexus); + if (published) { + nexus = new Nexus({ + uuid: UUID, + size: 95, + deviceUri: 'file:///dev/nbd0', + state: 'NEXUS_ONLINE', + children: [ + { + uri: `bdev:///${UUID}`, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + }, + { + uri: `nvmf://remote/${UUID}`, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + } + ] + }); + node1._registerNexus(nexus); + } // Fake the volume - volume = new Volume(UUID, registry, { + volume = new Volume(UUID, registry, (type) => { + volumes.emit('volume', { + eventType: type, + object: volume + }); + }, { replicaCount: 2, preferredNodes: [], requiredNodes: [], requiredBytes: 90, limitBytes: 110, protocol: 'nbd' - }); + }, 'pending', 95, published ? 'node1' : undefined); volumes.volumes[UUID] = volume; volumes.start(); - await waitUntil(() => volume.state === 'healthy', 'volume to come up'); + await waitUntil(() => { + return volEvents.length >= (published ? 3 : 2); + }, 'volume events'); + volume.state = 'healthy'; } function tearDownReferenceEnv () {} @@ -163,12 +175,12 @@ module.exports = function () { protocol: 'nbd' }) ); - expect(volEvents).to.have.lengthOf(2); + expect(volEvents).to.have.lengthOf(3); expect(volEvents[0].eventType).to.equal('new'); - expect(volEvents[0].object.uuid).to.equal(UUID); - expect(volEvents[0].object.state).to.equal('pending'); - expect(volEvents[1].eventType).to.equal('del'); - expect(volEvents[1].object.state).to.equal('pending'); + expect(volEvents[1].eventType).to.equal('mod'); + expect(volEvents[2].eventType).to.equal('del'); + expect(volEvents[2].object.uuid).to.equal(UUID); + expect(volEvents[2].object.state).to.equal('destroyed'); }); it('should set the size of the volume to required minimum if limit is not set', async () => { @@ -181,18 +193,6 @@ module.exports = function () { share: 'REPLICA_NONE', uri: 'bdev:///' + UUID }); - stub1.onCall(1).resolves({ - uuid: UUID, - size: 90, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); volumes.start(); volume = await volumes.createVolume(UUID, { @@ -226,18 +226,6 @@ module.exports = function () { share: 'REPLICA_NONE', uri: 'bdev:///' + UUID }); - stub1.onCall(1).resolves({ - uuid: UUID, - size: 50, - state: 'NEXUS_ONLINE', - children: [ - { - uri: 'bdev:///' + UUID, - state: 'CHILD_ONLINE', - rebuildProgress: 0 - } - ] - }); volumes.start(); volume = await volumes.createVolume(UUID, { @@ -311,21 +299,16 @@ module.exports = function () { limitBytes: 50, protocol: 'nbd' }); - await waitUntil(() => !!volume.nexus, 'nexus'); sinon.assert.notCalled(stub2); sinon.assert.notCalled(stub3); - sinon.assert.calledOnce(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'createNexus', { - uuid: UUID, - size: 10, - children: [`bdev:///${UUID}`] - }); + sinon.assert.notCalled(stub1); expect(Object.keys(volume.replicas)).to.have.lengthOf(1); expect(Object.values(volume.replicas)[0]).to.equal(replica); - expect(volume.state).to.equal('faulted'); - expect(volEvents).to.have.lengthOf(2); + expect(volume.state).to.equal('healthy'); + 
expect(volEvents).to.have.lengthOf(3); expect(volEvents[0].eventType).to.equal('new'); expect(volEvents[1].eventType).to.equal('mod'); + expect(volEvents[2].eventType).to.equal('mod'); }); it('should create the volume object and include pre-existing nexus', async () => { @@ -362,7 +345,7 @@ module.exports = function () { volumes.start(); volume = await volumes.createVolume(UUID, { - replicaCount: 2, + replicaCount: 1, preferredNodes: [], requiredNodes: [], requiredBytes: 10, @@ -392,14 +375,11 @@ module.exports = function () { }); expect(Object.keys(volume.replicas)).to.have.lengthOf(1); expect(volume.nexus).to.equal(nexus); - expect(volEvents).to.have.lengthOf(3); - expect(volEvents[0].eventType).to.equal('new'); - expect(volEvents[1].eventType).to.equal('mod'); - expect(volEvents[2].eventType).to.equal('mod'); + expect(volEvents).to.have.lengthOf(6); }); }); - describe('import volume from MSV CRD', function () { + describe('import volume', function () { // this creates an env with 3 pools on 3 nodes without any replica and nexus beforeEach(createTestEnv); @@ -407,22 +387,23 @@ module.exports = function () { volumes.stop(); }); - const volumeCRD = { - UUID: UUID, - spec: { - replicaCount: 2, - preferredNodes: [], - requiredNodes: [], - requiredBytes: 10, - limitBytes: 50, - protocol: 'nbd' - }, - status: { - size: 40 - } + const volumeSpec = { + replicaCount: 1, + preferredNodes: [], + requiredNodes: [], + requiredBytes: 10, + limitBytes: 50, + protocol: 'nbd' }; - it('should import volume', async () => { + it('should import a volume and fault it if there are no replicas', async () => { + volumes.start(); + volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + expect(volume.state).to.equal('faulted'); + expect(Object.keys(volume.replicas)).to.have.lengthOf(0); + }); + + it('should import a volume without nexus', async () => { const replica = new Replica({ uuid: UUID, size: 10, @@ -434,47 +415,21 @@ module.exports = function () { getReplicaSetStub.returns([replica]); volumes.start(); - await volumes.importVolume(volumeCRD.UUID, volumeCRD.spec, volumeCRD.status); - }); - - it('imported volume should keep the same size', async () => { - volumes.start(); - volume = await volumes.importVolume(volumeCRD.UUID, volumeCRD.spec, volumeCRD.status); - expect(volume.state).to.equal('pending'); + volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + expect(volume.nexus).to.be.null(); + expect(volume.state).to.equal('healthy'); expect(volume.size).to.equal(40); - expect(volEvents).to.have.lengthOf(1); - expect(volEvents[0].eventType).to.equal('new'); + expect(volEvents).to.have.lengthOf(3); }); - it('imported volume should attach to the replicas', async () => { - const replica1 = new Replica({ + it('should import unpublished volume with nexus', async () => { + const replica = new Replica({ uuid: UUID, size: 40, share: 'REPLICA_NONE', uri: `bdev:///${UUID}` }); - replica1.pool = { node: node1 }; - const replica2 = new Replica({ - uuid: UUID, - size: 40, - share: 'REPLICA_NVMF', - uri: `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${UUID}` - }); - replica2.pool = { node: node2 }; - const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica1, replica2]); - - volumes.start(); - volume = await volumes.importVolume(volumeCRD.UUID, volumeCRD.spec, volumeCRD.status); - expect(Object.keys(volume.replicas)).to.have.lengthOf(2); - expect(Object.values(volume.replicas)[0]).to.equal(replica1); - 
expect(Object.values(volume.replicas)[1]).to.equal(replica2); - expect(volume.state).to.equal('pending'); - expect(volEvents).to.have.lengthOf(1); - expect(volEvents[0].eventType).to.equal('new'); - }); - - it('imported volume with all replicas and nexus available should be healthy', async () => { + replica.pool = { node: node1 }; const nexus = new Nexus({ uuid: UUID, size: 20, @@ -484,50 +439,68 @@ module.exports = function () { { uri: `bdev:///${UUID}`, state: 'CHILD_ONLINE' - }, - { - uri: `nvmf://remote/${UUID}`, - state: 'CHILD_ONLINE' } ] }); nexus.node = node1; - node1._registerNexus(nexus); + const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); + getReplicaSetStub.returns([replica]); + const getNexusStub = sinon.stub(registry, 'getNexus'); + getNexusStub.returns(nexus); - const replica1 = new Replica({ + volumes.start(); + volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + expect(volume.nexus.getUri()).to.be.undefined(); + expect(Object.keys(volume.replicas)).to.have.lengthOf(1); + expect(Object.values(volume.replicas)[0]).to.equal(replica); + expect(volume.state).to.equal('healthy'); + expect(volEvents).to.have.lengthOf(4); + }); + + it('should import published volume with nexus', async () => { + const deviceUri = 'nbd:///dev/ndb0'; + const replica = new Replica({ uuid: UUID, size: 40, share: 'REPLICA_NONE', uri: `bdev:///${UUID}` }); - replica1.pool = { node: node1 }; - const replica2 = new Replica({ + replica.pool = { node: node1 }; + const nexus = new Nexus({ uuid: UUID, - size: 40, - share: 'REPLICA_NVMF', - uri: `nvmf://remote/${UUID}` + size: 20, + deviceUri: '', + state: 'NEXUS_ONLINE', + children: [ + { + uri: `bdev:///${UUID}`, + state: 'CHILD_ONLINE' + } + ] }); - replica2.pool = { node: node2 }; + nexus.node = node1; const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica1, replica2]); + getReplicaSetStub.returns([replica]); const getNexusStub = sinon.stub(registry, 'getNexus'); getNexusStub.returns(nexus); + stub1.onCall(0).resolves({ deviceUri }); volumes.start(); - volume = await volumes.importVolume(volumeCRD.UUID, volumeCRD.spec, volumeCRD.status); - expect(Object.keys(volume.replicas)).to.have.lengthOf(2); - expect(Object.values(volume.replicas)[0]).to.equal(replica1); - expect(Object.values(volume.replicas)[1]).to.equal(replica2); + volume = await volumes.importVolume(UUID, volumeSpec, { + size: 40, + targetNodes: ['node1'] + }); + await waitUntil(() => volume.nexus.deviceUri === deviceUri, 'published nexus'); + expect(Object.keys(volume.replicas)).to.have.lengthOf(1); + expect(Object.values(volume.replicas)[0]).to.equal(replica); expect(volume.state).to.equal('healthy'); - expect(volEvents).to.have.lengthOf(2); - expect(volEvents[0].eventType).to.equal('new'); - expect(volEvents[1].eventType).to.equal('mod'); - expect(volume.nexus).is.not.null(); - expect(volume.nexus.children).to.have.lengthOf(2); + expect(volEvents).to.have.lengthOf(5); }); }); describe('update volume', function () { + let modCount; + // We create an artificial volume at the beginning of each test. 
this.beforeEach(() => { createTestEnv(); @@ -559,7 +532,9 @@ module.exports = function () { getNexusStub.returns(nexus); // Fake the volume - volume = new Volume(UUID, registry, { + volume = new Volume(UUID, registry, () => { + modCount += 1; + }, { replicaCount: 1, preferredNodes: [], requiredNodes: [], @@ -570,12 +545,15 @@ module.exports = function () { volume.newReplica(replica); volumes.volumes[UUID] = volume; volume.newNexus(nexus); + volume.state = 'healthy'; + modCount = 0; volumes.start(); }); this.afterEach(() => { volumes.stop(); + modCount = 0; }); it('should update volume parameters if a volume to be created already exists', async () => { @@ -600,8 +578,7 @@ module.exports = function () { expect(volume.requiredBytes).to.equal(89); expect(volume.limitBytes).to.equal(111); expect(volume.state).to.equal('healthy'); - expect(volEvents).to.have.lengthOf(1); - expect(volEvents[0].eventType).to.equal('mod'); + expect(modCount).to.equal(1); }); it('should not do anything if creating a volume that exists and has the same parameters', async () => { @@ -617,7 +594,7 @@ module.exports = function () { sinon.assert.notCalled(stub2); sinon.assert.notCalled(stub3); expect(returnedVolume).to.equal(volume); - expect(volEvents).to.have.lengthOf(0); + expect(modCount).to.equal(0); }); it('should fail to shrink the volume', async () => { @@ -659,7 +636,7 @@ module.exports = function () { }); describe('scale up/down', function () { - beforeEach(setUpReferenceEnv); + beforeEach(() => setUpReferenceEnv(true)); afterEach(tearDownReferenceEnv); it('should scale up if a child is faulted', async () => { @@ -985,8 +962,20 @@ module.exports = function () { }); }); - describe('state transitions', function () { - beforeEach(setUpReferenceEnv); + describe('state transitions on a volume without nexus', function () { + beforeEach(() => setUpReferenceEnv(false)); + afterEach(tearDownReferenceEnv); + + it('should move to "faulted" when none of replicas is online', async () => { + node3._offline(); // prevent FSA from scheduling a new replica + replica1.offline(); + replica2.offline(); + await waitUntil(() => volume.state === 'faulted', 'faulted volume'); + }); + }); + + describe('state transitions on a volume with nexus', function () { + beforeEach(() => setUpReferenceEnv(true)); afterEach(tearDownReferenceEnv); it('should move to "faulted" when none of replicas is online', async () => { @@ -1016,18 +1005,27 @@ module.exports = function () { await waitUntil(() => volume.state === 'healthy', 'healthy volume'); }); - it('should move to "offline" state when nexus goes offline', async () => { + it('should move to "faulted" state when nexus goes offline', async () => { nexus.state = 'NEXUS_OFFLINE'; registry.emit('nexus', { eventType: 'mod', object: nexus }); - await waitUntil(() => volume.state === 'offline', 'offline volume'); + await waitUntil(() => volume.state === 'faulted', 'offline volume'); }); - it('should not move to any state when in "pending" state', async () => { - volume.delNexus(nexus); - await waitUntil(() => volume.state === 'pending', 'pending volume'); + it('should move to "healthy" when volume is unpublished', async () => { + nexus.state = 'NEXUS_OFFLINE'; + registry.emit('nexus', { + eventType: 'del', + object: nexus + }); + await volume.unpublish(); + await waitUntil(() => volume.state === 'healthy', 'healthy volume'); + }); + + it('should not move to any state when in "destroyed" state', async () => { + volume.state = 'destroyed'; // try to move all replicas to faulted and the state should not 
change nexus.children.forEach((ch) => (ch.state = 'CHILD_FAULTED')); registry.emit('nexus', { @@ -1040,60 +1038,155 @@ module.exports = function () { // ok - the state did not change } finally { // this will throw - expect(volume.state).to.equal('pending'); + expect(volume.state).to.equal('destroyed'); } }); }); - // Volume is created once in the first test and then all tests use it - describe('misc', function () { - before(createTestEnv); - - afterEach(() => { - stub1.resetHistory(); - stub2.resetHistory(); - stub3.resetHistory(); - volEvents = []; - }); - - after(() => { - volumes.stop(); - }); + describe('nexus failover', function () { + beforeEach(() => setUpReferenceEnv(true)); + afterEach(tearDownReferenceEnv); - // this creates a volume used in subsequent cases - it('should create a new volume', async () => { - // on node 1 is created replica and nexus + it('should create nexus on the same node where it was published', async () => { + // FSA should try to create and share the nexus again stub1.onCall(0).resolves({ uuid: UUID, - pool: 'pool1', size: 96, - thin: false, - share: 'REPLICA_NONE', - uri: 'bdev:///' + UUID + state: 'NEXUS_ONLINE', + children: [ + { + uri: `bdev:///${UUID}`, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + }, + { + uri: `nvmf://remote/${UUID}`, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + } + ] }); stub1.onCall(1).resolves({ + deviceUri: 'file:///dev/nbd0' + }); + + // we unbind the nexus - that happens when node goes down + nexus.unbind(); + await waitUntil(() => volume.state === 'faulted', 'volume faulted'); + expect(volume.nexus).to.be.null(); + expect(volume.publishedOn).to.equal('node1'); + + // this simulates node that has been just successfully sync'd + const isSyncedStub = sinon.stub(node1, 'isSynced'); + isSyncedStub.returns(true); + node1.emit('node', { + eventType: 'mod', + object: node1 + }); + await waitUntil(() => volume.state === 'healthy', 'healthy volume'); + expect(volume.nexus.deviceUri).to.equal('file:///dev/nbd0'); + expect(volume.publishedOn).to.equal('node1'); + }); + + it('should set state to healthy again when nexus comes online', async () => { + nexus.offline(); + await waitUntil(() => volume.state === 'faulted', 'volume faulted'); + + nexus.state = 'NEXUS_ONLINE'; + registry.emit('nexus', { + eventType: 'mod', + object: nexus + }); + await waitUntil(() => volume.state === 'healthy', 'healthy volume'); + }); + + it('should destroy a new nexus on wrong node', async () => { + stub2.onCall(0).resolves({}); + const wrongNexus = new Nexus({ uuid: UUID, - size: 96, + size: 95, + deviceUri: '', state: 'NEXUS_ONLINE', children: [ { - uri: 'bdev:///' + UUID, + uri: `bdev:///${UUID}`, state: 'CHILD_ONLINE', rebuildProgress: 0 }, { - uri: 'nvmf://replica2', + uri: `nvmf://remote/${UUID}`, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + } + ] + }); + node2._registerNexus(wrongNexus); + + await waitUntil(() => stub2.callCount > 0, 'destroy grpc call'); + sinon.assert.calledOnce(stub2); + sinon.assert.calledWithMatch(stub2, 'destroyNexus', { uuid: UUID }); + expect(volume.nexus).to.equal(nexus); + expect(volume.state).to.equal('healthy'); + }); + + it('should replace a nexus in volume on wrong node', async () => { + volume.publishedOn = 'node2'; + stub1.onCall(0).resolves({}); + const newNexus = new Nexus({ + uuid: UUID, + size: 95, + deviceUri: '', + state: 'NEXUS_ONLINE', + children: [ + { + uri: `bdev:///${UUID}`, state: 'CHILD_ONLINE', rebuildProgress: 0 }, { - uri: 'nvmf://replica3', + uri: `nvmf://remote/${UUID}`, state: 'CHILD_ONLINE', 
rebuildProgress: 0 } ] }); + node2._registerNexus(newNexus); + await waitUntil(() => stub1.callCount > 0, 'destroy grpc call'); + sinon.assert.calledOnce(stub1); + sinon.assert.calledWithMatch(stub1, 'destroyNexus', { uuid: UUID }); + expect(volume.nexus).to.equal(newNexus); + expect(volume.state).to.equal('healthy'); + }); + }); + + // Volume is created once in the first test and then all tests use it. + // This tests the typical life-cycle of a volume from create to destroy. + describe('misc', function () { + before(createTestEnv); + + afterEach(() => { + stub1.resetHistory(); + stub2.resetHistory(); + stub3.resetHistory(); + volEvents = []; + }); + + after(() => { + volumes.stop(); + }); + + // this creates a volume used in subsequent cases + it('should create a new volume', async () => { + // on node 1 is created replica + stub1.onCall(0).resolves({ + uuid: UUID, + pool: 'pool1', + size: 96, + thin: false, + share: 'REPLICA_NONE', + uri: 'bdev:///' + UUID + }); // on node 2 is created replica and it is shared stub2.onCall(0).resolves({ uuid: UUID, @@ -1104,7 +1197,6 @@ module.exports = function () { uri: 'bdev:///' + UUID }); stub2.onCall(1).resolves({ uri: 'nvmf://replica2' }); - // on node 3 is created replica and it is shared stub3.onCall(0).resolves({ uuid: UUID, @@ -1126,7 +1218,7 @@ module.exports = function () { protocol: 'nbd' }); - sinon.assert.calledTwice(stub1); + sinon.assert.calledOnce(stub1); sinon.assert.calledWithMatch(stub1.firstCall, 'createReplica', { uuid: UUID, pool: 'pool1', @@ -1134,13 +1226,8 @@ module.exports = function () { thin: false, share: 'REPLICA_NONE' }); - sinon.assert.calledWithMatch(stub1.secondCall, 'createNexus', { - uuid: UUID, - size: 96, - children: ['bdev:///' + UUID, 'nvmf://replica2', 'nvmf://replica3'] - }); - sinon.assert.calledTwice(stub2); + sinon.assert.calledOnce(stub2); sinon.assert.calledWithMatch(stub2.firstCall, 'createReplica', { uuid: UUID, pool: 'pool2', @@ -1148,12 +1235,8 @@ module.exports = function () { thin: false, share: 'REPLICA_NONE' }); - sinon.assert.calledWithMatch(stub2.secondCall, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); - sinon.assert.calledTwice(stub3); + sinon.assert.calledOnce(stub3); sinon.assert.calledWithMatch(stub3.firstCall, 'createReplica', { uuid: UUID, pool: 'pool3', @@ -1161,28 +1244,110 @@ module.exports = function () { thin: false, share: 'REPLICA_NONE' }); - sinon.assert.calledWithMatch(stub3.secondCall, 'shareReplica', { - uuid: UUID, - share: 'REPLICA_NVMF' - }); expect(volumes.get(UUID)).to.equal(volume); expect(volume.uuid).to.equal(UUID); - expect(volume.size).to.equal(96); + expect(volume.getSize()).to.equal(96); + expect(volume.getNodeName()).to.be.undefined(); expect(volume.replicaCount).to.equal(3); expect(volume.preferredNodes).to.have.lengthOf(0); expect(volume.requiredNodes).to.have.lengthOf(0); expect(volume.requiredBytes).to.equal(90); expect(volume.limitBytes).to.equal(110); - expect(volume.nexus.uuid).to.equal(UUID); + expect(volume.nexus).to.be.null(); expect(Object.keys(volume.replicas)).to.have.lengthOf(3); expect(volume.replicas.node1.uuid).to.equal(UUID); expect(volume.replicas.node2.uuid).to.equal(UUID); expect(volume.replicas.node3.uuid).to.equal(UUID); expect(volume.state).to.equal('healthy'); - // 1 new + 6 mods (3 new replicas, 2 set share, 1 new nexus) - expect(volEvents).to.have.lengthOf(7); + // 1 new + 3 new replicas + state change + expect(volEvents).to.have.lengthOf(5); + }); + + it('should publish the volume', async () => { + const deviceUri = 
'file:///dev/nbd0'; + // on node 1 is created nexus + stub1.onCall(0).resolves({ + uuid: UUID, + size: 96, + state: 'NEXUS_ONLINE', + children: [ + { + uri: 'bdev:///' + UUID, + state: 'CHILD_ONLINE', + rebuildProgress: 0 + }, + { + uri: 'nvmf://replica2', + state: 'CHILD_ONLINE', + rebuildProgress: 0 + }, + { + uri: 'nvmf://replica3', + state: 'CHILD_ONLINE', + rebuildProgress: 0 + } + ] + }); + stub1.onCall(1).resolves({ deviceUri }); + // on node 2 is shared replica + stub2.onCall(0).resolves({ uri: 'nvmf://replica2' }); + // on node 3 is shared replica + stub3.onCall(0).resolves({ uri: 'nvmf://replica3' }); + + const uri = await volume.publish('nbd'); + expect(uri).to.equal(deviceUri); + + sinon.assert.calledTwice(stub1); + sinon.assert.calledWithMatch(stub1.firstCall, 'createNexus', { + uuid: UUID, + size: 96, + children: ['bdev:///' + UUID, 'nvmf://replica2', 'nvmf://replica3'] + }); + sinon.assert.calledWithMatch(stub1.secondCall, 'publishNexus', { + uuid: UUID, + key: '', + share: enums.NEXUS_NBD + }); + + sinon.assert.calledOnce(stub2); + sinon.assert.calledWithMatch(stub2.firstCall, 'shareReplica', { + uuid: UUID, + share: 'REPLICA_NVMF' + }); + + sinon.assert.calledOnce(stub3); + sinon.assert.calledWithMatch(stub3.firstCall, 'shareReplica', { + uuid: UUID, + share: 'REPLICA_NVMF' + }); + + expect(volume.getNodeName()).to.equal('node1'); + expect(volume.getSize()).to.equal(96); + expect(volume.replicaCount).to.equal(3); + expect(volume.nexus.uuid).to.equal(UUID); + expect(Object.keys(volume.replicas)).to.have.lengthOf(3); + expect(volume.state).to.equal('healthy'); + + // 5 mods (2 set share, 1 new nexus, 1 publish nexus, state change) + expect(volEvents).to.have.lengthOf(5); + }); + + it('should unpublish the volume', async () => { + stub1.onCall(0).resolves({}); + await volume.unpublish(); + sinon.assert.calledOnce(stub1); + sinon.assert.calledWithMatch(stub1, 'destroyNexus', { + uuid: UUID + }); + expect(volume.getNodeName()).to.be.undefined(); + expect(volume.uuid).to.equal(UUID); + expect(volume.nexus).is.null(); + expect(volume.state).to.equal('healthy'); + expect(Object.keys(volume.replicas)).to.have.length(3); + // 2 nexus events + expect(volEvents).to.have.lengthOf(2); }); it('should destroy the volume', async () => { @@ -1192,23 +1357,19 @@ module.exports = function () { await volumes.destroyVolume(UUID); - sinon.assert.calledTwice(stub1); - sinon.assert.calledWithMatch(stub1.firstCall, 'destroyNexus', { - uuid: UUID - }); - sinon.assert.calledWithMatch(stub1.secondCall, 'destroyReplica', { - uuid: UUID - }); + sinon.assert.calledOnce(stub1); + sinon.assert.calledWithMatch(stub1, 'destroyReplica', { uuid: UUID }); sinon.assert.calledOnce(stub2); sinon.assert.calledWithMatch(stub2, 'destroyReplica', { uuid: UUID }); sinon.assert.calledOnce(stub3); sinon.assert.calledWithMatch(stub3, 'destroyReplica', { uuid: UUID }); - expect(volumes.get(UUID)).is.null(); + expect(volumes.get(UUID)).is.undefined(); + expect(volume.getNodeName()).to.be.undefined(); expect(volume.nexus).is.null(); - expect(volume.state).to.equal('pending'); + expect(volume.state).to.equal('destroyed'); expect(Object.keys(volume.replicas)).to.have.length(0); - // 3 replicas, 1 nexus and 1 del volume event + // 3 replicas and 1 del volume event expect(volEvents).to.have.lengthOf(5); }); @@ -1216,7 +1377,7 @@ module.exports = function () { stub1.onCall(0).resolves({}); stub2.onCall(0).resolves({}); stub3.onCall(0).resolves({}); - expect(volumes.get(UUID)).is.null(); + expect(volumes.get(UUID)).is.undefined(); 
await volumes.destroyVolume(UUID); diff --git a/csi/moac/tsconfig.json b/csi/moac/tsconfig.json index 4e5658426..fc025ce12 100644 --- a/csi/moac/tsconfig.json +++ b/csi/moac/tsconfig.json @@ -63,10 +63,13 @@ "files": [ "watcher.ts", "nexus.ts", + "node.ts", "node_operator.ts", "replica.ts", "pool.ts", "pool_operator.ts", + "volume.ts", + "volumes.ts", "volume_operator.ts", ] } diff --git a/csi/moac/volume.js b/csi/moac/volume.js deleted file mode 100644 index d0a041b97..000000000 --- a/csi/moac/volume.js +++ /dev/null @@ -1,700 +0,0 @@ -// Volume object abstracts user from volume components nexus and -// replicas and implements algorithms for volume recovery. - -'use strict'; - -const _ = require('lodash'); -const assert = require('assert'); -const log = require('./logger').Logger('volume'); -const { GrpcCode, GrpcError } = require('./grpc_client'); - -// Abstraction of the volume. It is an abstract object which consists of -// physical entities nexus and replicas. It provides high level methods -// for doing operations on the volume as well as recovery algorithms for -// maintaining desired redundancy. -class Volume { - // Construct a volume object with given uuid. - // - // @params {string} uuid ID of the volume. - // @params {object} registry Registry object. - // @params {object} spec Volume parameters. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // @params {object} [size=0] Current properties of the volume. - // - constructor (uuid, registry, spec, size = 0) { - assert(spec); - // specification of the volume - this.uuid = uuid; - this.registry = registry; - this.replicaCount = spec.replicaCount || 1; - this.preferredNodes = _.clone(spec.preferredNodes || []).sort(); - this.requiredNodes = _.clone(spec.requiredNodes || []).sort(); - this.requiredBytes = spec.requiredBytes; - this.limitBytes = spec.limitBytes; - this.protocol = spec.protocol; - this.size = size; - // state variables of the volume - this.nexus = null; - this.replicas = {}; // replicas indexed by node name - this.state = 'pending'; - this.runFsa = 0; // number of requests to run FSA - } - - // Stringify volume - toString () { - return this.uuid; - } - - // Get the size of the volume. - getSize () { - return this.size; - } - - // Get the node which the volume is accessible from - // (currently that is where the nexus is). - getNodeName () { - return this.nexus ? this.nexus.node.name : ''; - } - - // Publish the volume. That means make it accessible through a block device. - // @params {string} protocol The nexus share protocol. - // @return {string} uri The URI to access the nexus. - async publish (protocol) { - if (this.nexus) { - const uri = await this.nexus.publish(protocol); - return uri; - } else { - throw new GrpcError( - GrpcCode.INTERNAL, - 'Cannot publish a volume without nexus' - ); - } - } - - // Undo publish operation on the volume. - async unpublish () { - if (this.nexus) { - await this.nexus.unpublish(); - } else { - throw new GrpcError( - GrpcCode.INTERNAL, - 'Cannot unpublish a volume without nexus' - ); - } - } - - // Delete nexus and destroy all replicas of the volume. 
- async destroy () { - if (this.nexus) { - await this.nexus.destroy(); - } - const promises = Object.values(this.replicas).map((replica) => - replica.destroy() - ); - await Promise.all(promises); - } - - // Trigger the run of FSA. It will either run immediately or if it is already - // running, it will start again when the current run finishes. - // - // Why critical section on fsa? Certain operations done by fsa are async. If - // we allow another process to enter fsa before the async operation is done - // and the state of volume updated we risk that the second process repeats - // exactly the same action (because from its point of view it hasn't been - // done yet). - fsa () { - if (this.runFsa++ === 0) { - this._fsa().finally(() => { - const runAgain = this.runFsa > 1; - this.runFsa = 0; - if (runAgain) this.fsa(); - }); - } - } - - // Implementation of finite state automaton (FSA) that moves the volume - // through states: pending, degraded, faulted, healthy - trying to preserve - // data on volume "no matter what". - async _fsa () { - if (!this.nexus) { - // nexus does not exist yet - nothing to do - assert.strictEqual(this.state, 'pending'); - return; - } - log.debug(`Volume "${this}" enters FSA in ${this.state} state`); - - if (this.nexus.state === 'NEXUS_OFFLINE') { - // if nexus is not accessible then the information about children is stale - // and we cannot make any reasonable decisions, so bail out. - this._setState('offline'); - return; - } - - // check that replicas are shared as they should be - for (const nodeName in this.replicas) { - const replica = this.replicas[nodeName]; - if (!replica.isOffline()) { - let share; - const local = replica.pool.node === this.nexus.node; - // make sure that replica that is local to the nexus is accessed locally - if (local && replica.share !== 'REPLICA_NONE') { - share = 'REPLICA_NONE'; - } else if (!local && replica.share === 'REPLICA_NONE') { - // make sure that replica that is remote to nexus can be accessed - share = 'REPLICA_NVMF'; - } - if (share) { - try { - await replica.setShare(share); - // fsa will get called again because the replica was modified - return; - } catch (err) { - throw new GrpcError( - GrpcCode.INTERNAL, - `Failed to set share protocol to ${share} for replica "${replica}": ${err}` - ); - } - } - } - } - // pair nexus children with replica objects to get the full picture - var self = this; - const children = this.nexus.children.map((ch) => { - return { - uri: ch.uri, - state: ch.state, - replica: Object.values(self.replicas).find((r) => r.uri === ch.uri) - }; - }); - // add newly found replicas to the nexus (one by one) - const newReplica = Object.values(this.replicas).filter( - (r) => !r.isOffline() && !children.find((ch) => ch.replica === r) - )[0]; - if (newReplica) { - try { - await this.nexus.addReplica(newReplica); - } catch (err) { - log.error(err.toString()); - } - return; - } - - // If there is not a single replica that is online then there is no hope - // that we could rebuild anything. - var onlineCount = children.filter((ch) => ch.state === 'CHILD_ONLINE') - .length; - if (onlineCount === 0) { - this._setState('faulted'); - return; - } - - // If we don't have sufficient number of sound replicas (sound means online - // , under rebuild or pending) then add a new one. 
- var soundCount = children.filter((ch) => { - return ['CHILD_ONLINE', 'CHILD_DEGRADED'].indexOf(ch.state) >= 0; - }).length; - if (this.replicaCount > soundCount) { - this._setState('degraded'); - // add new replica - try { - await this._createReplicas(this.replicaCount - soundCount); - } catch (err) { - log.error(err.toString()); - } - // The replicas will be added to nexus when the fsa is run next time - // which happens immediately after we exit. - return; - } - - // The condition for later actions is that volume must not be rebuilding or - // waiting for a child add. So check that and return if that's the case. - var rebuildCount = children.filter((ch) => ch.state === 'CHILD_DEGRADED').length; - if (rebuildCount > 0) { - this._setState('degraded'); - return; - } - - assert(onlineCount >= this.replicaCount); - this._setState('healthy'); - - // If we have more online replicas then we need to, then remove one. - // Child that is broken and without a replica is a good fit for removal. - let rmChild = children.find( - (ch) => !ch.replica && ch.state === 'CHILD_FAULTED' - ); - if (!rmChild) { - rmChild = children.find((ch) => ch.state === 'CHILD_FAULTED'); - if (!rmChild) { - // A child that is unknown to us (without replica object) - rmChild = children.find((ch) => !ch.replica); - // If all replicas are online, then continue searching for a candidate - // only if there are more online replicas than it needs to be. - if (!rmChild && onlineCount > this.replicaCount) { - // The replica with the lowest score must go away - const rmReplica = this._prioritizeReplicas( - children.map((ch) => ch.replica) - ).pop(); - if (rmReplica) { - rmChild = children.find((ch) => ch.replica === rmReplica); - } - } - } - } - if (rmChild) { - try { - await this.nexus.removeReplica(rmChild.uri); - } catch (err) { - log.error(err.toString()); - return; - } - if (rmChild.replica) { - try { - await rmChild.replica.destroy(); - } catch (err) { - log.error(err.toString()); - } - } - return; - } - - // If a replica should run on a different node then move it - var moveChild = children.find((ch) => { - if ( - ch.replica && - ch.state === 'CHILD_ONLINE' && - self.requiredNodes.length > 0 && - self.requiredNodes.indexOf(ch.replica.pool.node.name) < 0 - ) { - if (self.requiredNodes.indexOf(ch.replica.pool.node.name) < 0) { - return true; - } - } - return false; - }); - if (moveChild) { - // We add a new replica and the old one will be removed when both are - // online since there will be more of them than needed. We do one by one - // not to trigger too many changes. - try { - await this._createReplicas(1); - } catch (err) { - log.error(err.toString()); - } - } - } - - // Change the volume state to given state. If the state is not the same as - // previous one, we should emit a volume mod event. - // - // TODO: we should emit but we don't because currently we don't have reference - // to the volumes object. Instead we rely that every state transition is - // triggered by another event (i.e. new replica) so the volume operator will - // be notified about the change anyway. It would be nice to fix this when we - // replace our ad-hoc message bus by something better what allows us to store - // the reference to message channel in every volume. - // - // @param {string} newState New state to set on volume. 
- _setState (newState) { - if (this.state !== newState) { - if (newState === 'healthy') { - log.info(`Volume "${this}" is ${newState}`); - } else { - log.warn(`Volume "${this}" is ${newState}`); - } - this.state = newState; - } - } - - // Create the volume in accordance with requirements specified during the - // object creation. Create whatever component is missing (note that we - // might not be creating it from the scratch). - // - // NOTE: Until we create a nexus at the end, the volume is not acted upon by FSA. - // When "new nexus" event comes in, that moves it from pending state and kicks - // off FSA. Exactly what we want, because the async events produced by this - // function do not interfere with execution of the "create". - async create () { - log.debug(`Creating the volume "${this}"`); - assert(!this.nexus); - - // Ensure there is sufficient number of replicas for the volume. - // TODO: take replica state into account - const newReplicaCount = this.replicaCount - Object.keys(this.replicas).length; - if (newReplicaCount > 0) { - // create more replicas if higher replication factor is desired - await this._createReplicas(newReplicaCount); - } - - // Ensure replicas can be accessed from nexus. Set share protocols. - const replicaSet = await this._ensureReplicaShareProtocols(); - - // If the nexus poped up while we were creating replicas pick it up now. - // Though it's an unsual situation so we log a warning if it happens. - const nexus = this.registry.getNexus(this.uuid); - if (nexus) { - log.warn( - `The nexus "${nexus}" appeared while creating replicas - using it` - ); - this.newNexus(nexus); - return; - } - if (!this.size) { - // the size will be the smallest replica - this.size = Object.values(this.replicas) - .map((r) => r.size) - .reduce((acc, cur) => (cur < acc ? cur : acc), Number.MAX_SAFE_INTEGER); - } - // create a new nexus with children (replicas) created in previous steps - this.nexus = await this._createNexus(replicaSet); - log.info(`Volume "${this}" with size ${this.size} was created`); - } - - // Update child devices of existing nexus or create a new nexus if it does not - // exist. - // - // @param {object[]} replicas Replicas that should be used for child bdevs of nexus. - // @returns {object} Created nexus object. - // - async _createNexus (replicas) { - // create a new nexus - const localReplica = Object.values(this.replicas).find( - (r) => r.share === 'REPLICA_NONE' - ); - if (!localReplica) { - // should not happen but who knows .. - throw new GrpcError( - GrpcCode.INTERNAL, - 'Cannot create nexus if none of the replicas is local' - ); - } - return localReplica.pool.node.createNexus( - this.uuid, - this.size, - Object.values(replicas) - ); - } - - // Adjust replica count for the volume to required count. - // - // TODO: Take into account state of replicas. - // - // @param {number} newCount Number of new replicas to create. 
- // - async _createReplicas (count) { - let pools = this.registry.choosePools( - this.requiredBytes, - this.requiredNodes, - this.preferredNodes - ); - // remove pools that are already used by existing replicas - const usedNodes = Object.keys(this.replicas); - pools = pools.filter((p) => usedNodes.indexOf(p.node.name) < 0); - if (pools.length < count) { - log.error( - `No suitable pool(s) for volume "${this}" with capacity ` + - `${this.requiredBytes} and replica count ${this.replicaCount}` - ); - throw new GrpcError( - GrpcCode.RESOURCE_EXHAUSTED, - 'Cannot find suitable storage pool(s) for the volume' - ); - } - - // Calculate the size of the volume if not given precisely. - // - // TODO: Size of the smallest pool is a safe choice though too conservative. - if (!this.size) { - this.size = Math.min( - pools.reduce( - (acc, pool) => Math.min(acc, pool.freeBytes()), - Number.MAX_SAFE_INTEGER - ), - this.limitBytes || this.requiredBytes - ); - } - - // We record all failures as we try to create the replica on available - // pools to return them to the user at the end if we ultimately fail. - const errors = []; - // try one pool after another until success - for (let i = 0; i < pools.length && count > 0; i++) { - const pool = pools[i]; - - try { - // this will add the replica to the cache if successful - await pool.createReplica(this.uuid, this.size); - } catch (err) { - log.error(err.message); - errors.push(err.message); - continue; - } - count--; - } - // check if we created enough replicas - if (count > 0) { - let msg = `Failed to create required number of replicas for volume "${this}": `; - msg += errors.join('. '); - throw new GrpcError(GrpcCode.INTERNAL, msg); - } - } - - // Get list of replicas for this volume sorted from the most to the - // least preferred. - // - // @returns {object[]} List of replicas sorted by preference (the most first). - // - _prioritizeReplicas (replicas) { - const self = this; - return Object.values(replicas).sort( - (a, b) => self._scoreReplica(b) - self._scoreReplica(a) - ); - } - - // Assign score to a replica based on certain criteria. The higher the better. - // - // @param {object} replica Replica object. - // @returns {number} Score from 0 to 18. - // - _scoreReplica (replica) { - let score = 0; - const node = replica.pool.node; - - // criteria #1: must be on the required nodes if set - if ( - this.requiredNodes.length > 0 && - this.requiredNodes.indexOf(node.name) >= 0 - ) { - score += 10; - } - // criteria #2: replica should be online - if (!replica.isOffline()) { - score += 5; - } - // criteria #2: would be nice to run on preferred node - if ( - this.preferredNodes.length > 0 && - this.preferredNodes.indexOf(node.name) >= 0 - ) { - score += 2; - } - // criteria #3: local IO from nexus is certainly an advantage - if (this.nexus && node === this.nexus.node) { - score += 1; - } - - // TODO: Score the replica based on the pool parameters. - // I.e. the replica on a less busy pool would have higher score. - return score; - } - - // Share replicas as appropriate to allow access from the nexus and return - // just replicas that should be used for the nexus (excessive replicas will - // be trimmed). - // - // @returns {object[]} Replicas that should be used for nexus sorted by preference. - // - async _ensureReplicaShareProtocols () { - // If nexus does not exist it will be created on the same node as the most - // preferred replica. 
- const replicaSet = this._prioritizeReplicas(Object.values(this.replicas)); - if (replicaSet.length === 0) { - throw new GrpcError( - GrpcCode.INTERNAL, - `There are no replicas for volume "${this}"` - ); - } - replicaSet.splice(this.replicaCount); - - const nexusNode = this.nexus ? this.nexus.node : replicaSet[0].pool.node; - - for (let i = 0; i < replicaSet.length; i++) { - const replica = replicaSet[i]; - let share; - const local = replica.pool.node === nexusNode; - // make sure that replica which is local to the nexus is accessed locally - if (local && replica.share !== 'REPLICA_NONE') { - share = 'REPLICA_NONE'; - } else if (!local && replica.share === 'REPLICA_NONE') { - // make sure that replica which is remote to nexus can be accessed - share = 'REPLICA_NVMF'; - } - if (share) { - try { - await replica.setShare(share); - } catch (err) { - throw new GrpcError( - GrpcCode.INTERNAL, - `Failed to set share protocol to ${share} for replica "${replica}": ${err}` - ); - } - } - } - return replicaSet; - } - - // Update parameters of the volume. - // - // Throw exception if size of volume is changed in an incompatible way - // (unsupported). - // - // @params {object} spec Volume parameters. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // @returns {boolean} True if the volume spec has changed, false otherwise. - // - update (spec) { - var changed = false; - - if (this.size < spec.requiredBytes) { - throw new GrpcError( - GrpcCode.INVALID_ARGUMENT, - `Extending the volume "${this}" is not supported` - ); - } - if (spec.limitBytes && this.size > spec.limitBytes) { - throw new GrpcError( - GrpcCode.INVALID_ARGUMENT, - `Shrinking the volume "${this}" is not supported` - ); - } - if (this.protocol !== spec.protocol) { - throw new GrpcError( - GrpcCode.INVALID_ARGUMENT, - `Changing the protocol for volume "${this}" is not supported` - ); - } - - if (this.replicaCount !== spec.replicaCount) { - this.replicaCount = spec.replicaCount; - changed = true; - } - const preferredNodes = _.clone(spec.preferredNodes || []).sort(); - if (!_.isEqual(this.preferredNodes, preferredNodes)) { - this.preferredNodes = preferredNodes; - changed = true; - } - const requiredNodes = _.clone(spec.requiredNodes || []).sort(); - if (!_.isEqual(this.requiredNodes, requiredNodes)) { - this.requiredNodes = requiredNodes; - changed = true; - } - if (this.requiredBytes !== spec.requiredBytes) { - this.requiredBytes = spec.requiredBytes; - changed = true; - } - if (this.limitBytes !== spec.limitBytes) { - this.limitBytes = spec.limitBytes; - changed = true; - } - return changed; - } - - // - // Handlers for the events from node registry follow - // - - // Add new replica to the volume. - // - // @param {object} replica New replica object. 
- newReplica (replica) { - assert(replica.uuid === this.uuid); - const nodeName = replica.pool.node.name; - if (this.replicas[nodeName]) { - log.warn( - `Trying to add the same replica "${replica}" to the volume twice` - ); - } else { - log.debug(`Replica "${replica}" attached to the volume`); - this.replicas[nodeName] = replica; - this.fsa(); - } - } - - // Modify replica in the volume. - // - // @param {object} replica Modified replica object. - modReplica (replica) { - assert.strictEqual(replica.uuid, this.uuid); - const nodeName = replica.pool.node.name; - if (!this.replicas[nodeName]) { - log.warn(`Modified replica "${replica}" does not belong to the volume`); - } else { - assert(this.replicas[nodeName] === replica); - // the share protocol or uri could have changed - this.fsa(); - } - } - - // Delete replica in the volume. - // - // @param {object} replica Deleted replica object. - delReplica (replica) { - assert.strictEqual(replica.uuid, this.uuid); - const nodeName = replica.pool.node.name; - if (!this.replicas[nodeName]) { - log.warn(`Deleted replica "${replica}" does not belong to the volume`); - } else { - log.debug(`Replica "${replica}" detached from the volume`); - assert(this.replicas[nodeName] === replica); - delete this.replicas[nodeName]; - this.fsa(); - } - } - - // Assign nexus to the volume. - // - // @param {object} nexus New nexus object. - newNexus (nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (this.nexus) { - log.warn(`Trying to add nexus "${nexus}" to the volume twice`); - } else { - log.debug(`Nexus "${nexus}" attached to the volume`); - assert.strictEqual(this.state, 'pending'); - this.nexus = nexus; - if (!this.size) this.size = nexus.size; - this.fsa(); - } - } - - // Nexus has been modified. - // - // @param {object} nexus Modified nexus object. - modNexus (nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (!this.nexus) { - log.warn(`Modified nexus "${nexus}" does not belong to the volume`); - } else { - assert.strictEqual(this.nexus, nexus); - this.fsa(); - } - } - - // Delete nexus in the volume. - // - // @param {object} nexus Deleted nexus object. - delNexus (nexus) { - assert.strictEqual(nexus.uuid, this.uuid); - if (!this.nexus) { - log.warn(`Deleted nexus "${nexus}" does not belong to the volume`); - } else { - log.debug(`Nexus "${nexus}" detached from the volume`); - assert.strictEqual(this.nexus, nexus); - this.nexus = null; - // this brings up back to a starting state. No FSA transitions are - // possible after this point unless we receive new nexus event again. - this._setState('pending'); - } - } -} - -module.exports = Volume; diff --git a/csi/moac/volume.ts b/csi/moac/volume.ts new file mode 100644 index 000000000..af6f25846 --- /dev/null +++ b/csi/moac/volume.ts @@ -0,0 +1,877 @@ +// Volume object abstracts user from volume components nexus and +// replicas and implements algorithms for volume recovery. 
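+//
+// A volume starts in the "pending" state and moves between "degraded",
+// "faulted" and "healthy" as its replicas and nexus come and go; destroying
+// it moves it to the terminal "destroyed" state (see VolumeState below).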
+
+import assert from 'assert';
+import * as _ from 'lodash';
+import { Replica } from './replica';
+import { Child, Nexus, Protocol } from './nexus';
+import { Pool } from './pool';
+import { Node } from './node';
+
+const log = require('./logger').Logger('volume');
+const { GrpcCode, GrpcError } = require('./grpc_client');
+
+// State of the volume
+export enum VolumeState {
+  Unknown = 'unknown',
+  Pending = 'pending',
+  Healthy = 'healthy',
+  Degraded = 'degraded',
+  Faulted = 'faulted',
+  Destroyed = 'destroyed',
+  Error = 'error', // used by the volume operator
+}
+
+export function volumeStateFromString(val: string): VolumeState {
+  if (val == VolumeState.Healthy) {
+    return VolumeState.Healthy;
+  } else if (val == VolumeState.Degraded) {
+    return VolumeState.Degraded;
+  } else if (val == VolumeState.Faulted) {
+    return VolumeState.Faulted;
+  } else if (val == VolumeState.Destroyed) {
+    return VolumeState.Destroyed;
+  } else if (val == VolumeState.Error) {
+    return VolumeState.Error;
+  } else if (val == VolumeState.Pending) {
+    return VolumeState.Pending;
+  } else {
+    return VolumeState.Unknown;
+  }
+}
+
+// Abstraction of the volume. It is an abstract object which consists of
+// physical entities nexus and replicas. It provides high level methods
+// for doing operations on the volume as well as recovery algorithms for
+// maintaining desired redundancy.
+export class Volume {
+  // volume spec properties
+  private uuid: string;
+  private replicaCount: number;
+  private preferredNodes: string[];
+  private requiredNodes: string[];
+  private requiredBytes: number;
+  private limitBytes: number;
+  private protocol: Protocol;
+  // volume status properties
+  private size: number;
+  private nexus: Nexus | null;
+  private replicas: Record<string, Replica>; // replicas indexed by node name
+  public state: VolumeState;
+  private publishedOn: string | undefined;
+  // internal properties
+  private emitEvent: (type: string) => void;
+  private registry: any;
+  private runFsa: number; // number of requests to run FSA
+  private nodeBlackList: Record<string, boolean>; // replicas on these nodes should be avoided
+
+  // Construct a volume object with given uuid.
+  //
+  // @params uuid ID of the volume.
+  // @params registry Registry object.
+  // @params emitEvent Callback that should be called anytime volume state changes.
+  // @params spec Volume parameters.
+  // @params spec.replicaCount Number of desired replicas.
+  // @params spec.preferredNodes Nodes to prefer for scheduling replicas.
+  // @params spec.requiredNodes Replicas must be on these nodes.
+  // @params spec.requiredBytes The volume must have at least this size.
+  // @params spec.limitBytes The volume should not be bigger than this.
+  // @params spec.protocol The share protocol for the nexus.
+  // @params [state] Current state of the volume.
+  // @params [size] Current size of the volume.
+  // @params [publishedOn] Node name where this volume is published.
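+  //
+  // Example of constructing a published volume (the values here are only
+  // illustrative; see the test suite for real usage):
+  //
+  //   const volume = new Volume(uuid, registry, emitEvent, {
+  //     replicaCount: 2,
+  //     preferredNodes: [],
+  //     requiredNodes: [],
+  //     requiredBytes: 90,
+  //     limitBytes: 110,
+  //     protocol: 'nbd'
+  //   }, VolumeState.Pending, 95, 'node1');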
+  //
+  constructor(
+    uuid: string,
+    registry: any,
+    emitEvent: (type: string) => void,
+    spec: any,
+    state?: VolumeState,
+    size?: number,
+    publishedOn?: string,
+  ) {
+    assert(spec);
+    // specification of the volume
+    this.uuid = uuid;
+    this.registry = registry;
+    this.replicaCount = spec.replicaCount || 1;
+    this.preferredNodes = _.clone(spec.preferredNodes || []).sort();
+    this.requiredNodes = _.clone(spec.requiredNodes || []).sort();
+    this.requiredBytes = spec.requiredBytes;
+    this.limitBytes = spec.limitBytes;
+    this.protocol = spec.protocol;
+    // state variables of the volume
+    this.size = size || 0;
+    this.publishedOn = publishedOn;
+    this.nexus = null;
+    this.replicas = {};
+    this.state = state || VolumeState.Pending;
+    // other properties
+    this.runFsa = 0;
+    this.nodeBlackList = {};
+    this.emitEvent = emitEvent;
+  }
+
+  // Stringify volume
+  toString(): string {
+    return this.uuid;
+  }
+
+  // Get the size of the volume.
+  getSize(): number {
+    return this.size;
+  }
+
+  // Get the node where the volume is accessible from (that is the node with
+  // the nexus) or undefined when nexus does not exist (unpublished volume).
+  getNodeName(): string | undefined {
+    return this.publishedOn;
+  }
+
+  // Publish the volume. That means making it accessible through a block device.
+  //
+  // @params protocol The nexus share protocol.
+  // @return uri The URI to access the nexus.
+  async publish(protocol: Protocol): Promise<string> {
+    if (this.state !== VolumeState.Degraded && this.state !== VolumeState.Healthy) {
+      throw new GrpcError(
+        GrpcCode.INTERNAL,
+        'Cannot publish a volume that is neither healthy nor degraded'
+      );
+    }
+    let nexus = this.nexus;
+    if (!nexus) {
+      // Ensure replicas can be accessed from nexus. Set share protocols.
+      const [nexusNode, replicaSet] = await this._ensureReplicaShareProtocols();
+
+      if (!this.size) {
+        // the size will be the smallest replica
+        this.size = Object.values(this.replicas)
+          .map((r) => r.size)
+          .reduce((acc, cur) => (cur < acc ? cur : acc), Number.MAX_SAFE_INTEGER);
+      }
+      // create a new nexus with children (replicas) created in previous steps
+      nexus = await this._createNexus(nexusNode, replicaSet);
+    } else {
+      log.debug(`Publishing volume ${this} that already has a nexus`);
+    }
+    let uri = nexus.getUri();
+    if (!uri) {
+      uri = await nexus.publish(protocol);
+    } else {
+      log.debug(`Publishing volume ${this} that has already been published`);
+    }
+    this.publishedOn = nexus.node.name;
+    log.info(`Published "${this}" at ${uri}`);
+    this.emitEvent('mod');
+    return uri;
+  }
+
+  // Undo publish operation on the volume.
+  async unpublish() {
+    if (this.publishedOn) {
+      this.publishedOn = undefined;
+      if (this.nexus) {
+        // We can directly destroy the nexus without unsharing it first,
+        // but later we will use this block of code in case we cannot
+        // destroy the nexus immediately because it is rebuilding
+        //if (this.nexus.getUri()) {
+        //  try {
+        //    await this.nexus.unpublish();
+        //  } catch (err) {
+        //    log.error(`Deferring nexus unpublish for volume ${this}: ${err}`);
+        //  }
+        //}
+        try {
+          // TODO: defer destruction in case that the volume is rebuilding
+          await this.nexus.destroy();
+        } catch (err) {
+          // We let unpublish always succeed and rely on FSA to remove
+          // the nexus later when it's possible to do so.
+          log.error(`Deferring nexus destroy for volume ${this}: ${err}`);
+        }
+      }
+      this.emitEvent('mod');
+      this.fsa();
+    }
+  }
+
+  // Delete nexus and destroy all replicas of the volume.
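+  // Unlike unpublish(), this tears down the replicas as well and emits
+  // the final "del" event through the emitEvent callback.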
+  async destroy() {
+    this.publishedOn = undefined;
+    this._setState(VolumeState.Destroyed);
+    if (this.nexus) {
+      await this.nexus.destroy();
+    }
+    const promises = Object.values(this.replicas).map((replica) =>
+      replica.destroy()
+    );
+    await Promise.all(promises);
+    this.emitEvent('del');
+  }
+
+  // Trigger the run of FSA. It will either run immediately or if it is already
+  // running, it will start again when the current run finishes.
+  //
+  // Why critical section on fsa? Certain operations done by fsa are async. If
+  // we allow another process to enter fsa before the async operation is done
+  // and the state of volume updated we risk that the second process repeats
+  // exactly the same action (because from its point of view it hasn't been
+  // done yet).
+  fsa() {
+    if (this.runFsa++ === 0) {
+      this._fsa().finally(() => {
+        const runAgain = this.runFsa > 1;
+        this.runFsa = 0;
+        if (runAgain) this.fsa();
+      });
+    }
+  }
+
+  // Implementation of finite state automaton (FSA) that moves the volume
+  // through the states: degraded, faulted, healthy, ... - trying to preserve
+  // the data on the volume "no matter what".
+  async _fsa() {
+    // If the volume is being created, FSA should not interfere.
+    if (this.state === VolumeState.Pending || this.state === VolumeState.Destroyed) {
+      return;
+    }
+    log.debug(`Volume "${this}" enters FSA in ${this.state} state`);
+
+    if (!this.nexus) {
+      // if none of the replicas is usable then there is nothing we can do
+      if (Object.values(this.replicas).filter((r) => !r.isOffline()).length == 0) {
+        this._setState(VolumeState.Faulted);
+        return;
+      }
+
+      // check number of replicas for the volume
+      const newReplicaCount = this.replicaCount - Object.values(this.replicas).length;
+      if (newReplicaCount > 0) {
+        this._setState(VolumeState.Degraded);
+        try {
+          await this._createReplicas(newReplicaCount);
+        } catch (err) {
+          logError(err);
+        }
+        // New replicas will be added to the volume through events. On the next
+        // FSA entry they will be there and we may continue beyond this point.
+        return;
+      }
+
+      if (!this.publishedOn) {
+        // If the volume hasn't been published we can't do anything more than what
+        // we have done (that is maintain required # of replicas). When we create
+        // the nexus, it may find out that some of the replicas are unusable, but
+        // we don't know that now.
+        this._setState(VolumeState.Healthy);
+        return;
+      }
+    }
+
+    // check that replicas are shared in the way they should be
+    let localNode: string = (this.nexus) ?
this.nexus.node.name : this.publishedOn; + for (const nodeName in this.replicas) { + const replica = this.replicas[nodeName]; + if (replica.isOffline()) { + continue; + } + let share; + const isLocal = replica.pool!.node.name === localNode; + if (isLocal && replica.share !== 'REPLICA_NONE') { + // make sure that replica that is local to the nexus is accessed locally + share = 'REPLICA_NONE'; + } else if (!isLocal && replica.share === 'REPLICA_NONE') { + // make sure that replica that is remote to nexus can be accessed + share = 'REPLICA_NVMF'; + } + if (share) { + try { + await replica.setShare(share); + delete this.nodeBlackList[nodeName]; + // fsa will get called again because the replica was modified + return; + } catch (err) { + this.nodeBlackList[nodeName] = true; + log.error( + `Failed to set share protocol to ${share} for replica "${replica}": ${err}` + ); + } + } + } + + // If we don't have a nexus and the volume is published, then try to create one + if (!this.nexus) { + assert(this.publishedOn); + let nexusNode = this.registry.getNode(this.publishedOn); + if (nexusNode && nexusNode.isSynced()) { + let replicas = []; + for (let nodeName in this.replicas) { + if (!this.replicas[nodeName].isOffline() && !this.nodeBlackList[nodeName]) { + replicas.push(this.replicas[nodeName]); + } + } + if (replicas.length === 0) { + log.warn(`Cannot create nexus for ${this} because all replicas are bad`); + return; + } + try { + await this._createNexus(nexusNode, replicas); + } catch (err) { + log.error(`Failed to create nexus for ${this} on "${this.publishedOn}"`); + this._setState(VolumeState.Faulted); + } + } else { + log.warn(`Cannot create nexus for ${this} because "${this.publishedOn}" is down`) + this._setState(VolumeState.Faulted); + } + // fsa will get called again when event about created nexus arrives + return; + } + + // pair nexus children with replica objects to get the full picture + const childReplicaPairs: {ch: Child, r: Replica | undefined}[] = this.nexus.children.map((ch) => { + const r = Object.values(this.replicas).find((r) => r.uri === ch.uri); + return {ch, r}; + }); + // add newly found replicas to the nexus (one by one) + const newReplicas = Object.values(this.replicas).filter((r) => { + return (!r.isOffline() && + !childReplicaPairs.find((pair) => pair.r === r) && + !this.nodeBlackList[r.pool!.node!.name]); + }); + for (let i = 0; i < newReplicas.length; i++) { + try { + await this.nexus.addReplica(newReplicas[i]); + return; + } catch (err) { + // XXX what should we do with the replica? Destroy it? + this.nodeBlackList[newReplicas[i].pool!.node!.name] = true; + logError(err); + } + } + + // If there is not a single child that is online then there is no hope + // that we could rebuild anything. + var onlineCount = childReplicaPairs + .filter((pair) => pair.ch.state === 'CHILD_ONLINE') + .length; + if (onlineCount === 0) { + this._setState(VolumeState.Faulted); + return; + } + if (this.nexus.state === 'NEXUS_OFFLINE') { + this._setState(VolumeState.Faulted); + return; + } + + // publish the nexus if it is not and should be + let uri = this.nexus.getUri(); + if (!uri && this.publishedOn) { + try { + uri = await this.nexus.publish(this.protocol); + } catch (err) { + logError(err); + return; + } + } + + // If we don't have sufficient number of sound replicas (sound means online + // or under rebuild) then add a new one. 
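+    // (For example, with a replica count of 3, one online child and one
+    // child under rebuild give a sound count of 2, so one new replica
+    // gets created.)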
+    const soundCount = childReplicaPairs.filter((pair) => {
+      return ['CHILD_ONLINE', 'CHILD_DEGRADED'].indexOf(pair.ch.state) >= 0;
+    }).length;
+    if (this.replicaCount > soundCount) {
+      this._setState(VolumeState.Degraded);
+      // add new replica
+      try {
+        await this._createReplicas(this.replicaCount - soundCount);
+      } catch (err) {
+        logError(err);
+      }
+      // The replicas will be added to the nexus when the fsa is run next time
+      // which happens immediately after we exit.
+      return;
+    }
+
+    // The condition for later actions is that the volume must not be rebuilding
+    // or waiting for a child add. So check that and return if that's the case.
+    const rebuildCount = childReplicaPairs
+      .filter((pair) => pair.ch.state === 'CHILD_DEGRADED')
+      .length;
+    if (rebuildCount > 0) {
+      this._setState(VolumeState.Degraded);
+      return;
+    }
+
+    assert(onlineCount >= this.replicaCount);
+    this._setState(VolumeState.Healthy);
+
+    // If we have more online replicas than we need, then remove one.
+    // A child that is broken or without a replica goes first.
+    let rmPair = childReplicaPairs.find(
+      (pair) => !pair.r && pair.ch.state === 'CHILD_FAULTED'
+    );
+    if (!rmPair) {
+      rmPair = childReplicaPairs.find((pair) => pair.ch.state === 'CHILD_FAULTED');
+      if (!rmPair) {
+        // A child that is unknown to us (without a replica object)
+        rmPair = childReplicaPairs.find((pair) => !pair.r);
+        // If all replicas are online, then continue searching for a candidate
+        // only if there are more online replicas than needed.
+        if (!rmPair && onlineCount > this.replicaCount) {
+          // The replica with the lowest score must go away
+          const rmReplica = this._prioritizeReplicas(
+            childReplicaPairs
+              .map((pair) => pair.r)
+              .filter((r) => r !== undefined)
+          ).pop();
+          if (rmReplica) {
+            rmPair = childReplicaPairs.find((pair) => pair.r === rmReplica);
+          }
+        }
+      }
+    }
+    if (rmPair) {
+      try {
+        await this.nexus.removeReplica(rmPair.ch.uri);
+      } catch (err) {
+        logError(err);
+        return;
+      }
+      if (rmPair.r) {
+        try {
+          await rmPair.r.destroy();
+        } catch (err) {
+          logError(err);
+        }
+      }
+      return;
+    }
+
+    // If a replica should run on a different node then move it
+    const moveChild = childReplicaPairs.find((pair) => {
+      return (
+        pair.r !== undefined &&
+        pair.ch.state === 'CHILD_ONLINE' &&
+        this.requiredNodes.length > 0 &&
+        this.requiredNodes.indexOf(pair.r.pool!.node.name) < 0
+      );
+    });
+    if (moveChild) {
+      // We add a new replica and the old one will be removed when both are
+      // online since there will be more of them than needed. We do it one by
+      // one so as not to trigger too many changes.
+      try {
+        await this._createReplicas(1);
+      } catch (err) {
+        logError(err);
+      }
+    }
+  }
+
+  // Change the volume state to the given state. If the state is not the same
+  // as the previous one, emit a volume "mod" event.
+  //
+  // @param newState New state to set on volume.
+  _setState(newState: VolumeState) {
+    if (this.state !== newState) {
+      if (newState === VolumeState.Healthy || newState === VolumeState.Destroyed) {
+        log.info(`Volume state of "${this}" is ${newState}`);
+      } else {
+        log.warn(`Volume state of "${this}" is ${newState}`);
+      }
+      this.state = newState;
+      this.emitEvent('mod');
+    }
+  }
+
+  // Create the volume in accordance with requirements specified during the
+  // object creation. Create whatever component is missing (note that we
+  // might not be creating it from scratch).
+  //
+  // NOTE: Until we switch state from "pending" at the end, the volume is not
+  // acted upon by FSA. That's exactly what we want, because the async events
+  // produced by this function do not interfere with execution of the "create".
+  async create() {
+    log.debug(`Creating the volume "${this}"`);
+
+    // Ensure there is a sufficient number of replicas for the volume.
+    const newReplicaCount = this.replicaCount - Object.keys(this.replicas).length;
+    if (newReplicaCount > 0) {
+      // create more replicas if a higher replication factor is desired
+      await this._createReplicas(newReplicaCount);
+    }
+    this._setState(VolumeState.Healthy);
+    log.info(`Volume "${this}" with ${this.replicaCount} replica(s) and size ${this.size} was created`);
+  }
+
+  // Update child devices of the existing nexus or create a new nexus if it
+  // does not exist.
+  //
+  // @param node     Node where the nexus should be created.
+  // @param replicas Replicas that should be used for child bdevs of the nexus.
+  // @returns Created nexus object.
+  //
+  async _createNexus(node: Node, replicas: Replica[]): Promise<Nexus> {
+    return node.createNexus(
+      this.uuid,
+      this.size,
+      Object.values(replicas)
+    );
+  }
+
+  // Adjust the replica count for the volume to the required count.
+  //
+  // @param count Number of new replicas to create.
+  //
+  async _createReplicas(count: number) {
+    let pools: Pool[] = this.registry.choosePools(
+      this.requiredBytes,
+      this.requiredNodes,
+      this.preferredNodes
+    );
+    // remove pools that are already used by existing replicas
+    const usedNodes = Object.keys(this.replicas);
+    pools = pools.filter((p) => usedNodes.indexOf(p.node.name) < 0);
+    if (pools.length < count) {
+      log.error(
+        `No suitable pool(s) for volume "${this}" with capacity ` +
+        `${this.requiredBytes} and replica count ${this.replicaCount}`
+      );
+      throw new GrpcError(
+        GrpcCode.RESOURCE_EXHAUSTED,
+        'Cannot find suitable storage pool(s) for the volume'
+      );
+    }
+
+    // Calculate the size of the volume if not given precisely.
+    //
+    // TODO: Size of the smallest pool is a safe choice though too conservative.
+    if (!this.size) {
+      this.size = Math.min(
+        pools.reduce(
+          (acc, pool) => Math.min(acc, pool.freeBytes()),
+          Number.MAX_SAFE_INTEGER
+        ),
+        this.limitBytes || this.requiredBytes
+      );
+    }
+
+    // We record all failures as we try to create the replica on available
+    // pools to return them to the user at the end if we ultimately fail.
+    const errors = [];
+    // try one pool after another until success
+    for (let i = 0; i < pools.length && count > 0; i++) {
+      const pool = pools[i];
+
+      try {
+        // this will add the replica to the cache if successful
+        await pool.createReplica(this.uuid, this.size);
+      } catch (err) {
+        log.error(err.message);
+        errors.push(err.message);
+        continue;
+      }
+      count--;
+    }
+    // check if we created enough replicas
+    if (count > 0) {
+      let msg = `Failed to create required number of replicas for volume "${this}": `;
+      msg += errors.join('. ');
+      throw new GrpcError(GrpcCode.INTERNAL, msg);
+    }
+  }
+
+  // Get the list of replicas for this volume sorted from the most to the
+  // least preferred.
+  //
+  // @returns {object[]} List of replicas sorted by preference (the most preferred first).
+  //
+  _prioritizeReplicas(replicas: Replica[]): Replica[] {
+    // Object.values clones the array so that we don't modify the original value
+    return Object.values(replicas).sort(
+      (a, b) => this._scoreReplica(b) - this._scoreReplica(a)
+    );
+  }
+
+  // Assign a score to a replica based on certain criteria. The higher the better.
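+  // For instance, an online replica on a required node that is also preferred
+  // and local to the nexus scores 10 + 5 + 2 + 1 = 18 (the maximum), while an
+  // offline replica on a node in neither list scores 0.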
+  //
+  // @param {object} replica Replica object.
+  // @returns {number} Score from 0 to 18.
+  //
+  _scoreReplica (replica: Replica) {
+    let score = 0;
+    const node = replica.pool!.node;
+
+    // criteria #1: must be on the required nodes if set
+    if (
+      this.requiredNodes.length > 0 &&
+      this.requiredNodes.indexOf(node.name) >= 0
+    ) {
+      score += 10;
+    }
+    // criteria #2: replica should be online
+    if (!replica.isOffline()) {
+      score += 5;
+    }
+    // criteria #3: would be nice to run on a preferred node
+    if (
+      this.preferredNodes.length > 0 &&
+      this.preferredNodes.indexOf(node.name) >= 0
+    ) {
+      score += 2;
+    }
+    // criteria #4: local IO from the nexus is certainly an advantage
+    if (this.nexus && node === this.nexus.node) {
+      score += 1;
+    }
+
+    // TODO: Score the replica based on the pool parameters.
+    // I.e. a replica on a less busy pool would get a higher score.
+    return score;
+  }
+
+  // Share replicas as appropriate to allow access from the nexus and return
+  // just the replicas that should be used for the nexus (excessive replicas
+  // will be trimmed).
+  //
+  // @returns Node where the nexus should be and the list of replicas that
+  // should be used for the nexus, sorted by preference.
+  //
+  async _ensureReplicaShareProtocols(): Promise<[Node, Replica[]]> {
+    // sort replicas and remove replicas that aren't online
+    const replicaSet = this
+      ._prioritizeReplicas(Object.values(this.replicas))
+      .filter((r) => !r.isOffline());
+    if (replicaSet.length === 0) {
+      throw new GrpcError(
+        GrpcCode.INTERNAL,
+        `There are no replicas for volume "${this}"`
+      );
+    }
+    replicaSet.splice(this.replicaCount);
+
+    // If the nexus does not exist it will be created on the same node as the
+    // most preferred replica.
+    const nexusNode = this.nexus ? this.nexus.node : replicaSet[0].pool!.node;
+
+    for (let i = 0; i < replicaSet.length; i++) {
+      const replica = replicaSet[i];
+      let share;
+      const local = replica.pool!.node === nexusNode;
+      // make sure that a replica which is local to the nexus is accessed locally
+      if (local && replica.share !== 'REPLICA_NONE') {
+        share = 'REPLICA_NONE';
+      } else if (!local && replica.share === 'REPLICA_NONE') {
+        // make sure that a replica which is remote to the nexus can be accessed
+        share = 'REPLICA_NVMF';
+      }
+      if (share) {
+        try {
+          await replica.setShare(share);
+        } catch (err) {
+          throw new GrpcError(
+            GrpcCode.INTERNAL,
+            `Failed to set share protocol to ${share} for replica "${replica}": ${err}`
+          );
+        }
+      }
+    }
+    return [nexusNode, replicaSet];
+  }
+
+  // Update parameters of the volume.
+  //
+  // Throw an exception if the size of the volume is changed in an incompatible
+  // way (unsupported).
+  //
+  // @params {object}   spec                Volume parameters.
+  // @params {number}   spec.replicaCount   Number of desired replicas.
+  // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas.
+  // @params {string[]} spec.requiredNodes  Replicas must be on these nodes.
+  // @params {number}   spec.requiredBytes  The volume must have at least this size.
+  // @params {number}   spec.limitBytes     The volume should not be bigger than this.
+  // @params {string}   spec.protocol       The share protocol for the nexus.
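+  //
+  // For example (illustrative values only):
+  //   volume.update({ replicaCount: 2, preferredNodes: [], requiredNodes: [],
+  //                   requiredBytes: 1073741824, limitBytes: 0, protocol: 'nvmf' })
+  // Changes to replicaCount, the node lists, requiredBytes or limitBytes emit
+  // a "mod" event and trigger the FSA; protocol changes and incompatible size
+  // changes are rejected with INVALID_ARGUMENT.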
+ // + update(spec: any) { + var changed = false; + + if (this.size < spec.requiredBytes) { + throw new GrpcError( + GrpcCode.INVALID_ARGUMENT, + `Extending the volume "${this}" is not supported` + ); + } + if (spec.limitBytes && this.size > spec.limitBytes) { + throw new GrpcError( + GrpcCode.INVALID_ARGUMENT, + `Shrinking the volume "${this}" is not supported` + ); + } + if (this.protocol !== spec.protocol) { + throw new GrpcError( + GrpcCode.INVALID_ARGUMENT, + `Changing the protocol for volume "${this}" is not supported` + ); + } + + if (this.replicaCount !== spec.replicaCount) { + this.replicaCount = spec.replicaCount; + changed = true; + } + const preferredNodes = _.clone(spec.preferredNodes || []).sort(); + if (!_.isEqual(this.preferredNodes, preferredNodes)) { + this.preferredNodes = preferredNodes; + changed = true; + } + const requiredNodes = _.clone(spec.requiredNodes || []).sort(); + if (!_.isEqual(this.requiredNodes, requiredNodes)) { + this.requiredNodes = requiredNodes; + changed = true; + } + if (this.requiredBytes !== spec.requiredBytes) { + this.requiredBytes = spec.requiredBytes; + changed = true; + } + if (this.limitBytes !== spec.limitBytes) { + this.limitBytes = spec.limitBytes; + changed = true; + } + if (changed) { + this.emitEvent('mod'); + this.fsa(); + } + } + + // + // Handlers for the events from node registry follow + // + + // Add new replica to the volume. + // + // @param {object} replica New replica object. + newReplica(replica: Replica) { + assert.strictEqual(replica.uuid, this.uuid); + const nodeName = replica.pool!.node.name; + if (this.replicas[nodeName]) { + log.warn( + `Trying to add the same replica "${replica}" to the volume twice` + ); + } else { + log.debug(`Replica "${replica}" attached to the volume`); + this.replicas[nodeName] = replica; + this.emitEvent('mod'); + this.fsa(); + } + } + + // Modify replica in the volume. + // + // @param {object} replica Modified replica object. + modReplica(replica: Replica) { + assert.strictEqual(replica.uuid, this.uuid); + const nodeName = replica.pool!.node.name; + if (!this.replicas[nodeName]) { + log.warn(`Modified replica "${replica}" does not belong to the volume`); + } else { + assert(this.replicas[nodeName] === replica); + this.emitEvent('mod'); + // the share protocol or uri could have changed + this.fsa(); + } + } + + // Delete replica in the volume. + // + // @param {object} replica Deleted replica object. + delReplica(replica: Replica) { + assert.strictEqual(replica.uuid, this.uuid); + const nodeName = replica.pool!.node.name; + if (!this.replicas[nodeName]) { + log.warn(`Deleted replica "${replica}" does not belong to the volume`); + } else { + log.debug(`Replica "${replica}" detached from the volume`); + assert(this.replicas[nodeName] === replica); + delete this.replicas[nodeName]; + this.emitEvent('mod'); + this.fsa(); + } + } + + // Assign nexus to the volume. + // + // @param {object} nexus New nexus object. + newNexus(nexus: Nexus) { + assert.strictEqual(nexus.uuid, this.uuid); + if (!this.nexus) { + // If there is no nexus then accept any. This is to support rebuild when + // volume is not published. 
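+      // (This is the first of several cases: the remaining branches below
+      // deal with a duplicate, a stale or a misplaced nexus.)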
+ log.debug(`Nexus "${nexus}" attached to the volume`); + this.nexus = nexus; + if (!this.size) this.size = nexus.size; + this.emitEvent('mod'); + this.fsa(); + } else if (this.nexus === nexus) { + log.warn(`Trying to add the same nexus "${nexus}" to the volume twice`); + } else if (!this.publishedOn) { + log.warn(`Trying to add another nexus "${nexus}" to unpublished volume`); + nexus.destroy().catch((err) => { + log.error(`Failed to destroy duplicated nexus ${nexus}: ${err}`); + }); + } else if (this.publishedOn === nexus.node?.name) { + log.warn(`Replacing nexus "${this.nexus}" by "${nexus}" in the volume`); + const oldNexus = this.nexus; + this.nexus = nexus; + oldNexus.destroy().catch((err) => { + log.error(`Failed to destroy stale nexus "${oldNexus}": ${err}`); + }); + } else { + log.warn(`Destroying new nexus "${nexus}" on the wrong node`); + nexus.destroy().catch((err) => { + log.error(`Failed to destroy wrong nexus "${nexus}": ${err}`); + }); + } + } + + // Nexus has been modified. + // + // @param {object} nexus Modified nexus object. + modNexus(nexus: Nexus) { + assert.strictEqual(nexus.uuid, this.uuid); + if (!this.nexus) { + log.warn(`Modified nexus "${nexus}" does not belong to the volume`); + } else if (this.nexus === nexus) { + this.emitEvent('mod'); + this.fsa(); + } + } + + // Delete nexus in the volume. + // + // @param {object} nexus Deleted nexus object. + delNexus(nexus: Nexus) { + assert.strictEqual(nexus.uuid, this.uuid); + if (!this.nexus) { + log.warn(`Deleted nexus "${nexus}" does not belong to the volume`); + } else if (this.nexus === nexus) { + log.debug(`Nexus "${nexus}" detached from the volume`); + assert.strictEqual(this.nexus, nexus); + this.emitEvent('mod'); + this.nexus = null; + this.fsa(); + } else { + // if this is a different nexus than ours, ignore it + } + } +} + +// When debugging unexpected errors in try-catch it is easy to modify +// this function to print a stack as well, which is handy. 
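+// A debugging variant could be, for example:
+//   log.error(err.stack || err.toString());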
+function logError(err: any) { + log.error(err.toString()); +} \ No newline at end of file diff --git a/csi/moac/volume_operator.ts b/csi/moac/volume_operator.ts index 7d9fb6cf3..8a01a346e 100644 --- a/csi/moac/volume_operator.ts +++ b/csi/moac/volume_operator.ts @@ -55,6 +55,9 @@ import { CustomResourceCache, CustomResourceMeta, } from './watcher'; +import { Protocol, protocolFromString } from './nexus'; +import { Volumes } from './volumes'; +import { VolumeState, volumeStateFromString } from './volume'; const RESOURCE_NAME: string = 'mayastorvolume'; const crdVolume = yaml.safeLoad( @@ -63,55 +66,6 @@ const crdVolume = yaml.safeLoad( // lower-case letters uuid pattern const uuidRegexp = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$/; -// Protocol used to export nexus (volume) -enum Protocol { - Unknown = 'unknown', - Nbd = 'nbd', - Iscsi = 'iscsi', - Nvmf = 'nvmf', -} - -function protocolFromString(val: string): Protocol { - if (val == Protocol.Nbd) { - return Protocol.Nbd; - } else if (val == Protocol.Iscsi) { - return Protocol.Iscsi; - } else if (val == Protocol.Nvmf) { - return Protocol.Nvmf; - } else { - return Protocol.Unknown; - } -} - -// State of the volume -enum State { - Unknown = 'unknown', - Healthy = 'healthy', - Degraded = 'degraded', - Faulted = 'faulted', - Pending = 'pending', - Offline = 'offline', - Error = 'error', -} - -function stateFromString(val: string): State { - if (val == State.Healthy) { - return State.Healthy; - } else if (val == State.Degraded) { - return State.Degraded; - } else if (val == State.Faulted) { - return State.Faulted; - } else if (val == State.Pending) { - return State.Pending; - } else if (val == State.Offline) { - return State.Offline; - } else if (val == State.Error) { - return State.Error; - } else { - return State.Unknown; - } -} - // Spec part in volume resource type VolumeSpec = { replicaCount: number, @@ -125,9 +79,9 @@ type VolumeSpec = { // Optional status part in volume resource type VolumeStatus = { size: number, - state: State, + state: VolumeState, reason?: string, - node: string, + targetNodes?: string[], // node name of nexus if the volume is published replicas: { node: string, pool: string, @@ -135,6 +89,7 @@ type VolumeStatus = { offline: boolean, }[], nexus?: { + node: string, deviceUri?: string, state: string, children: { @@ -182,14 +137,16 @@ export class VolumeResource extends CustomResource { if (status !== undefined) { this.status = { size: status.size || 0, - state: stateFromString(status.state), - node: status.node, + state: volumeStateFromString(status.state), // sort the replicas according to uri to have deterministic order replicas: [].concat(status.replicas || []).sort((a: any, b: any) => { if (a.uri < b.uri) return -1; else if (a.uri > b.uri) return 1; else return 0; }), + }; + if (status.targetNodes) { + this.status.targetNodes = [].concat(status.targetNodes).sort(); } if (status.nexus) { this.status.nexus = status.nexus; @@ -210,7 +167,7 @@ export class VolumeResource extends CustomResource { // Volume operator managing volume k8s custom resources. export class VolumeOperator { namespace: string; - volumes: any; // Volume manager + volumes: Volumes; // Volume manager eventStream: any; // A stream of node, replica and nexus events. watcher: CustomResourceCache; // volume resource watcher. 
workq: any; // Events from k8s are serialized so that we don't flood moac by @@ -225,7 +182,7 @@ export class VolumeOperator { constructor ( namespace: string, kubeConfig: KubeConfig, - volumes: any, + volumes: Volumes, idleTimeout: number | undefined, ) { this.namespace = namespace; @@ -318,8 +275,7 @@ export class VolumeOperator { _volumeToStatus (volume: any): VolumeStatus { const st: VolumeStatus = { size: volume.getSize(), - state: stateFromString(volume.state), - node: volume.getNodeName(), + state: volumeStateFromString(volume.state), replicas: Object.values(volume.replicas).map((r: any) => { return { node: r.pool.node.name, @@ -329,8 +285,12 @@ export class VolumeOperator { }; }) }; + if (volume.getNodeName()) { + st.targetNodes = [ volume.getNodeName() ]; + } if (volume.nexus) { st.nexus = { + node: volume.nexus.node.name, deviceUri: volume.nexus.deviceUri || '', state: volume.nexus.state, children: volume.nexus.children.map((ch: any) => { @@ -416,7 +376,7 @@ export class VolumeOperator { } // Set state and reason not touching the other status fields. - async _updateState (uuid: string, state: State, reason: string) { + async _updateState (uuid: string, state: VolumeState, reason: string) { try { await this.watcher.updateStatus(uuid, (orig: VolumeResource) => { if (orig.status?.state === state && orig.status?.reason === reason) { @@ -476,7 +436,7 @@ export class VolumeOperator { watcher.on('del', (obj: VolumeResource) => { // most likely it was not user but us (the operator) who deleted // the resource. So check if it really exists first. - if (this.volumes.get(obj.metadata.name)) { + if (this.volumes.get(obj.metadata.name!)) { this.workq.push(obj.metadata.name, this._destroyVolume.bind(this)); } }); @@ -492,12 +452,12 @@ export class VolumeOperator { log.debug(`Importing volume "${uuid}" in response to "new" resource event`); try { - await this.volumes.importVolume(uuid, resource.spec, resource.status); + this.volumes.importVolume(uuid, resource.spec, resource.status); } catch (err) { log.error( `Failed to import volume "${uuid}" based on new resource: ${err}` ); - await this._updateState(uuid, State.Error, err.toString()); + await this._updateState(uuid, VolumeState.Error, err.toString()); } } @@ -516,12 +476,7 @@ export class VolumeOperator { return; } try { - if (volume.update(resource.spec)) { - log.debug( - `Updating volume "${uuid}" in response to "mod" resource event` - ); - volume.fsa(); - } + volume.update(resource.spec); } catch (err) { log.error(`Failed to update volume "${uuid}" based on resource: ${err}`); } diff --git a/csi/moac/volumes.js b/csi/moac/volumes.ts similarity index 57% rename from csi/moac/volumes.js rename to csi/moac/volumes.ts index 49ffe9a5b..2657fcfdc 100644 --- a/csi/moac/volumes.js +++ b/csi/moac/volumes.ts @@ -1,34 +1,40 @@ // Volume manager implementation. -'use strict'; +import assert from 'assert'; +import { Nexus } from './nexus'; +import { Replica } from './replica'; +import { Volume, VolumeState } from './volume'; const EventEmitter = require('events'); const EventStream = require('./event_stream'); -const Volume = require('./volume'); const { GrpcCode, GrpcError } = require('./grpc_client'); const log = require('./logger').Logger('volumes'); // Volume manager that emit events for new/modified/deleted volumes. 
-class Volumes extends EventEmitter { - constructor (registry) { +export class Volumes extends EventEmitter { + private registry: any; + private events: any; // stream of events from registry + private volumes: Record; // volumes indexed by uuid + + constructor (registry: any) { super(); this.registry = registry; - this.events = null; // stream of events from registry - this.volumes = {}; // volumes indexed by uuid + this.events = null; + this.volumes = {}; } - start () { - var self = this; + start() { + const self = this; this.events = new EventStream({ registry: this.registry }); - this.events.on('data', async function (ev) { + this.events.on('data', async function (ev: any) { if (ev.kind === 'pool' && ev.eventType === 'new') { // New pool was added and perhaps we have volumes waiting to schedule // their replicas on it. Object.values(self.volumes) - .filter((v) => v.state === 'degraded') + .filter((v) => v.state === VolumeState.Degraded) .forEach((v) => v.fsa()); } else if (ev.kind === 'replica' || ev.kind === 'nexus') { - const uuid = ev.object.uuid; + const uuid: string = ev.object.uuid; const volume = self.volumes[uuid]; if (!volume) { // Ignore events for volumes that do not exist. Those might be events @@ -53,29 +59,33 @@ class Volumes extends EventEmitter { volume.delNexus(ev.object); } } - self.emit('volume', { - eventType: 'mod', - object: volume - }); + } else if (ev.kind === 'node' && ev.object.isSynced()) { + // Create nexus for volumes that should have one on the node + Object.values(self.volumes) + .filter((v) => v.getNodeName() === ev.object.name) + .forEach((v) => v.fsa()); } }); } - stop () { + stop() { this.events.destroy(); this.events.removeAllListeners(); this.events = null; } - // Return a volume with specified uuid or all volumes if called without - // an argument. + // Return a volume with specified uuid. // - // @param {string} uuid ID of the volume. - // @returns {object|object[]} Matching volume (or null if not found) or all volumes. + // @param uuid ID of the volume. + // @returns Matching volume or undefined if not found. // - get (uuid) { - if (uuid) return this.volumes[uuid] || null; - else return Object.values(this.volumes); + get(uuid: string): Volume | undefined { + return this.volumes[uuid]; + } + + // Return all volumes. + list(): Volume[] { + return Object.values(this.volumes); } // Create volume object (just the object) and add it to the internal list @@ -92,7 +102,7 @@ class Volumes extends EventEmitter { // @params {string} spec.protocol The share protocol for the nexus. // @returns {object} New volume object. // - async createVolume (uuid, spec) { + async createVolume(uuid: string, spec: any): Promise { if (!spec.requiredBytes || spec.requiredBytes < 0) { throw new GrpcError( GrpcCode.INVALID_ARGUMENT, @@ -101,16 +111,15 @@ class Volumes extends EventEmitter { } let volume = this.volumes[uuid]; if (volume) { - if (volume.update(spec)) { - // TODO: What to do if the size changes and is incompatible? + volume.update(spec); + } else { + volume = new Volume(uuid, this.registry, (type: string) => { + assert(volume); this.emit('volume', { - eventType: 'mod', + eventType: type, object: volume }); - volume.fsa(); - } - } else { - volume = new Volume(uuid, this.registry, spec); + }, spec); // The volume starts to exist before it is created because we must receive // events for it and we want to show to user that it is being created. 
this.volumes[uuid] = volume; @@ -119,11 +128,10 @@ class Volumes extends EventEmitter { object: volume }); // check for components that already exist and assign them to the volume - this.registry.getReplicaSet(uuid).forEach((r) => volume.newReplica(r)); - const nexus = this.registry.getNexus(uuid); + this.registry.getReplicaSet(uuid).forEach((r: Replica) => volume.newReplica(r)); + const nexus: Nexus = this.registry.getNexus(uuid); if (nexus) { volume.newNexus(nexus); - return volume; } try { @@ -131,12 +139,14 @@ class Volumes extends EventEmitter { } catch (err) { // undo the pending state delete this.volumes[uuid]; - this.emit('volume', { - eventType: 'del', - object: volume - }); + try { + await volume.destroy(); + } catch (err) { + log.error(`Failed to destroy "${volume}": ${err}`); + } throw err; } + volume.fsa(); } return volume; } @@ -146,70 +156,61 @@ class Volumes extends EventEmitter { // The method is idempotent - if the volume does not exist it does not return // an error. // - // @param {string} uuid ID of the volume. + // @param uuid ID of the volume. // - async destroyVolume (uuid) { + async destroyVolume(uuid: string) { const volume = this.volumes[uuid]; if (!volume) return; await volume.destroy(); delete this.volumes[uuid]; - this.emit('volume', { - eventType: 'del', - object: volume - }); } // Import the volume object (just the object) and add it to the internal list // of volumes. The method is idempotent. If a volume with the same uuid // already exists, then update its parameters. // - // @param {string} uuid ID of the volume. - // @param {object} spec Properties of the volume. - // @params {number} spec.replicaCount Number of desired replicas. - // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. - // @params {string[]} spec.requiredNodes Replicas must be on these nodes. - // @params {number} spec.requiredBytes The volume must have at least this size. - // @params {number} spec.limitBytes The volume should not be bigger than this. - // @params {string} spec.protocol The share protocol for the nexus. - // @params {object} status Current properties of the volume - // @returns {object} New volume object. + // @param {string} uuid ID of the volume. + // @param {object} spec Properties of the volume. + // @params {number} spec.replicaCount Number of desired replicas. + // @params {string[]} spec.preferredNodes Nodes to prefer for scheduling replicas. + // @params {string[]} spec.requiredNodes Replicas must be on these nodes. + // @params {number} spec.requiredBytes The volume must have at least this size. + // @params {number} spec.limitBytes The volume should not be bigger than this. + // @params {string} spec.protocol The share protocol for the nexus. + // @params {object} status Current properties of the volume + // @params {string} status.state Last known state of the volume. + // @params {number} status.size Size of the volume. + // @params {string} status.targetNodes Node(s) where the volume is published. + // @returns {object} New volume object. 
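+  // An illustrative status argument (example values only) could look like:
+  //   { state: 'degraded', size: 5368709120, targetNodes: ['node-2'] }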
  //
-  async importVolume (uuid, spec, status) {
+  importVolume(uuid: string, spec: any, status: any): Volume {
     let volume = this.volumes[uuid];
     if (volume) {
-      if (volume.update(spec)) {
+      volume.update(spec);
+    } else {
+      // We don't support multiple nexuses yet so take the first one
+      let publishedOn = (status.targetNodes || []).pop();
+      volume = new Volume(uuid, this.registry, (type: string) => {
+        assert(volume);
         this.emit('volume', {
-          eventType: 'mod',
+          eventType: type,
           object: volume
         });
-        volume.fsa();
-      }
-    } else {
-      volume = new Volume(uuid, this.registry, spec, status.size);
+      }, spec, status.state, status.size, publishedOn);
       this.volumes[uuid] = volume;
       // attach any associated replicas to the volume
-      this.registry.getReplicaSet(uuid).forEach((r) => volume.newReplica(r));
+      this.registry.getReplicaSet(uuid).forEach((r: Replica) => volume.newReplica(r));
       const nexus = this.registry.getNexus(uuid);
       if (nexus) {
         volume.newNexus(nexus);
-      } else {
-        // if the nexus still exists then it will get attached eventually
-        // otherwise, it will not be recreated and the volume will remain
-        // in an unusable pending state until some other entity recreates it
       }
-
-      this.emit('volume', {
-        eventType: 'new',
-        object: volume
-      });
+      volume._setState(VolumeState.Unknown);
       volume.fsa();
     }
     return volume;
   }
-}
-
-module.exports = Volumes;
+}
\ No newline at end of file
diff --git a/csi/moac/watcher.ts b/csi/moac/watcher.ts
index b13b54ecf..f70ec8313 100644
--- a/csi/moac/watcher.ts
+++ b/csi/moac/watcher.ts
@@ -500,7 +500,7 @@ export class CustomResourceCache extends EventEmitter {
       name,
       async () => {
         try {
-          this.k8sApi.deleteNamespacedCustomObject(
+          await this.k8sApi.deleteNamespacedCustomObject(
             GROUP,
             VERSION,
             this.namespace,

From 91e410208442a7b6ab3c7056eb4aaaf1e7b99511 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Tue, 8 Dec 2020 15:36:46 +0000
Subject: [PATCH 17/85] Fix mayastorvolume.yaml file with CRD definition.

This was a regression introduced by the publish nexus changes. Somehow this
part of the change did not make it to develop ...
---
 csi/moac/crds/mayastorvolume.yaml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/csi/moac/crds/mayastorvolume.yaml b/csi/moac/crds/mayastorvolume.yaml
index 280d5e9b2..80ee984bd 100644
--- a/csi/moac/crds/mayastorvolume.yaml
+++ b/csi/moac/crds/mayastorvolume.yaml
@@ -121,14 +121,12 @@ spec:
                 type: boolean
   additionalPrinterColumns:
   - name: Targets
-    type: array
+    type: string
     description: k8s node(s) with storage targets for the volume.
     jsonPath: .status.targetNodes
-    items: string
   - name: Size
     type: integer
     format: int64
-    minimum: 0
     description: Size of the volume
     jsonPath: .status.size
   - name: State

From e2c90042ef599dd0fa3e941704cce8d14854d599 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Tue, 8 Dec 2020 15:43:28 +0000
Subject: [PATCH 18/85] Let Tom go ..
---
 .github/auto_assign.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml
index 823a7d540..b3639ddcd 100644
--- a/.github/auto_assign.yml
+++ b/.github/auto_assign.yml
@@ -9,8 +9,8 @@ reviewers:
   - blaisedias
   - cjones1024
   - jonathan-teh
-  - tjoshum
   - jkryl
+  - gila
   - paulyoong
   - chriswldenyer
   - tiagolobocastro

From 851b09b5452a74fc8d91a4f3b246ebadb61b85f2 Mon Sep 17 00:00:00 2001
From: Arne Rusek
Date: Tue, 8 Dec 2020 18:33:53 +0100
Subject: [PATCH 19/85] Templatize deploy/* using helm

---
 chart/.helmignore                       |  23 +++++
 chart/Chart.yaml                        |   6 ++
 chart/README.md                         |  31 ++++++
 chart/crds/mayastorpoolcrd.yaml         |   1 +
 chart/templates/_helpers.tpl            |   8 ++
 chart/templates/csi-daemonset.yaml      | 128 ++++++++++++++++++++++
 chart/templates/mayastor-daemonset.yaml | 109 ++++++++++++++++++++
 chart/templates/moac-deployment.yaml    |  84 ++++++++++++++++
 chart/templates/moac-rbac.yaml          |  93 +++++++++++++++++
 chart/templates/nats-deployment.yaml    |  37 +++++++
 chart/values.yaml                       |   2 +
 deploy/csi-daemonset.yaml               |   2 +
 deploy/mayastor-daemonset.yaml          |   2 +
 deploy/moac-deployment.yaml             |  28 +++---
 deploy/moac-rbac.yaml                   |   3 +
 deploy/namespace.yaml                   |  16 ++-
 deploy/nats-deployment.yaml             |  28 +++---
 scripts/generate-deploy-yamls.sh        |  40 ++++++++
 18 files changed, 605 insertions(+), 36 deletions(-)
 create mode 100644 chart/.helmignore
 create mode 100644 chart/Chart.yaml
 create mode 100644 chart/README.md
 create mode 120000 chart/crds/mayastorpoolcrd.yaml
 create mode 100644 chart/templates/_helpers.tpl
 create mode 100644 chart/templates/csi-daemonset.yaml
 create mode 100644 chart/templates/mayastor-daemonset.yaml
 create mode 100644 chart/templates/moac-deployment.yaml
 create mode 100644 chart/templates/moac-rbac.yaml
 create mode 100644 chart/templates/nats-deployment.yaml
 create mode 100644 chart/values.yaml
 create mode 100755 scripts/generate-deploy-yamls.sh

diff --git a/chart/.helmignore b/chart/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/chart/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/chart/Chart.yaml b/chart/Chart.yaml
new file mode 100644
index 000000000..f10a80744
--- /dev/null
+++ b/chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: mayastor
+description: Mayastor Helm chart for Kubernetes
+type: application
+version: 0.0.1
+# appVersion: "latest"
diff --git a/chart/README.md b/chart/README.md
new file mode 100644
index 000000000..d22a69214
--- /dev/null
+++ b/chart/README.md
@@ -0,0 +1,31 @@
+# Helm chart for Mayastor
+
+The Helm chart isn't published yet; it is used mostly internally to generate the YAMLs in the `deploy/` directory and for the end-to-end tests. The chart should nevertheless be deployable from this repo with helm. The command below expects that:
+
+ * you have a k8s cluster up and running with the [mayastor requirements](https://mayastor.gitbook.io/introduction/quickstart/preparing-the-cluster) fulfilled (take a look at [mayastor-terraform-playground](https://github.com/mayadata-io/mayastor-terraform-playground/), WARNING - super-pre-alpha)
+ * kubectl is able to access your cluster without any arguments (i.e.
you have cluster configured in config as default or your environment variable KUBECONFIG points to working kubeconfig) + +``` +cd /path/to/openebs/Mayastor +helm install mayastor ./chart --namespace=mayastor --create-namespace +``` + +To uninstall: + +``` +helm uninstall mayastor -n mayastor +kubectl delete namespace mayastor +``` + +# TODO + +[ ] publish :-) + +## templating + +[ ] templatize namespace properly - helm installs to `default` but mayastor hardcodes `mayastor` namespace + - use Release.Namespace + - use Release.Name +[ ] allow pulling image from authenticated repository +[ ] allow changing image versions separately + diff --git a/chart/crds/mayastorpoolcrd.yaml b/chart/crds/mayastorpoolcrd.yaml new file mode 120000 index 000000000..6f163fc5e --- /dev/null +++ b/chart/crds/mayastorpoolcrd.yaml @@ -0,0 +1 @@ +../../csi/moac/crds/mayastorpool.yaml \ No newline at end of file diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl new file mode 100644 index 000000000..296354258 --- /dev/null +++ b/chart/templates/_helpers.tpl @@ -0,0 +1,8 @@ +{{/* Enforce trailing slash to mayastorImagesPrefix or leave empty */}} +{{- define "mayastorImagesPrefix" -}} +{{- if .Values.mayastorImagesRepo }} +{{- printf "%s/" (.Values.mayastorImagesRepo | trimSuffix "/") }} +{{- else }} +{{- "" }} +{{- end }} +{{- end }} diff --git a/chart/templates/csi-daemonset.yaml b/chart/templates/csi-daemonset.yaml new file mode 100644 index 000000000..93a2b0de6 --- /dev/null +++ b/chart/templates/csi-daemonset.yaml @@ -0,0 +1,128 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: mayastor + name: mayastor-csi + labels: + openebs/engine: mayastor +spec: + selector: + matchLabels: + app: mayastor-csi + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: mayastor-csi + spec: + hostNetwork: true + nodeSelector: + kubernetes.io/arch: amd64 + # NOTE: Each container must have mem/cpu limits defined in order to + # belong to Guaranteed QoS class, hence can never get evicted in case of + # pressure unless they exceed those limits. limits and requests must be + # the same. + containers: + - name: mayastor-csi + image: {{ include "mayastorImagesPrefix" . 
}}mayadata/mayastor-csi:{{ .Values.mayastorImagesTag }} + imagePullPolicy: Always + # we need privileged because we mount filesystems and use mknod + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: RUST_BACKTRACE + value: "1" + args: + - "--csi-socket=/csi/csi.sock" + - "--node-name=$(MY_NODE_NAME)" + - "--grpc-endpoint=$(MY_POD_IP):10199" + - "-v" + volumeMounts: + - name: device + mountPath: /dev + - name: sys + mountPath: /sys + - name: run-udev + mountPath: /run/udev + - name: host-root + mountPath: /host + - name: plugin-dir + mountPath: /csi + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "100m" + memory: "50Mi" + - name: csi-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 + args: + - "--csi-address=/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/mayastor.openebs.io/csi.sock" + lifecycle: + preStop: + exec: + # this is needed in order for CSI to detect that the plugin is gone + command: ["/bin/sh", "-c", "rm -f /registration/io.openebs.csi-mayastor-reg.sock /csi/csi.sock"] + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "100m" + memory: "50Mi" + # Mayastor node plugin gRPC server + ports: + - containerPort: 10199 + protocol: TCP + name: mayastor-node + volumes: + - name: device + hostPath: + path: /dev + type: Directory + - name: sys + hostPath: + path: /sys + type: Directory + - name: run-udev + hostPath: + path: /run/udev + type: Directory + - name: host-root + hostPath: + path: / + type: Directory + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/mayastor.openebs.io/ + type: DirectoryOrCreate + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory diff --git a/chart/templates/mayastor-daemonset.yaml b/chart/templates/mayastor-daemonset.yaml new file mode 100644 index 000000000..aae72087b --- /dev/null +++ b/chart/templates/mayastor-daemonset.yaml @@ -0,0 +1,109 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: mayastor + name: mayastor + labels: + openebs/engine: mayastor +spec: + selector: + matchLabels: + app: mayastor + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: mayastor + spec: + hostNetwork: true + # To resolve services from mayastor namespace + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + openebs.io/engine: mayastor + kubernetes.io/arch: amd64 + initContainers: + - name: message-bus-probe + image: busybox:latest + command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] + containers: + - name: mayastor + image: {{ include "mayastorImagesPrefix" . }}mayadata/mayastor:{{ .Values.mayastorImagesTag }} + imagePullPolicy: Always + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: IMPORT_NEXUSES + value: "false" + args: + # In order to select what cores mayastor should be running on, a mask or a list can be specified. 
+          # For example: -m 0x1 will tell mayastor to only use one core which is equivalent to -l 1
+          # Using a mask of 0x3 will use the first 2 cores, which is equivalent to -l 1-2
+          #
+          # The -l argument supports ranges to be able to do the same as passing a mask for example:
+          # -l 1,2,10-20 means use core 1, 2, 10 to 20
+          #
+          # Note:
+          # 1. When both -m and -l are specified the -l argument takes precedence.
+          # 2. Ensure that the CPU resources are updated accordingly. If you use 2 CPUs, the CPU: field should also read 2.
+          - "-N$(MY_NODE_NAME)"
+          - "-g$(MY_POD_IP)"
+          - "-nnats"
+          - "-y/var/local/mayastor/config.yaml"
+          - "-m0x3"
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: device
+          mountPath: /dev
+        - name: dshm
+          mountPath: /dev/shm
+        - name: configlocation
+          mountPath: /var/local/mayastor/
+        - name: config
+          mountPath: /var/local/mayastor/config.yaml
+        resources:
+          # NOTE: Each container must have mem/cpu limits defined in order to
+          # belong to Guaranteed QoS class, hence can never get evicted in case of
+          # pressure unless they exceed those limits. limits and requests must be the same.
+          limits:
+            cpu: "2"
+            memory: "500Mi"
+            hugepages-2Mi: "1Gi"
+          requests:
+            cpu: "2"
+            memory: "500Mi"
+            hugepages-2Mi: "1Gi"
+        ports:
+        - containerPort: 10124
+          protocol: TCP
+          name: mayastor
+      volumes:
+      - name: device
+        hostPath:
+          path: /dev
+          type: Directory
+      - name: dshm
+        emptyDir:
+          medium: Memory
+          sizeLimit: "1Gi"
+      - name: hugepage
+        emptyDir:
+          medium: HugePages
+      - name: configlocation
+        hostPath:
+          path: /var/local/mayastor/
+          type: DirectoryOrCreate
+      - name: config
+        hostPath:
+          path: /var/local/mayastor/config.yaml
+          type: FileOrCreate
diff --git a/chart/templates/moac-deployment.yaml b/chart/templates/moac-deployment.yaml
new file mode 100644
index 000000000..d1f2af816
--- /dev/null
+++ b/chart/templates/moac-deployment.yaml
@@ -0,0 +1,84 @@
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: moac
+  namespace: mayastor
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: moac
+  template:
+    metadata:
+      labels:
+        app: moac
+    spec:
+      serviceAccount: moac
+      containers:
+      - name: csi-provisioner
+        image: quay.io/k8scsi/csi-provisioner:v1.6.0
+        args:
+        - "--v=2"
+        - "--csi-address=$(ADDRESS)"
+        - "--feature-gates=Topology=true"
+        env:
+        - name: ADDRESS
+          value: /var/lib/csi/sockets/pluginproxy/csi.sock
+        imagePullPolicy: "IfNotPresent"
+        volumeMounts:
+        - name: socket-dir
+          mountPath: /var/lib/csi/sockets/pluginproxy/
+
+      - name: csi-attacher
+        image: quay.io/k8scsi/csi-attacher:v2.2.0
+        args:
+        - "--v=2"
+        - "--csi-address=$(ADDRESS)"
+        env:
+        - name: ADDRESS
+          value: /var/lib/csi/sockets/pluginproxy/csi.sock
+        imagePullPolicy: "IfNotPresent"
+        volumeMounts:
+        - name: socket-dir
+          mountPath: /var/lib/csi/sockets/pluginproxy/
+
+      - name: moac
+        image: {{ include "mayastorImagesPrefix" .
}}mayadata/moac:{{ .Values.mayastorImagesTag }} + imagePullPolicy: Always + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--namespace=$(MY_POD_NAMESPACE)" + - "--port=4000" + - "--message-bus=nats" + - "-v" + env: + - name: CSI_ENDPOINT + value: /var/lib/csi/sockets/pluginproxy/csi.sock + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - containerPort: 4000 + protocol: TCP + name: "rest-api" + volumes: + - name: socket-dir + emptyDir: +--- +kind: Service +apiVersion: v1 +metadata: + name: moac + namespace: mayastor +spec: + selector: + app: moac + ports: + - protocol: TCP + port: 4000 + targetPort: 4000 diff --git a/chart/templates/moac-rbac.yaml b/chart/templates/moac-rbac.yaml new file mode 100644 index 000000000..464799af5 --- /dev/null +++ b/chart/templates/moac-rbac.yaml @@ -0,0 +1,93 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: moac + namespace: mayastor +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: moac +rules: + # must create mayastor crd if it doesn't exist +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create"] + # must read csi plugin info +- apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + # must read/write mayastor node resources +- apiGroups: ["openebs.io"] + resources: ["mayastornodes"] + verbs: ["get", "list", "watch", "update", "create", "delete"] + # must update mayastor node status +- apiGroups: ["openebs.io"] + resources: ["mayastornodes/status"] + verbs: ["update"] + # must read mayastor pools info +- apiGroups: ["openebs.io"] + resources: ["mayastorpools"] + verbs: ["get", "list", "watch", "update", "replace"] + # must update mayastor pools status +- apiGroups: ["openebs.io"] + resources: ["mayastorpools/status"] + verbs: ["update"] + # must read/write mayastor volume resources +- apiGroups: ["openebs.io"] + resources: ["mayastorvolumes"] + verbs: ["get", "list", "watch", "update", "create", "delete"] + # must update mayastor volumes status +- apiGroups: ["openebs.io"] + resources: ["mayastorvolumes/status"] + verbs: ["update"] + + # external provisioner & attacher +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + + # external provisioner +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + + # external attacher +- apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: moac +subjects: +- kind: ServiceAccount + name: moac + namespace: mayastor +roleRef: + kind: ClusterRole + 
name: moac + apiGroup: rbac.authorization.k8s.io diff --git a/chart/templates/nats-deployment.yaml b/chart/templates/nats-deployment.yaml new file mode 100644 index 000000000..a64702a5e --- /dev/null +++ b/chart/templates/nats-deployment.yaml @@ -0,0 +1,37 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nats + namespace: mayastor +spec: + replicas: 1 + selector: + matchLabels: + app: nats + template: + metadata: + labels: + app: nats + spec: + containers: + - name: nats + image: nats:2.1-alpine3.11 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 4222 + protocol: TCP + name: "nats" +--- +kind: Service +apiVersion: v1 +metadata: + name: nats + namespace: mayastor +spec: + selector: + app: nats + ports: + - protocol: TCP + port: 4222 + targetPort: 4222 diff --git a/chart/values.yaml b/chart/values.yaml new file mode 100644 index 000000000..427b5b163 --- /dev/null +++ b/chart/values.yaml @@ -0,0 +1,2 @@ +mayastorImagesTag: latest +mayastorImagesRepo: "" diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml index a8c91b687..f7d11ebcd 100644 --- a/deploy/csi-daemonset.yaml +++ b/deploy/csi-daemonset.yaml @@ -1,3 +1,5 @@ +--- +# Source: mayastor/templates/csi-daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 57a3f413c..1dc62a797 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -1,3 +1,5 @@ +--- +# Source: mayastor/templates/mayastor-daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml index 90484d4a2..22e24a1bb 100644 --- a/deploy/moac-deployment.yaml +++ b/deploy/moac-deployment.yaml @@ -1,4 +1,19 @@ --- +# Source: mayastor/templates/moac-deployment.yaml +kind: Service +apiVersion: v1 +metadata: + name: moac + namespace: mayastor +spec: + selector: + app: moac + ports: + - protocol: TCP + port: 4000 + targetPort: 4000 +--- +# Source: mayastor/templates/moac-deployment.yaml kind: Deployment apiVersion: apps/v1 metadata: @@ -69,16 +84,3 @@ spec: volumes: - name: socket-dir emptyDir: ---- -kind: Service -apiVersion: v1 -metadata: - name: moac - namespace: mayastor -spec: - selector: - app: moac - ports: - - protocol: TCP - port: 4000 - targetPort: 4000 diff --git a/deploy/moac-rbac.yaml b/deploy/moac-rbac.yaml index 464799af5..89211347e 100644 --- a/deploy/moac-rbac.yaml +++ b/deploy/moac-rbac.yaml @@ -1,10 +1,12 @@ --- +# Source: mayastor/templates/moac-rbac.yaml apiVersion: v1 kind: ServiceAccount metadata: name: moac namespace: mayastor --- +# Source: mayastor/templates/moac-rbac.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -79,6 +81,7 @@ rules: resources: ["volumeattachments/status"] verbs: ["patch"] --- +# Source: mayastor/templates/moac-rbac.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/deploy/namespace.yaml b/deploy/namespace.yaml index 12551881f..1e426a7eb 100644 --- a/deploy/namespace.yaml +++ b/deploy/namespace.yaml @@ -1,10 +1,6 @@ -{ - "kind": "Namespace", - "apiVersion": "v1", - "metadata": { - "name": "mayastor", - "labels": { - "name": "mayastor" - } - } -} +kind: Namespace +apiVersion: v1 +metadata: + name: mayastor + labels: + name: mayastor diff --git a/deploy/nats-deployment.yaml b/deploy/nats-deployment.yaml index a64702a5e..2e2883ba0 100644 --- a/deploy/nats-deployment.yaml +++ b/deploy/nats-deployment.yaml @@ -1,4 +1,19 @@ --- +# Source: 
mayastor/templates/nats-deployment.yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: nats
+  namespace: mayastor
+spec:
+  selector:
+    app: nats
+  ports:
+  - protocol: TCP
+    port: 4222
+    targetPort: 4222
+---
+# Source: mayastor/templates/nats-deployment.yaml
 kind: Deployment
 apiVersion: apps/v1
 metadata:
@@ -22,16 +37,3 @@ spec:
         - containerPort: 4222
           protocol: TCP
           name: "nats"
----
-kind: Service
-apiVersion: v1
-metadata:
-  name: nats
-  namespace: mayastor
-spec:
-  selector:
-    app: nats
-  ports:
-  - protocol: TCP
-    port: 4222
-    targetPort: 4222
diff --git a/scripts/generate-deploy-yamls.sh b/scripts/generate-deploy-yamls.sh
new file mode 100755
index 000000000..8ac8d1d7a
--- /dev/null
+++ b/scripts/generate-deploy-yamls.sh
@@ -0,0 +1,40 @@
+#! /bin/sh

+set -e

+if [ "x$1" = x ]; then
+cat <<EOF
+Usage: $0 <mayastor-images-tag> [<mayastor-images-repo>]

+Generate (some) deployment YAMLs from the helm chart and store them to deploy/
+in the repo.
+EOF
+  exit 1
+fi
+if [ "x$2" = x ]; then
+  mayastor_images_repo="NONE"
+else
+  mayastor_images_repo="$2"
+fi

+set -u

+if ! which helm > /dev/null 2>&1; then
+  echo "Install helm to path >v3.4.1"
+  echo "https://github.com/helm/helm/releases/tag/v3.4.1"
+  exit 1
+fi

+SCRIPTDIR="$(realpath "$(dirname "$0")")"

+tmpd=$(mktemp -d /tmp/generate-deploy.sh.XXXXXXXX)
+# shellcheck disable=SC2064
+trap "rm -fr '$tmpd'" HUP QUIT EXIT TERM INT

+if [ "$mayastor_images_repo" = "NONE" ]; then
+  helm template --set "mayastorImagesTag=$1" mayastor "$SCRIPTDIR/../chart" --output-dir="$tmpd" --namespace mayastor
+else
+  helm template --set "mayastorImagesTag=$1,mayastorImagesRepo=$mayastor_images_repo" mayastor "$SCRIPTDIR/../chart" --output-dir="$tmpd" --namespace mayastor
+fi

+mv "$tmpd"/mayastor/templates/*.yaml "$SCRIPTDIR/../deploy/"

From e0463ad3516f5fe8ec2401287396540d8571b150 Mon Sep 17 00:00:00 2001
From: Arne Rusek
Date: Tue, 8 Dec 2020 20:39:03 +0100
Subject: [PATCH 20/85] Use new templatized deployment in e2e tests

---
 .../deploy/csi-daemonset.yaml.template        | 128 ------------------
 .../deploy/mayastor-daemonset.yaml.template   |  84 ------------
 .../deploy/moac-deployment.yaml.template      |  84 ------------
 mayastor-test/e2e/install/install_test.go     |  14 +-
 mayastor-test/e2e/uninstall/uninstall_test.go |  59 +--------
 5 files changed, 14 insertions(+), 355 deletions(-)
 delete mode 100644 mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template
 delete mode 100644 mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template
 delete mode 100644 mayastor-test/e2e/install/deploy/moac-deployment.yaml.template

diff --git a/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template b/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template
deleted file mode 100644
index 71e436db7..000000000
--- a/mayastor-test/e2e/install/deploy/csi-daemonset.yaml.template
+++ /dev/null
@@ -1,128 +0,0 @@
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  namespace: mayastor
-  name: mayastor-csi
-  labels:
-    openebs/engine: mayastor
-spec:
-  selector:
-    matchLabels:
-      app: mayastor-csi
-  updateStrategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 1
-  minReadySeconds: 10
-  template:
-    metadata:
-      labels:
-        app: mayastor-csi
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        kubernetes.io/arch: amd64
-      # NOTE: Each container must have mem/cpu limits defined in order to
-      # belong to Guaranteed QoS class, hence can never get evicted in case of
-      # pressure unless they exceed those limits. limits and requests must be
-      # the same.
- containers: - - name: mayastor-csi - image: ${IMAGE_NAME} - imagePullPolicy: Always - # we need privileged because we mount filesystems and use mknod - securityContext: - privileged: true - env: - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: RUST_BACKTRACE - value: "1" - args: - - "--csi-socket=/csi/csi.sock" - - "--node-name=$(MY_NODE_NAME)" - - "--grpc-endpoint=$(MY_POD_IP):10199" - - "-v" - volumeMounts: - - name: device - mountPath: /dev - - name: sys - mountPath: /sys - - name: run-udev - mountPath: /run/udev - - name: host-root - mountPath: /host - - name: plugin-dir - mountPath: /csi - - name: kubelet-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "100m" - memory: "50Mi" - - name: csi-driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 - args: - - "--csi-address=/csi/csi.sock" - - "--kubelet-registration-path=/var/lib/kubelet/plugins/mayastor.openebs.io/csi.sock" - lifecycle: - preStop: - exec: - # this is needed in order for CSI to detect that the plugin is gone - command: ["/bin/sh", "-c", "rm -f /registration/io.openebs.csi-mayastor-reg.sock /csi/csi.sock"] - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "100m" - memory: "50Mi" - # Mayastor node plugin gRPC server - ports: - - containerPort: 10199 - protocol: TCP - name: mayastor-node - volumes: - - name: device - hostPath: - path: /dev - type: Directory - - name: sys - hostPath: - path: /sys - type: Directory - - name: run-udev - hostPath: - path: /run/udev - type: Directory - - name: host-root - hostPath: - path: / - type: Directory - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/ - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/mayastor.openebs.io/ - type: DirectoryOrCreate - - name: kubelet-dir - hostPath: - path: /var/lib/kubelet - type: Directory diff --git a/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template b/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template deleted file mode 100644 index 894d470f6..000000000 --- a/mayastor-test/e2e/install/deploy/mayastor-daemonset.yaml.template +++ /dev/null @@ -1,84 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - namespace: mayastor - name: mayastor - labels: - openebs/engine: mayastor -spec: - selector: - matchLabels: - app: mayastor - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - minReadySeconds: 10 - template: - metadata: - labels: - app: mayastor - spec: - hostNetwork: true - # To resolve services from mayastor namespace - dnsPolicy: ClusterFirstWithHostNet - nodeSelector: - openebs.io/engine: mayastor - kubernetes.io/arch: amd64 - # NOTE: Each container must have mem/cpu limits defined in order to - # belong to Guaranteed QoS class, hence can never get evicted in case of - # pressure unless they exceed those limits. limits and requests must be - # the same. 
- initContainers: - - name: message-bus-probe - image: busybox:latest - command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] - containers: - - name: mayastor - image: ${IMAGE_NAME} - imagePullPolicy: Always - env: - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - args: - - "-N$(MY_NODE_NAME)" - - "-g$(MY_POD_IP)" - - "-nnats" - securityContext: - privileged: true - volumeMounts: - - name: device - mountPath: /dev - - name: dshm - mountPath: /dev/shm - resources: - limits: - cpu: "1" - memory: "500Mi" - hugepages-2Mi: "1Gi" - requests: - cpu: "1" - memory: "500Mi" - hugepages-2Mi: "1Gi" - ports: - - containerPort: 10124 - protocol: TCP - name: mayastor - volumes: - - name: device - hostPath: - path: /dev - type: Directory - - name: dshm - emptyDir: - medium: Memory - sizeLimit: "1Gi" - - name: hugepage - emptyDir: - medium: HugePages diff --git a/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template b/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template deleted file mode 100644 index 04592dd35..000000000 --- a/mayastor-test/e2e/install/deploy/moac-deployment.yaml.template +++ /dev/null @@ -1,84 +0,0 @@ ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: moac - namespace: mayastor -spec: - replicas: 1 - selector: - matchLabels: - app: moac - template: - metadata: - labels: - app: moac - spec: - serviceAccount: moac - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.6.0 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v2.2.0 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: moac - image: ${IMAGE_NAME} - imagePullPolicy: Always - args: - - "--csi-address=$(CSI_ENDPOINT)" - - "--namespace=$(MY_POD_NAMESPACE)" - - "--port=4000" - - "--message-bus=nats" - - "-v" - env: - - name: CSI_ENDPOINT - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - ports: - - containerPort: 4000 - protocol: TCP - name: "rest-api" - volumes: - - name: socket-dir - emptyDir: ---- -kind: Service -apiVersion: v1 -metadata: - name: moac - namespace: mayastor -spec: - selector: - app: moac - ports: - - protocol: TCP - port: 4000 - targetPort: 4000 diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go index f35d0f03e..db9ecd02a 100644 --- a/mayastor-test/e2e/install/install_test.go +++ b/mayastor-test/e2e/install/install_test.go @@ -127,6 +127,13 @@ func makeImageName(registryAddress string, imagename string, imageversion string return registryAddress + "/mayadata/" + imagename + ":" + imageversion } +func generateYamls(registryAddress string) { + bashcmd := "../../../scripts/generate-deploy-yamls.sh ci " + registryAddress + cmd := exec.Command("bash", "-c", bashcmd) + _, err := cmd.CombinedOutput() + 
Expect(err).ToNot(HaveOccurred())
+}
+
 func applyTemplatedYaml(filename string, imagename string, registryAddress string) {
 	fullimagename := makeImageName(registryAddress, imagename, "ci")
 	bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -"
@@ -202,9 +209,10 @@ func installMayastor() {
 	applyDeployYaml("moac-rbac.yaml")
 	applyDeployYaml("mayastorpoolcrd.yaml")
 	applyDeployYaml("nats-deployment.yaml")
-	applyTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi", registryAddress)
-	applyTemplatedYaml("moac-deployment.yaml.template", "moac", registryAddress)
-	applyTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor", registryAddress)
+	generateYamls(registryAddress)
+	applyDeployYaml("csi-daemonset.yaml")
+	applyDeployYaml("moac-deployment.yaml")
+	applyDeployYaml("mayastor-daemonset.yaml")
 
 	// Given the yamls and the environment described in the test readme,
 	// we expect mayastor to be running on exactly 2 nodes.
diff --git a/mayastor-test/e2e/uninstall/uninstall_test.go b/mayastor-test/e2e/uninstall/uninstall_test.go
index b8e00ac62..b93bce924 100644
--- a/mayastor-test/e2e/uninstall/uninstall_test.go
+++ b/mayastor-test/e2e/uninstall/uninstall_test.go
@@ -2,7 +2,6 @@ package basic_test
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"os/exec"
 	"path"
@@ -14,7 +13,6 @@ import (
 	. "github.com/onsi/gomega"
 
 	appsv1 "k8s.io/api/apps/v1"
-	coreV1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/deprecated/scheme"
 	"k8s.io/client-go/rest"
@@ -30,46 +28,6 @@ var k8sClient client.Client
 var k8sManager ctrl.Manager
 var testEnv *envtest.Environment
 
-/// Examine the nodes in the k8s cluster and return
-/// the IP address of the master node (if one exists),
-/// The assumption is that the test-registry is accessible via the IP addr of the master,
-/// or any node in the cluster if the master node does not exist
-/// TODO Refine how we workout the address of the test-registry
-func getRegistryAddress() (string, error) {
-	var master = ""
-	nodeList := coreV1.NodeList{}
-	if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) {
-		return master, errors.New("failed to list nodes")
-	}
-	nodeIPs := make([]string, len(nodeList.Items))
-	for ix, k8node := range nodeList.Items {
-		for _, k8Addr := range k8node.Status.Addresses {
-			if k8Addr.Type == coreV1.NodeInternalIP {
-				nodeIPs[ix] = k8Addr.Address
-				for label := range k8node.Labels {
-					if label == "node-role.kubernetes.io/master" {
-						master = k8Addr.Address
-					}
-				}
-			}
-		}
-	}
-
-	/// TODO Refine how we workout the address of the test-registry
-
-	/// If there is master node, use its IP address as the registry IP address
-	if len(master) != 0 {
-		return master, nil
-	}
-
-	if len(nodeIPs) == 0 {
-		return "", errors.New("no usable nodes found")
-	}
-
-	/// Choose the IP address of first node in the list as the registry IP address
-	return nodeIPs[0], nil
-}
-
 // Encapsulate the logic to find where the deploy yamls are
 func getDeployYamlDir() string {
 	_, filename, _, _ := runtime.Caller(0)
@@ -94,15 +52,6 @@ func makeImageName(registryAddress string, registryport string, imagename string
 	return registryAddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion
 }
 
-func deleteTemplatedYaml(filename string, imagename string, registryAddress string) {
-	fullimagename := makeImageName(registryAddress, "30291", imagename, "ci")
-	bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl delete -f -"
-	cmd := exec.Command("bash", "-c", bashcmd)
-
cmd.Dir = getTemplateYamlDir()
-	_, err := cmd.CombinedOutput()
-	Expect(err).ToNot(HaveOccurred())
-}
-
 // We expect this to fail a few times before it succeeds,
 // so no throwing errors from here.
 func mayastorReadyPodCount() int {
@@ -117,11 +66,9 @@ func mayastorReadyPodCount() int {
 // We deliberately call out to kubectl, rather than constructing the client-go
 // objects, so that we can verify the local deploy yamls are correct.
 func teardownMayastor() {
-	registryAddress, err := getRegistryAddress()
-	Expect(err).ToNot(HaveOccurred())
-	deleteTemplatedYaml("mayastor-daemonset.yaml.template", "mayastor", registryAddress)
-	deleteTemplatedYaml("moac-deployment.yaml.template", "moac", registryAddress)
-	deleteTemplatedYaml("csi-daemonset.yaml.template", "mayastor-csi", registryAddress)
+	deleteDeployYaml("mayastor-daemonset.yaml")
+	deleteDeployYaml("moac-deployment.yaml")
+	deleteDeployYaml("csi-daemonset.yaml")
 	deleteDeployYaml("nats-deployment.yaml")
 	deleteDeployYaml("mayastorpoolcrd.yaml")
 	deleteDeployYaml("moac-rbac.yaml")

From fbba721371ae4bfc2049c73440af1c6c052bcf9d Mon Sep 17 00:00:00 2001
From: Arne Rusek
Date: Tue, 8 Dec 2020 21:18:33 +0100
Subject: [PATCH 21/85] Fix chart/README.md namespace TODO item

---
 chart/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chart/README.md b/chart/README.md
index d22a69214..f9c8a4730 100644
--- a/chart/README.md
+++ b/chart/README.md
@@ -23,7 +23,7 @@ kubectl delete namespace mayastor
 
 ## templating
 
-[ ] templatize namespace properly - helm installs to `default` but mayastor hardcodes `mayastor` namespace
+[ ] templatize namespace properly - mayastor namespace is hardcoded in yaml templates
   - use Release.Namespace
   - use Release.Name
 [ ] allow pulling image from authenticated repository

From 6b60b09e24a3869db85e589aeeb92a144df9250b Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Mon, 30 Nov 2020 21:07:37 +0100
Subject: [PATCH 22/85] Connect jenkins to e2e test cluster.

The modest goal of this commit is to make Jenkins aware of the e2e test
cluster without executing any e2e tests for now. A new e2e stage is
added, and in that stage we build mayastor debug images, upload them to
the private docker registry and run kubectl get nodes to test that
kubeconfig is set correctly.

Command line arguments of release.sh have been reworked in order to
provide greater flexibility for various use cases.

The parallelsAlwaysFailFast option was removed from the Jenkinsfile
because it increases the likelihood of leaking resources from unclean
shutdowns.
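For reference, the reworked flags support invocations like the following
(the registry address is a placeholder, not a default baked into the
script):

  # build debug images and push them under the "ci" alias tag
  ./scripts/release.sh --debug --alias-tag ci --registry 127.0.0.1:5000

  # build only the moac image, without publishing anything
  ./scripts/release.sh --image moac --skip-publish

  # push previously built images without rebuilding them
  ./scripts/release.sh --skip-build --alias-tag ci --registry 127.0.0.1:5000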
---
 Jenkinsfile                           |  20 +++--
 doc/jenkins.md                        |  11 ++-
 mayastor-test/e2e/example-parallel.sh |   4 +-
 mayastor-test/e2e/example-simple.sh   |   2 +-
 scripts/release.sh                    | 123 ++++++++++++++++----------
 5 files changed, 97 insertions(+), 63 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index b171d26a8..877958c2d 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -49,7 +49,6 @@ pipeline {
   agent none
   options {
     timeout(time: 2, unit: 'HOURS')
-    parallelsAlwaysFailFast()
   }
   triggers {
     cron(cron_schedule)
@@ -134,14 +133,17 @@ pipeline {
         }
       }
     }
-    stage('dev images') {
-      agent { label 'nixos-mayastor' }
-      steps {
-        sh 'nix-build --no-out-link -A images.mayastor-dev-image'
-        sh 'nix-build --no-out-link -A images.mayastor-csi-dev-image'
-        sh 'nix-build --no-out-link -A images.moac-image'
-        sh 'nix-store --delete /nix/store/*docker-image*'
-      }
+      }
+    }
+    stage('e2e tests') {
+      agent { label 'nixos-mayastor' }
+      steps {
+        // build images (REGISTRY is set in Jenkins' global configuration)
+        sh "./scripts/release.sh --debug --alias-tag ci --registry ${env.REGISTRY}"
+        // save space by removing docker images that are never reused
+        sh 'nix-store --delete /nix/store/*docker-image*'
+        withCredentials([file(credentialsId: 'kubeconfig', variable: 'KUBECONFIG')]) {
+          sh 'kubectl get nodes -o wide'
+        }
       }
     }
diff --git a/doc/jenkins.md b/doc/jenkins.md
index 091f70624..12044b27c 100644
--- a/doc/jenkins.md
+++ b/doc/jenkins.md
@@ -189,7 +189,7 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.).
     users.users.jenkins.openssh.authorizedKeys.keys = [
       "ssh-rsa key used by Jenkins master ..."
     ];
     environment.systemPackages = with pkgs; [
-      wget curl vim git jdk openiscsi nvme-cli lsof
+      wget curl vim git jdk openiscsi nvme-cli lsof kubectl
     ];
   }
   ```
@@ -226,8 +226,13 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.).
 
 3. Hardware file is the same as for the master (if needed).
 
-4. Set password for Jenkins user using passwd. You will need it when joining
-   the slave from Jenkins web UI.
+4. Create /etc/docker/daemon.json, replace the private registry IP in there
+   and restart the docker daemon:
+   ```
+   {
+     "insecure-registries" : ["192.168.1.60:5000"]
+   }
+   ```
+   This will allow the worker node to push docker images to the HTTP registry.
 
 5. You can repeat the steps and set up as many slaves as you want.

diff --git a/mayastor-test/e2e/example-parallel.sh b/mayastor-test/e2e/example-parallel.sh
index 87454c29c..ea48f5a89 100755
--- a/mayastor-test/e2e/example-parallel.sh
+++ b/mayastor-test/e2e/example-parallel.sh
@@ -9,14 +9,14 @@ cd "$(dirname ${BASH_SOURCE[0]})"
 pushd setup
 ./bringup-cluster.sh &
 popd
-../../scripts/release.sh --skip-publish-to-dockerhub &
+../../scripts/release.sh --skip-publish &
 
 for job in $(jobs -p); do
 	wait $job
 done
 
 # Now that everything is up and built, push the images...
-../../scripts/release.sh --skip-publish-to-dockerhub --skip-build --private-registry "172.18.8.101:30291"
+../../scripts/release.sh --skip-build --alias-tag "ci" --registry "172.18.8.101:30291"
 
 # ... and install mayastor.
pushd install

diff --git a/mayastor-test/e2e/example-simple.sh b/mayastor-test/e2e/example-simple.sh
index 21490acc7..d1a82c0f9 100755
--- a/mayastor-test/e2e/example-simple.sh
+++ b/mayastor-test/e2e/example-simple.sh
@@ -7,7 +7,7 @@ pushd setup
 ./bringup-cluster.sh
 popd
 
-../../scripts/release.sh --private-registry "172.18.8.101:30291" --skip-publish-to-dockerhub
+../../scripts/release.sh --registry "172.18.8.101:30291" --alias-tag "ci"
 
 pushd install
 go test

diff --git a/scripts/release.sh b/scripts/release.sh
index 18d1b479f..7b68d51a4 100755
--- a/scripts/release.sh
+++ b/scripts/release.sh
@@ -7,10 +7,13 @@
 
 set -euo pipefail
 
-docker_tag_exists() {
+# Test if the image already exists in dockerhub
+dockerhub_tag_exists() {
   curl --silent -f -lSL https://index.docker.io/v1/repositories/$1/tags/$2 1>/dev/null 2>&1
 }
 
+# Derives tag name from the git repo. That is git tag value or short commit
+# hash if there is no git tag on HEAD.
 get_tag() {
   vers=`git tag --points-at HEAD`
   if [ -z "$vers" ]; then
@@ -24,14 +27,17 @@ help() {
 Usage: $(basename $0) [OPTIONS]
 
 Options:
-  -d, --dry-run                   Output actions that would be taken, but don't run them.
-  -h, --help                      Display this text.
-  --private-registry <registry>   Push the built images to the provided registry.
-  --skip-build                    Don't perform nix-build.
-  --skip-publish-to-dockerhub     Don't publish to Dockerhub.
+  -d, --dry-run            Output actions that would be taken, but don't run them.
+  -h, --help               Display this text.
+  --registry <registry>    Push the built images to the provided registry.
+  --debug                  Build debug version of images where possible.
+  --skip-build             Don't perform nix-build.
+  --skip-publish           Don't publish built images.
+  --image <image>          Specify what image to build.
+  --alias-tag <tag>        Explicit alias for short commit hash tag.
 
 Examples:
-  $(basename $0) --private-registry 127.0.0.1:5000
+  $(basename $0) --registry 127.0.0.1:5000
 EOF
 }
 
@@ -39,13 +45,15 @@
 DOCKER="docker"
 NIX_BUILD="nix-build"
 RM="rm"
 SCRIPTDIR=$(dirname "$0")
-IMAGES="mayastor mayastor-csi mayastor-client moac"
 TAG=`get_tag`
 BRANCH=`git rev-parse --abbrev-ref HEAD`
+IMAGES=
 UPLOAD=
-SKIP_PUSH_TO_DOCKERHUB=
-PRIVATE_REGISTRY=
+SKIP_PUBLISH=
 SKIP_BUILD=
+REGISTRY=
+ALIAS=
+DEBUG=
 
 # Check if all needed tools are installed
 curl --version >/dev/null
@@ -73,17 +81,31 @@ while [ "$#" -gt 0 ]; do
       exit 0
       shift
      ;;
-    --private-registry)
+    --registry)
       shift
-      PRIVATE_REGISTRY=$1
+      REGISTRY=$1
+      shift
+      ;;
+    --alias-tag)
+      shift
+      ALIAS=$1
+      shift
+      ;;
+    --image)
+      shift
+      IMAGES="$IMAGES $1"
       shift
       ;;
     --skip-build)
      SKIP_BUILD="yes"
      shift
      ;;
-    --skip-publish-to-dockerhub)
-      SKIP_PUSH_TO_DOCKERHUB="yes"
+    --skip-publish)
+      SKIP_PUBLISH="yes"
+      shift
+      ;;
+    --debug)
+      DEBUG="yes"
      shift
      ;;
    *)
@@ -95,56 +117,61 @@ done
 
 cd $SCRIPTDIR/..
 
+if [ -z "$IMAGES" ]; then
+  if [ -z "$DEBUG" ]; then
+    IMAGES="mayastor mayastor-csi mayastor-client moac"
+  else
+    IMAGES="mayastor-dev mayastor-csi-dev mayastor-client moac"
+  fi
+fi
+
 for name in $IMAGES; do
-  image="mayadata/${name}"
+  image_basename="mayadata/${name}"
+  image=$image_basename
+  if [ -n "$REGISTRY" ]; then
+    image="${REGISTRY}/${image}"
+  fi
+  # If we're skipping the build, then we just want to upload
+  # the images we already have locally.
  if [ -z $SKIP_BUILD ]; then
    archive=${name}-image
-    if docker_tag_exists $image $TAG; then
+    if [ -z "$REGISTRY" ] && dockerhub_tag_exists $image $TAG; then
      echo "Skipping $image:$TAG that already exists"
-    else
-      echo "Building $image:$TAG ..."
-      $NIX_BUILD --out-link $archive -A images.$archive
-      $DOCKER load -i $archive
-      $RM $archive
-      UPLOAD="$UPLOAD $image"
+      continue
+    fi
+    echo "Building $image:$TAG ..."
+    $NIX_BUILD --out-link $archive -A images.$archive
+    $DOCKER load -i $archive
+    $RM $archive
+    if [ "$image" != "$image_basename" ]; then
+      echo "Renaming $image_basename:$TAG to $image:$TAG"
+      $DOCKER tag "${image_basename}:$TAG" "$image:$TAG"
+      $DOCKER image rm "${image_basename}:$TAG"
    fi
-  else
-    # If we're skipping the build, then we just want to upload
-    # the images we already have locally.
-    # We should do this for all images.
-    UPLOAD="$UPLOAD $image"
  fi
+  UPLOAD="$UPLOAD $image"
done
 
-# Nothing to upload?
-[ -z "$UPLOAD" ] && exit 0
-
-if [ -z $SKIP_PUSH_TO_DOCKERHUB ]; then
+if [ -n "$UPLOAD" ] && [ -z "$SKIP_PUBLISH" ]; then
  # Upload them
  for img in $UPLOAD; do
    echo "Uploading $img:$TAG to registry ..."
$DOCKER push $img:$TAG done - # Create aliases - if [ "$BRANCH" == "develop" ]; then - for img in $UPLOAD; do - $DOCKER tag $img:$TAG $img:develop - $DOCKER push $img:develop - done + # Create alias + alias_tag= + if [ -n "$ALIAS" ]; then + alias_tag=$ALIAS + elif [ "$BRANCH" == "develop" ]; then + alias_tag=develop elif [ "$BRANCH" == "master" ]; then + alias_tag=latest + fi + if [ -n "$alias_tag" ]; then for img in $UPLOAD; do - $DOCKER tag $img:$TAG $img:latest - $DOCKER push $img:latest + $DOCKER tag $img:$TAG $img:$alias_tag + $DOCKER push $img:$alias_tag done fi fi - -# If a private registry was specified (ie for ci) -# then we push to it here. -if [ ! -z $PRIVATE_REGISTRY ]; then - for img in $UPLOAD; do - $DOCKER tag $img:$TAG ${PRIVATE_REGISTRY}/$img:ci - $DOCKER push ${PRIVATE_REGISTRY}/$img:ci - done -fi From 99147d92fdeff48afb5b23ad457d323f68e82240 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 7 Dec 2020 17:33:24 +0000 Subject: [PATCH 23/85] Added setup and teardown stages for the disconnect tests. Replaced fixed interval with test+timeout when waiting for the degraded replica. Use custom storage class name. Deploy fio via client-go instead of yaml. Adjust moac node affinity using client-go. --- mayastor-test/e2e/common/util.go | 183 +++++++++++++++++- mayastor-test/e2e/go.sum | 3 + mayastor-test/e2e/node_disconnect/README.md | 26 +-- .../e2e/node_disconnect/deploy/fio_iscsi.yaml | 21 -- .../e2e/node_disconnect/deploy/fio_nvmf.yaml | 21 -- .../deploy/moac-deployment-refuge.yaml | 72 ------- .../deploy/storage-class-2-repl.yaml | 17 -- .../lib/node_disconnect_lib.go | 39 ++-- .../node_disconnect_iscsi_drop_test.go | 6 +- .../node_disconnect_iscsi_reject_test.go | 6 +- .../node_disconnect_nvmf_drop_test.go | 6 +- .../node_disconnect_nvmf_reject_test.go | 7 +- .../node_disconnect_setup_test.go | 118 +++++++++++ .../node_disconnect_teardown_test.go | 71 +++++++ mayastor-test/e2e/node_disconnect/test.sh | 13 +- 15 files changed, 415 insertions(+), 194 deletions(-) delete mode 100644 mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml delete mode 100644 mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml delete mode 100644 mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml delete mode 100644 mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go create mode 100644 mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go index 11e128120..7aefac6f5 100644 --- a/mayastor-test/e2e/common/util.go +++ b/mayastor-test/e2e/common/util.go @@ -5,13 +5,18 @@ import ( "errors" "fmt" "os/exec" + "regexp" + "strconv" "strings" + "time" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,13 +44,6 @@ func DeleteDeployYaml(filename string) { Expect(err).ToNot(HaveOccurred()) } -func LabelNode(nodename string, label string) { - cmd := exec.Command("kubectl", "label", "node", nodename, label) - cmd.Dir = "" - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) -} - // Status part of the mayastor volume CRD type MayastorVolStatus struct { State string @@ -393,9 +391,14 @@ func 
DeletePod(podName string) error { return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) } +func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { + podDef := CreateFioPodDef(podName, volName) + return CreatePod(podDef) +} + /// Create a test fio pod in default namespace, no options and no context /// mayastor volume is mounted on /volume -func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { +func CreateFioPodDef(podName string, volName string) *corev1.Pod { podDef := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -428,7 +431,7 @@ func CreateFioPod(podName string, volName string) (*corev1.Pod, error) { }, }, } - return CreatePod(&podDef) + return &podDef } type NodeLocation struct { @@ -470,3 +473,165 @@ func GetNodeLocs() ([]NodeLocation, error) { } return NodeLocs, nil } + +// create a storage class +func MkStorageClass(scName string, scReplicas int, protocol string, provisioner string) { + createOpts := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: scName, + Namespace: "default", + }, + Provisioner: provisioner, + } + createOpts.Parameters = make(map[string]string) + createOpts.Parameters["protocol"] = protocol + createOpts.Parameters["repl"] = strconv.Itoa(scReplicas) + + ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses + _, createErr := ScApi().Create(context.TODO(), createOpts, metav1.CreateOptions{}) + Expect(createErr).To(BeNil()) +} + +// remove a storage class +func RmStorageClass(scName string) { + ScApi := gTestEnv.KubeInt.StorageV1().StorageClasses + deleteErr := ScApi().Delete(context.TODO(), scName, metav1.DeleteOptions{}) + Expect(deleteErr).To(BeNil()) +} + +// Add a node selector to the given pod definition +func ApplyNodeSelectorToPodObject(pod *corev1.Pod, label string, value string) { + if pod.Spec.NodeSelector == nil { + pod.Spec.NodeSelector = make(map[string]string) + } + pod.Spec.NodeSelector[label] = value +} + +// Add a node selector to the deployment spec and apply +func ApplyNodeSelectorToDeployment(deploymentName string, namespace string, label string, value string) { + depApi := gTestEnv.KubeInt.AppsV1().Deployments + deployment, err := depApi(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + if deployment.Spec.Template.Spec.NodeSelector == nil { + deployment.Spec.Template.Spec.NodeSelector = make(map[string]string) + } + deployment.Spec.Template.Spec.NodeSelector[label] = value + _, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) +} + +// Remove all node selectors from the deployment spec and apply +func RemoveAllNodeSelectorsFromDeployment(deploymentName string, namespace string) { + depApi := gTestEnv.KubeInt.AppsV1().Deployments + deployment, err := depApi(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + if deployment.Spec.Template.Spec.NodeSelector != nil { + deployment.Spec.Template.Spec.NodeSelector = nil + _, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{}) + } + Expect(err).ToNot(HaveOccurred()) +} + +// Adjust the number of replicas in the deployment +func SetDeploymentReplication(deploymentName string, namespace string, replicas *int32) { + depApi := gTestEnv.KubeInt.AppsV1().Deployments + deployment, err := depApi(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + 
Expect(err).ToNot(HaveOccurred()) + deployment.Spec.Replicas = replicas + _, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) +} + +// TODO remove dependency on kubectl +// label is a string in the form "key=value" +// function still succeeds if label already present +func LabelNode(nodename string, label string) { + cmd := exec.Command("kubectl", "label", "node", nodename, label, "--overwrite=true") + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// TODO remove dependency on kubectl +// function still succeeds if label not present +func UnlabelNode(nodename string, label string) { + cmd := exec.Command("kubectl", "label", "node", nodename, label+"-") + cmd.Dir = "" + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// Wait until all instances of the specified pod are absent from the given node +func WaitForPodAbsentFromNode(podNameRegexp string, namespace string, nodeName string, timeoutSeconds int) error { + var validID = regexp.MustCompile(podNameRegexp) + var podAbsent bool = false + + podApi := gTestEnv.KubeInt.CoreV1().Pods + + for i := 0; i < timeoutSeconds && podAbsent == false; i++ { + podAbsent = true + time.Sleep(time.Second) + podList, err := podApi(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return errors.New("failed to list pods") + } + for _, pod := range podList.Items { + if pod.Spec.NodeName == nodeName { + if validID.MatchString(pod.Name) { + podAbsent = false + break + } + } + } + } + if podAbsent == false { + return errors.New("timed out waiting for pod") + } + return nil +} + +// Wait until the instance of the specified pod is present and in the running +// state on the given node +func WaitForPodRunningOnNode(podNameRegexp string, namespace string, nodeName string, timeoutSeconds int) error { + var validID = regexp.MustCompile(podNameRegexp) + podReady := false + + podApi := gTestEnv.KubeInt.CoreV1().Pods + + for i := 0; i < timeoutSeconds && podReady == false; i++ { + time.Sleep(time.Second) + podList, err := podApi(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return errors.New("failed to list pods") + } + for _, pod := range podList.Items { + if pod.Spec.NodeName == nodeName && pod.Status.Phase == v1.PodRunning { + if validID.MatchString(pod.Name) { + podReady = true + break + } + } + } + } + if podReady == false { + return errors.New("timed out waiting for pod") + } + return nil +} + +// returns true if the pod is present on the given node +func PodPresentOnNode(podNameRegexp string, namespace string, nodeName string) bool { + var validID = regexp.MustCompile(podNameRegexp) + podApi := gTestEnv.KubeInt.CoreV1().Pods + podList, err := podApi(namespace).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + + for _, pod := range podList.Items { + if pod.Spec.NodeName == nodeName { + if validID.MatchString(pod.Name) { + return true + } + } + } + return false +} diff --git a/mayastor-test/e2e/go.sum b/mayastor-test/e2e/go.sum index 5029a9981..72ca0926a 100644 --- a/mayastor-test/e2e/go.sum +++ b/mayastor-test/e2e/go.sum @@ -481,9 +481,12 @@ k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrq k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod 
h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw= k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= +k8s.io/client-go v1.5.1 h1:XaX/lo2/u3/pmFau8HN+sB5C/b4dc4Dmm2eXjBH4p1E= +k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/mayastor-test/e2e/node_disconnect/README.md b/mayastor-test/e2e/node_disconnect/README.md index a2105a61c..a235bf702 100644 --- a/mayastor-test/e2e/node_disconnect/README.md +++ b/mayastor-test/e2e/node_disconnect/README.md @@ -1,27 +1,9 @@ ## Note The tests in this folder are not currently deployable by the CI system -as the automated install does not provide the pre-requisites below. +as the test assumes a vagrant installation ## Pre-requisites for this test -* A 3-node cluster with nodes k8s-1 k8s-2 and k8s-3 located at - 172.18.8.101-3 respectively -* k8s-1 is the master node, does NOT have the label openebs.io/engine - to avoid having to disconnect the master node. and is labelled - openebs.io/podrefuge=true -* moac is deployed with the following selector to keep it on k8s-1: - -``` - nodeSelector: - openebs.io/podrefuge: "true" -``` - - see deploy/moac-deployment-refuge.yaml` - -* k8s-2 and k8s-3 are labelled openebs.io/engine=mayastor, as usual -* the cluster is deployed using vagrant via bringup_cluster.sh and - KUBESPRAY_REPO is correctly defined in ../common/io_connect_node.sh -* mayastor is installed on the cluster, with mayastor instances on - k8s-2 and k8s-3 only (due to the node labels) -* the storage classes defined in deploy/storage-class-2-repl.yaml - have been applied (replica count of 2). +* A Kubernetes cluster with at least 3 nodes, with mayastor installed. 
+* The cluster is deployed using vagrant and KUBESPRAY_REPO is correctly + defined in ./lib/io_connect_node.sh diff --git a/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml b/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml deleted file mode 100644 index 59b0f92b6..000000000 --- a/mayastor-test/e2e/node_disconnect/deploy/fio_iscsi.yaml +++ /dev/null @@ -1,21 +0,0 @@ -kind: Pod -apiVersion: v1 -metadata: - name: fio -spec: - volumes: - - name: ms-volume - persistentVolumeClaim: - claimName: loss-test-pvc-iscsi - containers: - - name: fio - image: nixery.dev/shell/fio/tini - command: [ "tini", "--" ] - args: - - sleep - - "1000000" - volumeMounts: - - mountPath: "/volume" - name: ms-volume - nodeSelector: - openebs.io/podrefuge: "true" diff --git a/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml b/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml deleted file mode 100644 index 69c35a570..000000000 --- a/mayastor-test/e2e/node_disconnect/deploy/fio_nvmf.yaml +++ /dev/null @@ -1,21 +0,0 @@ -kind: Pod -apiVersion: v1 -metadata: - name: fio -spec: - volumes: - - name: ms-volume - persistentVolumeClaim: - claimName: loss-test-pvc-nvmf - containers: - - name: fio - image: nixery.dev/shell/fio/tini - command: [ "tini", "--" ] - args: - - sleep - - "1000000" - volumeMounts: - - mountPath: "/volume" - name: ms-volume - nodeSelector: - openebs.io/podrefuge: "true" diff --git a/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml b/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml deleted file mode 100644 index 2aefd59ae..000000000 --- a/mayastor-test/e2e/node_disconnect/deploy/moac-deployment-refuge.yaml +++ /dev/null @@ -1,72 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: moac - namespace: mayastor -spec: - replicas: 1 - selector: - matchLabels: - app: moac - template: - metadata: - labels: - app: moac - spec: - nodeSelector: - openebs.io/podrefuge: "true" - serviceAccount: moac - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.6.0 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v2.2.0 - args: - - "--v=2" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: moac - image: 172.18.8.101:30291/mayadata/moac:ci - imagePullPolicy: Always - args: - - "--csi-address=$(CSI_ENDPOINT)" - - "--namespace=$(MY_POD_NAMESPACE)" - - "--port=4000" - - "--message-bus=nats" - - "-v" - env: - - name: CSI_ENDPOINT - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - ports: - - containerPort: 4000 - protocol: TCP - name: "rest-api" - volumes: - - name: socket-dir - emptyDir: diff --git a/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml b/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml deleted file mode 100644 index 0c827ec72..000000000 --- a/mayastor-test/e2e/node_disconnect/deploy/storage-class-2-repl.yaml +++ /dev/null @@ -1,17 +0,0 @@ 
-kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: mayastor-iscsi -parameters: - repl: '2' - protocol: 'iscsi' -provisioner: io.openebs.csi-mayastor ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: mayastor-nvmf -parameters: - repl: '2' - protocol: 'nvmf' -provisioner: io.openebs.csi-mayastor diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go index ae7d1f992..9a1795060 100644 --- a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -13,6 +13,7 @@ var ( defTimeoutSecs = "90s" ) +// disconnect a node from the other nodes in the cluster func DisconnectNode(vmname string, otherNodes []string, method string) { for _, targetIP := range otherNodes { cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "DISCONNECT", method) @@ -22,6 +23,7 @@ func DisconnectNode(vmname string, otherNodes []string, method string) { } } +// reconnect a node to the other nodes in the cluster func ReconnectNode(vmname string, otherNodes []string, checkError bool, method string) { for _, targetIP := range otherNodes { cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "RECONNECT", method) @@ -65,6 +67,7 @@ func GetNodes(uuid string) (string, []string) { return nodeToIsolate, otherAddresses } +// Run fio against the cluster while a replica is being removed and reconnected to the network func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { fmt.Printf("running spawned fio\n") go common.RunFio("fio", 20) @@ -74,16 +77,19 @@ func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod str DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) - fmt.Printf("waiting 60s for disconnection to affect the nexus\n") - time.Sleep(60 * time.Second) + fmt.Printf("waiting up to 90s for disconnection to affect the nexus\n") + Eventually(func() string { + return common.GetMsvState(uuid) + }, + 90*time.Second, // timeout + "1s", // polling interval + ).Should(Equal("degraded")) + + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) fmt.Printf("running fio while node is disconnected\n") common.RunFio("fio", 20) - volumeState := common.GetMsvState(uuid) - fmt.Printf("Volume state is \"%s\"\n", volumeState) - Expect(volumeState == "degraded") - fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) @@ -91,10 +97,11 @@ func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod str common.RunFio("fio", 20) } -func Setup(pvc_name string, storage_class_name string, fio_yaml_path string) string { +// Common steps required when setting up the test +func Setup(pvc_name string, storage_class_name string) string { uuid := common.MkPVC(fmt.Sprintf(pvc_name), storage_class_name) - common.ApplyDeployYaml(fio_yaml_path) + CreateFioOnRefugeNode("fio", pvc_name) fmt.Printf("waiting for fio\n") Eventually(func() bool { @@ -106,11 +113,21 @@ func Setup(pvc_name string, storage_class_name string, fio_yaml_path string) str return uuid } -func Teardown(pvc_name string, storage_class_name string, fio_yaml_path string) { +// Common steps required when tearing down the test +func Teardown(pvcName string, storageClassName string) { fmt.Printf("removing fio pod\n") - common.DeleteDeployYaml(fio_yaml_path) + err := common.DeletePod("fio") + 
Expect(err).ToNot(HaveOccurred()) fmt.Printf("removing pvc\n") - common.RmPVC(fmt.Sprintf(pvc_name), storage_class_name) + common.RmPVC(fmt.Sprintf(pvcName), storageClassName) +} + +// Deploy an instance of fio on a node labelled as "podrefuge" +func CreateFioOnRefugeNode(podName string, vol_claim_name string) { + podObj := common.CreateFioPodDef(podName, vol_claim_name) + common.ApplyNodeSelectorToPodObject(podObj, "openebs.io/podrefuge", "true") + _, err := common.CreatePod(podObj) + Expect(err).ToNot(HaveOccurred()) } diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go index aaa1bda12..c575fadde 100644 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go @@ -3,7 +3,6 @@ package node_disconnect_iscsi_drop_test import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" - "fmt" "testing" . "github.com/onsi/ginkgo" @@ -39,7 +38,7 @@ var _ = Describe("Mayastor node loss test", func() { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") close(done) }, 60) @@ -49,8 +48,7 @@ var _ = AfterSuite(func() { By("tearing down the test environment") // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToIsolate) disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") common.TeardownTestEnv() }) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go index 10954d2b3..5f9cf4412 100644 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go @@ -3,7 +3,6 @@ package node_disconnect_iscsi_reject_test import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" - "fmt" "testing" . 
"github.com/onsi/ginkgo" @@ -39,7 +38,7 @@ var _ = Describe("Mayastor node loss test", func() { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") close(done) }, 60) @@ -49,8 +48,7 @@ var _ = AfterSuite(func() { By("tearing down the test environment") // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToIsolate) disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi", "../deploy/fio_iscsi.yaml") + disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") common.TeardownTestEnv() }) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go index a586b226c..c5c49bad5 100644 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go @@ -3,7 +3,6 @@ package node_disconnect_nvmf_drop_test import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" - "fmt" "testing" . "github.com/onsi/ginkgo" @@ -39,7 +38,7 @@ var _ = Describe("Mayastor node loss test", func() { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") close(done) }, 60) @@ -49,8 +48,7 @@ var _ = AfterSuite(func() { By("tearing down the test environment") // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToIsolate) disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") common.TeardownTestEnv() }) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go index d4e896e78..ffa55f4ae 100644 --- a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go @@ -3,7 +3,7 @@ package node_disconnect_nvmf_reject_test import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" - "fmt" + "testing" . 
"github.com/onsi/ginkgo" @@ -39,7 +39,7 @@ var _ = Describe("Mayastor node loss test", func() { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") close(done) }, 60) @@ -49,8 +49,7 @@ var _ = AfterSuite(func() { By("tearing down the test environment") // ensure node is reconnected in the event of a test failure - fmt.Printf("reconnecting %s\n", g_nodeToIsolate) disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf", "../deploy/fio_nvmf.yaml") + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") common.TeardownTestEnv() }) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go new file mode 100644 index 000000000..1632e2ace --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go @@ -0,0 +1,118 @@ +package node_disconnect_setup_test + +import ( + "e2e-basic/common" + "fmt" + "sort" + + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +const mayastor_regexp = "^mayastor-.....$" +const moac_regexp = "^moac-..........-.....$" +const namespace = "mayastor" +const timeoutSeconds = 100 + +// Set up for disconnection tests. Ensure moac is on the refuge node but +// no mayastor instances are +func disconnectSetupTest() { + // ensure we are using 2 replicas + common.MkStorageClass("mayastor-iscsi-2", 2, "iscsi", "io.openebs.csi-mayastor") + common.MkStorageClass("mayastor-nvmf-2", 2, "nvmf", "io.openebs.csi-mayastor") + + nodeList, err := common.GetNodeLocs() + Expect(err).ToNot(HaveOccurred()) + Expect(len(nodeList) >= 3) + + // sort the nodes - that also means k8s-1 is the refuge on local clusters + sort.Slice(nodeList, func(i, j int) bool { return nodeList[i].NodeName < nodeList[j].NodeName }) + refugeIndex := 0 + + // Select one node to be the refuge, remove the engine=mayastor label so mayastor does not run there + refugeNode := "" + for i, node := range nodeList { + if i == refugeIndex { + refugeNode = node.NodeName + common.UnlabelNode(refugeNode, "openebs.io/engine") + common.LabelNode(refugeNode, "openebs.io/podrefuge=true") + } + } + Expect(refugeNode != "") + + moacOnRefugeNode := common.PodPresentOnNode(moac_regexp, namespace, refugeNode) + + // Update moac to ensure it stays on the refuge node (even if it currently is) + fmt.Printf("apply moac node selector for node \"%s\"\n", refugeNode) + common.ApplyNodeSelectorToDeployment("moac", namespace, "openebs.io/podrefuge", "true") + + // if not already on the refuge node + if moacOnRefugeNode == false { + fmt.Printf("moving moac to node \"%s\"\n", refugeNode) + // reduce the number of moac instances to be zero + // this seems to be needed to guarantee that moac moves to the refuge node + var repl int32 = 0 + common.SetDeploymentReplication("moac", namespace, &repl) + + // wait for moac to disappear from the cluster + for _, node := range nodeList { + fmt.Printf("waiting for moac absence from %s\n", node.NodeName) + err = common.WaitForPodAbsentFromNode(moac_regexp, namespace, node.NodeName, timeoutSeconds) + 
Expect(err).ToNot(HaveOccurred()) + } + + // bring the number of moac instances back to 1 + repl = 1 + common.SetDeploymentReplication("moac", namespace, &repl) + + // wait for moac to be running on the refuge node + fmt.Printf("waiting for moac presence on %s\n", refugeNode) + err = common.WaitForPodRunningOnNode(moac_regexp, namespace, refugeNode, timeoutSeconds) + Expect(err).ToNot(HaveOccurred()) + } + + // wait until all mayastor pods are in state "Running" and only on the non-refuge nodes + fmt.Printf("waiting for mayastor absence from %s\n", refugeNode) + err = common.WaitForPodAbsentFromNode(mayastor_regexp, namespace, refugeNode, timeoutSeconds) + Expect(err).ToNot(HaveOccurred()) + + for _, node := range nodeList { + if node.NodeName != refugeNode { + fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) + err = common.WaitForPodRunningOnNode(mayastor_regexp, namespace, node.NodeName, timeoutSeconds) + Expect(err).ToNot(HaveOccurred()) + } + } +} + +func TestNodeLossSetup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss Test Setup") +} + +var _ = Describe("Mayastor disconnect setup", func() { + It("should correctly set up the cluster for disconnection testing", func() { + disconnectSetupTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + common.SetupTestEnv() + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go b/mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go new file mode 100644 index 000000000..da67c60b8 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go @@ -0,0 +1,71 @@ +package node_disconnect_teardown_test + +import ( + "e2e-basic/common" + "fmt" + + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +const mayastor_regexp = "^mayastor-.....$" +const namespace = "mayastor" +const timeoutSeconds = 100 + +func disconnectTeardownTest() { + common.RmStorageClass("mayastor-iscsi-2") + common.RmStorageClass("mayastor-nvmf-2") + + nodeList, err := common.GetNodeLocs() + Expect(err).ToNot(HaveOccurred()) + Expect(len(nodeList) >= 3) + + // apply/remove the labels whether present or not + // An error will not occur if the label is already present/absent + for _, node := range nodeList { + common.LabelNode(node.NodeName, "openebs.io/engine=mayastor") + common.UnlabelNode(node.NodeName, "openebs.io/podrefuge") + } + + fmt.Printf("remove moac node affinity\n") + common.RemoveAllNodeSelectorsFromDeployment("moac", namespace) + + // wait until all nodes have mayastor pods in state "Running" + for _, node := range nodeList { + fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) + err = common.WaitForPodRunningOnNode(mayastor_regexp, namespace, node.NodeName, timeoutSeconds) + Expect(err).ToNot(HaveOccurred()) + } +} + +func TestNodeLossTeardown(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss Test Teardown") +} + +var _ = Describe("Mayastor disconnect setup", func() { + It("should correctly tear down the cluster after disconnection testing", func() { + disconnectTeardownTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + common.SetupTestEnv() + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/mayastor-test/e2e/node_disconnect/test.sh index eeba06c8c..b7a6722f5 100755 --- a/mayastor-test/e2e/node_disconnect/test.sh +++ b/mayastor-test/e2e/node_disconnect/test.sh @@ -3,11 +3,14 @@ set -e timeout=200 -# TODO run setup test here -(cd node_disconnect_nvmf_reject && go test --timeout "${timeout}s") -(cd node_disconnect_iscsi_reject && go test --timeout "${timeout}s") +(cd node_disconnect_setup && go test -timeout "${timeout}s") + +(cd node_disconnect_nvmf_reject && go test -timeout "${timeout}s") +(cd node_disconnect_iscsi_reject && go test -timeout "${timeout}s") # These tests currently fail -# (cd node_disconnect_nvmf_drop && go test --timeout "${timeout}s") -# (cd node_disconnect_iscsi_drop && go test --timeout "${timeout}s") +# (cd node_disconnect_nvmf_drop && go test -timeout "${timeout}s") +# (cd node_disconnect_iscsi_drop && go test -timeout "${timeout}s") + +(cd node_disconnect_teardown && go test -timeout "${timeout}s") From 28164a6459c8a600c3288903e77559eaf593ffd0 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Wed, 9 Dec 2020 14:57:15 +0100 Subject: [PATCH 24/85] nexus: log error messages in reconfigure context When we reconfigure channels, we unwrap() as we do not allow/expect that to fail in anyway. Although this is still correct, it can actually failed with for various reasons. We can not determine the actual error so we have to account for this by simply continuing and not unwrapping. 
--- mayastor/src/bdev/nexus/nexus_channel.rs | 54 ++++++++++++------------ mayastor/src/bdev/nexus/nexus_child.rs | 34 ++++++++++----- mayastor/src/bdev/nexus/nexus_label.rs | 12 +++--- mayastor/src/core/descriptor.rs | 4 ++ mayastor/tests/nexus_label.rs | 5 +-- 5 files changed, 63 insertions(+), 46 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index a1b56e2ea..6e011e375 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -1,6 +1,8 @@ //! //! IO is driven by means of so called channels. -use std::{convert::TryFrom, ffi::c_void}; +use std::{ffi::c_void, ptr::NonNull}; + +use futures::channel::oneshot; use spdk_sys::{ spdk_for_each_channel, @@ -13,11 +15,9 @@ use spdk_sys::{ }; use crate::{ - bdev::{nexus::nexus_child::ChildState, Nexus}, + bdev::{nexus::nexus_child::ChildState, Nexus, Reason}, core::{BdevHandle, Mthread}, }; -use futures::channel::oneshot; -use std::ptr::NonNull; /// io channel, per core #[repr(C)] @@ -125,12 +125,12 @@ impl NexusChannelInner { .iter_mut() .filter(|c| c.state() == ChildState::Open) .for_each(|c| { - self.writers.push( - BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ); - self.readers.push( - BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ); + if let Ok(hdl) = c.handle() { + self.readers.push(hdl); + } else { + c.set_state(ChildState::Faulted(Reason::CantOpen)); + error!("failed to create handle for {}", c); + } }); // then add write-only children @@ -139,13 +139,14 @@ impl NexusChannelInner { .children .iter_mut() .filter(|c| c.rebuilding()) - .map(|c| { - self.writers.push( - BdevHandle::try_from(c.get_descriptor().unwrap()) - .unwrap(), - ) - }) - .for_each(drop); + .for_each(|c| { + if let Ok(hdl) = c.handle() { + self.writers.push(hdl); + } else { + c.set_state(ChildState::Faulted(Reason::CantOpen)); + error!("failed to create handle for {}", c); + } + }); } trace!( @@ -181,15 +182,16 @@ impl NexusChannel { .children .iter_mut() .filter(|c| c.state() == ChildState::Open) - .map(|c| { - channels.writers.push( - BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ); - channels.readers.push( - BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), - ); - }) - .for_each(drop); + .for_each(|c| match (c.handle(), c.handle()) { + (Ok(w), Ok(r)) => { + channels.writers.push(w); + channels.readers.push(r); + } + _ => { + c.set_state(ChildState::Faulted(Reason::CantOpen)); + error!("Failed to get handle for {}, skipping bdev", c) + } + }); ch.inner = Box::into_raw(channels); 0 } diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index d4389635a..40b45911e 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -197,6 +197,9 @@ impl NexusChild { ChildState::Open => { // the child (should) already be open assert_eq!(self.bdev.is_some(), true); + assert_eq!(self.desc.is_some(), true); + info!("called open on an already opened child"); + return Ok(self.name.clone()); } _ => {} } @@ -325,7 +328,7 @@ impl NexusChild { /// return a descriptor to this child pub fn get_descriptor(&self) -> Result, CoreError> { if let Some(ref d) = self.desc { - Ok(d.clone()) + Ok(Arc::clone(d)) } else { Err(CoreError::InvalidDescriptor { name: self.name.clone(), @@ -349,7 +352,7 @@ impl NexusChild { let destroyed = self.destroy().await; // Only wait for bdev removal if the child has been initialised. 
- // An unintialised child won't have an underlying bdev. + // An uninitialized child won't have an underlying bdev. if self.state.load() != ChildState::Init { self.remove_channel.1.next().await; } @@ -456,20 +459,18 @@ impl NexusChild { /// return reference to child's bdev and a new BdevHandle /// both must be present - otherwise it is considered an error - pub fn get_dev(&self) -> Result<(&Bdev, BdevHandle), ChildError> { + pub(crate) fn get_dev(&self) -> Result<(Bdev, BdevHandle), ChildError> { if !self.is_accessible() { info!("{}: Child is inaccessible: {}", self.parent, self.name); return Err(ChildError::ChildInaccessible {}); } - if let Some(bdev) = &self.bdev { - if let Ok(desc) = self.get_descriptor() { - let hndl = - BdevHandle::try_from(desc).context(HandleCreate {})?; - return Ok((bdev, hndl)); - } - } - Err(ChildError::ChildInvalid {}) + let hdl = self + .handle() + .map_err(|_| ChildError::ChildInaccessible {})?; + let bdev = hdl.get_bdev(); + + Ok((bdev, hdl)) } /// Return the rebuild job which is rebuilding this child, if rebuilding @@ -497,6 +498,17 @@ impl NexusChild { None => None, } } + + pub fn handle(&self) -> Result { + if let Some(desc) = self.desc.as_ref() { + BdevHandle::try_from(Arc::clone(desc)) + } else { + error!("BUG: Child {} does not have valid descriptor", self.name); + Err(CoreError::InvalidDescriptor { + name: self.name.clone(), + }) + } + } } /// Looks up a child based on the underlying bdev name diff --git a/mayastor/src/bdev/nexus/nexus_label.rs b/mayastor/src/bdev/nexus/nexus_label.rs index 9dd02f3a8..52e750246 100644 --- a/mayastor/src/bdev/nexus/nexus_label.rs +++ b/mayastor/src/bdev/nexus/nexus_label.rs @@ -919,10 +919,12 @@ impl NexusLabel { impl NexusChild { /// read and validate this child's label pub async fn probe_label(&self) -> Result { - let (bdev, hndl) = self.get_dev().context(NexusChildError {})?; - let block_size = bdev.block_len() as u64; + let hndl = self.handle().context(ReadError { + name: self.name.clone(), + })?; + let bdev = hndl.get_bdev(); + let block_size = hndl.get_bdev().block_len() as u64; - // // Protective MBR let mut buf = hndl.dma_malloc(block_size).context(ReadAlloc { name: String::from("header"), @@ -1076,10 +1078,10 @@ impl NexusChild { offset: u64, buf: &DmaBuf, ) -> Result { - let (_bdev, hndl) = self.get_dev().context(HandleCreate { + let hdl = self.handle().context(WriteError { name: self.name.clone(), })?; - Ok(hndl.write_at(offset, buf).await.context(WriteError { + Ok(hdl.write_at(offset, buf).await.context(WriteError { name: self.name.clone(), })?) } diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs index c927bbbc6..67740e4b9 100644 --- a/mayastor/src/core/descriptor.rs +++ b/mayastor/src/core/descriptor.rs @@ -37,6 +37,10 @@ impl Descriptor { pub fn get_channel(&self) -> Option { let ch = unsafe { spdk_bdev_get_io_channel(self.0) }; if ch.is_null() { + error!( + "failed to get IO channel for, probably low on memory! 
{}",
+            self.get_bdev().name(),
+        );
             None
         } else {
             IoChannel::from_null_checked(ch)
diff --git a/mayastor/tests/nexus_label.rs b/mayastor/tests/nexus_label.rs
index bed66b53c..efb698484 100644
--- a/mayastor/tests/nexus_label.rs
+++ b/mayastor/tests/nexus_label.rs
@@ -1,5 +1,4 @@
 use std::{
-    convert::TryFrom,
     io::{Cursor, Read, Seek, SeekFrom},
     process::Command,
 };
@@ -10,7 +9,6 @@ use mayastor::{
     bdev::{nexus_create, nexus_lookup, GPTHeader, GptEntry},
     core::{
         mayastor_env_stop,
-        BdevHandle,
         DmaBuf,
         MayastorCliArgs,
         MayastorEnvironment,
@@ -126,8 +124,7 @@ async fn make_nexus() {
 async fn label_child() {
     let nexus = nexus_lookup("gpt_nexus").unwrap();
     let child = &mut nexus.children[0];
-    let desc = child.get_descriptor().unwrap();
-    let hdl = BdevHandle::try_from(desc).unwrap();
+    let hdl = child.handle().unwrap();
 
     let mut file = std::fs::File::open("./gpt_primary_test_data.bin").unwrap();
     let mut buffer = hdl.dma_malloc(34 * 512).unwrap();

From 3c5c1d0a6093bd8e4b44366b2a6fd8b0ddbda596 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Wed, 9 Dec 2020 15:05:15 +0100
Subject: [PATCH 25/85] CAS-564: ChildState should remain faulted when retired

We erroneously set the child state to closed, which prevents the replace
logic from kicking in. This is a partial fix, as the MSV should reflect
FAULTED and not DEGRADED. This will be addressed in follow-up fixes.

Fixes #568
---
 mayastor/src/bdev/nexus/nexus_child.rs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs
index 40b45911e..0cbdefd8a 100644
--- a/mayastor/src/bdev/nexus/nexus_child.rs
+++ b/mayastor/src/bdev/nexus/nexus_child.rs
@@ -375,9 +375,7 @@ impl NexusChild {
         let state = self.state();
 
         match state {
-            ChildState::Open
-            | Faulted(Reason::OutOfSync)
-            | Faulted(Reason::IoError) => {
+            ChildState::Open | Faulted(Reason::OutOfSync) => {
                 // Change the state of the child to ensure it is taken out of
                 // the I/O path when the nexus is reconfigured.
                 self.set_state(ChildState::Closed)

From 461757b813e1c61f047515e08026a6dd4a2dead5 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Wed, 9 Dec 2020 16:09:58 +0100
Subject: [PATCH 26/85] nexus: fix error message

---
 mayastor/src/core/descriptor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mayastor/src/core/descriptor.rs b/mayastor/src/core/descriptor.rs
index 67740e4b9..72905d0fe 100644
--- a/mayastor/src/core/descriptor.rs
+++ b/mayastor/src/core/descriptor.rs
@@ -38,7 +38,7 @@ impl Descriptor {
         let ch = unsafe { spdk_bdev_get_io_channel(self.0) };
         if ch.is_null() {
             error!(
-                "failed to get IO channel for, probably low on memory! {}",
+                "failed to get IO channel for {} probably low on memory!",
                 self.get_bdev().name(),
             );
             None

From 9b6845f9853b8429475ccb95c1d3d946580738b7 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Thu, 10 Dec 2020 12:50:29 +0100
Subject: [PATCH 27/85] nexus: ensure proper readers/writers in refresh.

Thanks to Paul for finding this.
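
A minimal sketch of the invariant this fix restores (illustrative types
standing in for NexusChild and BdevHandle, not the nexus source itself):
each open child must contribute one writer and one reader handle, taken
as a pair, so a child either joins the I/O path completely or is faulted.

    struct Handle(String);

    struct Child {
        name: String,
        healthy: bool,
    }

    impl Child {
        // Stand-in for NexusChild::handle(); it is called once per I/O
        // direction because each direction needs its own handle.
        fn handle(&self) -> Result<Handle, ()> {
            if self.healthy {
                Ok(Handle(self.name.clone()))
            } else {
                Err(())
            }
        }
    }

    // Mirrors the corrected refresh(): take both handles up front and
    // only then push them, so writers and readers cannot go out of step.
    fn refresh(children: &mut [Child]) -> (Vec<Handle>, Vec<Handle>) {
        let (mut writers, mut readers) = (Vec::new(), Vec::new());
        for c in children.iter_mut() {
            match (c.handle(), c.handle()) {
                (Ok(w), Ok(r)) => {
                    writers.push(w);
                    readers.push(r);
                }
                _ => {
                    // Real code: set ChildState::Faulted(Reason::CantOpen).
                    c.healthy = false;
                    eprintln!("failed to create handle for {}", c.name);
                }
            }
        }
        (writers, readers)
    }

    fn main() {
        let mut children = vec![
            Child { name: "bdev0".into(), healthy: true },
            Child { name: "bdev1".into(), healthy: false },
        ];
        let (writers, readers) = refresh(&mut children);
        assert_eq!(writers.len(), readers.len());
    }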
--- mayastor/src/bdev/nexus/nexus_channel.rs | 10 ++++++---- mayastor/tests/rebuild.rs | 22 ++++++++++++++++++++-- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index 6e011e375..a1f0cd724 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -124,10 +124,12 @@ impl NexusChannelInner { .children .iter_mut() .filter(|c| c.state() == ChildState::Open) - .for_each(|c| { - if let Ok(hdl) = c.handle() { - self.readers.push(hdl); - } else { + .for_each(|c| match (c.handle(), c.handle()) { + (Ok(w), Ok(r)) => { + self.writers.push(w); + self.readers.push(r); + } + _ => { c.set_state(ChildState::Faulted(Reason::CantOpen)); error!("failed to create handle for {}", c); } diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs index a467bbfb4..c82e3170f 100644 --- a/mayastor/tests/rebuild.rs +++ b/mayastor/tests/rebuild.rs @@ -497,7 +497,26 @@ async fn rebuild_operations() { // Stop rebuild - this will cause the rebuild job to be removed stop_rebuild(nexus_hdl, child2).await; - assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + + let mut ticker = tokio::time::interval(Duration::from_millis(1000)); + let mut number = u32::MAX; + let mut retries = 5; + loop { + ticker.tick().await; + if get_num_rebuilds(nexus_hdl).await == 0 { + number = 0; + break; + } + + retries -= 1; + if retries == 0 { + break; + } + } + + if number != 0 { + panic!("retries failed"); + } } /// Test multiple rebuilds running at the same time. @@ -958,7 +977,6 @@ async fn wait_for_child_state( } false } - /// Returns the state of the nexus with the given uuid. async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> Option { let list = hdl From 965ea68afd7cb352e39b0c02ec9dde459bfd58e1 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Fri, 11 Dec 2020 09:36:17 +0000 Subject: [PATCH 28/85] Add docker compose test for I/O failure The test checks the nexus and child states following an I/O failure. This test case relates to commit d7c2a2eaa9c9cfa3d57e3ffc98a3147349b4fd01. --- mayastor/tests/child_io_error.rs | 207 +++++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 mayastor/tests/child_io_error.rs diff --git a/mayastor/tests/child_io_error.rs b/mayastor/tests/child_io_error.rs new file mode 100644 index 000000000..3a29d1a2d --- /dev/null +++ b/mayastor/tests/child_io_error.rs @@ -0,0 +1,207 @@ +use composer::{Builder, RpcHandle}; +use crossbeam::channel::{unbounded, Receiver}; +use rpc::mayastor::{ + BdevShareRequest, + BdevUri, + ChildState, + CreateNexusRequest, + CreateReply, + DestroyNexusRequest, + Nexus, + NexusState, + Null, + PublishNexusRequest, + ShareProtocolNexus, +}; +use std::{convert::TryFrom, time::Duration}; + +pub mod common; + +/// Test the states of the nexus and children when an I/O error occurs. +/// A child with a failed I/O is expected to be faulted. 
+#[tokio::test] +async fn child_io_error() { + let test = Builder::new() + .name("child_io_error") + .network("10.1.0.0/16") + .add_container("ms1") + .add_container("ms2") + .add_container("ms3") + .with_clean(true) + .with_prune(true) + .build() + .await + .unwrap(); + + let nexus_hdl = &mut test.grpc_handle("ms1").await.unwrap(); + let ms2 = &mut test.grpc_handle("ms2").await.unwrap(); + let ms2_share_uri = bdev_create_and_share(ms2).await; + let ms3 = &mut test.grpc_handle("ms3").await.unwrap(); + let ms3_share_uri = bdev_create_and_share(ms3).await; + + const NEXUS_UUID: &str = "00000000-0000-0000-0000-000000000001"; + const NEXUS_SIZE: u64 = 50 * 1024 * 1024; // 50MiB + + // Create a nexus and run fio against it. + let nexus_uri = nexus_create_and_publish( + nexus_hdl, + NEXUS_UUID.into(), + NEXUS_SIZE, + vec![ms2_share_uri.clone(), ms3_share_uri.clone()], + ) + .await; + let nexus_tgt = nvmf_connect(nexus_uri.clone()); + let fio_receiver = run_fio(nexus_tgt, NEXUS_SIZE); + // Let fio run for a bit. + std::thread::sleep(Duration::from_secs(2)); + + // Cause an I/O error by unsharing a child then wait for fio to complete. + bdev_unshare(ms3).await; + let fio_result = fio_receiver.recv().unwrap(); + assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); + + // Check the state of the nexus and children. + assert_eq!( + get_nexus_state(nexus_hdl, &NEXUS_UUID).await, + NexusState::NexusDegraded as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, &NEXUS_UUID, &ms2_share_uri).await, + ChildState::ChildOnline as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, &NEXUS_UUID, &ms3_share_uri).await, + ChildState::ChildFaulted as i32 + ); + + // Teardown. + nvmf_disconnect(nexus_uri); + nexus_hdl + .mayastor + .destroy_nexus(DestroyNexusRequest { + uuid: NEXUS_UUID.into(), + }) + .await + .expect("Failed to destroy nexus"); +} + +/// Create and publish a nexus with the given uuid and size. +/// The nexus is published over NVMf and the nexus uri is returned. +async fn nexus_create_and_publish( + hdl: &mut RpcHandle, + uuid: String, + size: u64, + children: Vec, +) -> String { + hdl.mayastor + .create_nexus(CreateNexusRequest { + uuid: uuid.clone(), + size, + children, + }) + .await + .unwrap(); + hdl.mayastor + .publish_nexus(PublishNexusRequest { + uuid: uuid.clone(), + key: "".into(), + share: ShareProtocolNexus::NexusNvmf as i32, + }) + .await + .unwrap() + .into_inner() + .device_uri +} + +/// Create and share a bdev over NVMf. +async fn bdev_create_and_share(hdl: &mut RpcHandle) -> String { + const DISK_NAME: &str = "disk0"; + hdl.bdev + .create(BdevUri { + uri: format!("malloc:///{}?size_mb=100", DISK_NAME), + }) + .await + .unwrap(); + hdl.bdev + .share(BdevShareRequest { + name: DISK_NAME.into(), + proto: "nvmf".into(), + }) + .await + .unwrap() + .into_inner() + .uri +} + +/// Unshare a bdev. +async fn bdev_unshare(hdl: &mut RpcHandle) { + hdl.bdev + .unshare(CreateReply { + name: "disk0".to_string(), + }) + .await + .unwrap(); +} + +/// Connect to a NVMf target and return the device name. +fn nvmf_connect(uri: String) -> String { + let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); + let devices = target.connect().unwrap(); + devices[0].path.to_string() +} + +// Disconnect from a NVMf target. +fn nvmf_disconnect(uri: String) { + let target = nvmeadm::NvmeTarget::try_from(uri).unwrap(); + target.disconnect().unwrap(); +} + +/// Return the state of the nexus with the given uuid. 
+async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> i32 { + get_nexus(hdl, uuid).await.state +} + +/// Return the nexus with the given uuid. +async fn get_nexus(hdl: &mut RpcHandle, uuid: &str) -> Nexus { + let nexus_list = hdl + .mayastor + .list_nexus(Null {}) + .await + .unwrap() + .into_inner() + .nexus_list; + let n = nexus_list + .iter() + .filter(|n| n.uuid == uuid) + .collect::>(); + assert_eq!(n.len(), 1); + n[0].clone() +} + +/// Return the state of a child. +async fn get_child_state( + hdl: &mut RpcHandle, + nexus_uuid: &str, + child_uri: &str, +) -> i32 { + let n = get_nexus(hdl, nexus_uuid).await; + let c = n + .children + .iter() + .filter(|c| c.uri == child_uri) + .collect::>(); + assert_eq!(c.len(), 1); + c[0].state +} + +/// Run fio in a spawned thread and return a receiver channel which is signalled +/// when fio completes. +fn run_fio(target: String, target_size: u64) -> Receiver { + let (s, r) = unbounded::(); + std::thread::spawn(move || { + if let Err(e) = s.send(common::fio_verify_size(&target, target_size)) { + tracing::error!("Failed to send fio complete with error {}", e); + } + }); + r +} From 7be912ca6c2193c60266a0fe77fa48586efb986c Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Wed, 9 Dec 2020 08:54:48 +0000 Subject: [PATCH 29/85] Add tests for disconnection with no nexus IO Fixes for the change in MSV spec --- mayastor-test/e2e/common/util.go | 16 +++--- .../node_disconnect_iscsi_drop_test.go | 0 .../node_disconnect_iscsi_reject_test.go | 0 .../node_disconnect_iscsi_reject_idle_test.go | 55 +++++++++++++++++++ .../lib/node_disconnect_lib.go | 29 ++++++++++ .../node_disconnect_nvmf_drop_test.go | 0 .../node_disconnect_nvmf_reject_test.go | 0 .../node_disconnect_nvmf_reject_idle_test.go | 55 +++++++++++++++++++ .../node_disconnect_setup_test.go | 0 .../node_disconnect_teardown_test.go | 0 mayastor-test/e2e/node_disconnect/test.sh | 15 +++-- 11 files changed, 156 insertions(+), 14 deletions(-) rename mayastor-test/e2e/node_disconnect/{node_disconnect_iscsi_drop => iscsi_drop}/node_disconnect_iscsi_drop_test.go (100%) rename mayastor-test/e2e/node_disconnect/{node_disconnect_iscsi_reject => iscsi_reject}/node_disconnect_iscsi_reject_test.go (100%) create mode 100644 mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go rename mayastor-test/e2e/node_disconnect/{node_disconnect_nvmf_drop => nvmf_drop}/node_disconnect_nvmf_drop_test.go (100%) rename mayastor-test/e2e/node_disconnect/{node_disconnect_nvmf_reject => nvmf_reject}/node_disconnect_nvmf_reject_test.go (100%) create mode 100644 mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go rename mayastor-test/e2e/node_disconnect/{node_disconnect_setup => setup}/node_disconnect_setup_test.go (100%) rename mayastor-test/e2e/node_disconnect/{node_disconnect_teardown => teardown}/node_disconnect_teardown_test.go (100%) diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go index 7aefac6f5..8569e7425 100644 --- a/mayastor-test/e2e/common/util.go +++ b/mayastor-test/e2e/common/util.go @@ -46,9 +46,8 @@ func DeleteDeployYaml(filename string) { // Status part of the mayastor volume CRD type MayastorVolStatus struct { - State string - Reason string - Node string + State string + Node string } func GetMSV(uuid string) *MayastorVolStatus { @@ -84,11 +83,12 @@ func GetMSV(uuid string) *MayastorVolStatus { case "state": msVol.State = val.Interface().(string) break - case "reason": - msVol.Reason = 
val.Interface().(string) - break - case "node": - msVol.Node = val.Interface().(string) + case "nexus": + nexusInt := val.Interface().(map[string]interface{}) + if node, ok := nexusInt["node"].(string); ok { + msVol.Node = node + } + Expect(msVol.Node != "") break } } diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go b/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_drop/node_disconnect_iscsi_drop_test.go rename to mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go b/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_iscsi_reject/node_disconnect_iscsi_reject_test.go rename to mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go diff --git a/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go b/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go new file mode 100644 index 000000000..90553ebe0 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go @@ -0,0 +1,55 @@ +package node_disconnect_iscsi_reject_idle_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "REJECT" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossWhenIdleTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss iSCSI reject when idle") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible when no IO is received", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
+ By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go index 9a1795060..0b5d89d07 100644 --- a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -42,6 +42,7 @@ func GetNodes(uuid string) (string, []string) { var nodeToIsolate = "" nexusNode := common.GetMsvNode(uuid) + Expect(nexusNode != "") fmt.Printf("nexus node is \"%s\"\n", nexusNode) var otherAddresses []string @@ -97,6 +98,34 @@ func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod str common.RunFio("fio", 20) } +// Remove the replica without running IO and verify that the volume becomes degraded but is still functional +func LossWhenIdleTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { + fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) + + DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + + fmt.Printf("waiting up to 90s for disconnection to affect the nexus\n") + //time.Sleep(90 * time.Second) + + Eventually(func() string { + return common.GetMsvState(uuid) + }, + 90*time.Second, // timeout + "1s", // polling interval + ).Should(Equal("degraded")) + + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + + fmt.Printf("running fio while node is disconnected\n") + common.RunFio("fio", 20) + + fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) + ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) + + fmt.Printf("running fio when node is reconnected\n") + common.RunFio("fio", 20) +} + // Common steps required when setting up the test func Setup(pvc_name string, storage_class_name string) string { uuid := common.MkPVC(fmt.Sprintf(pvc_name), storage_class_name) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go b/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_drop/node_disconnect_nvmf_drop_test.go rename to mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go b/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_nvmf_reject/node_disconnect_nvmf_reject_test.go rename to mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go b/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go new file mode 100644 index 000000000..92a6c3122 --- /dev/null +++ b/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go @@ -0,0 +1,55 @@ +package node_disconnect_nvmf_reject_idle_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "REJECT" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.LossWhenIdleTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss NVMF reject when idle") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify behaviour when a node becomes inaccessible when no IO is received", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go b/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_setup/node_disconnect_setup_test.go rename to mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go diff --git a/mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go b/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/node_disconnect_teardown/node_disconnect_teardown_test.go rename to mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/mayastor-test/e2e/node_disconnect/test.sh index b7a6722f5..6b666bdce 100755 --- a/mayastor-test/e2e/node_disconnect/test.sh +++ b/mayastor-test/e2e/node_disconnect/test.sh @@ -3,14 +3,17 @@ set -e timeout=200 -(cd node_disconnect_setup && go test -timeout "${timeout}s") +(cd setup && go test -timeout "${timeout}s") -(cd node_disconnect_nvmf_reject && go test -timeout "${timeout}s") -(cd node_disconnect_iscsi_reject && go test -timeout "${timeout}s") +(cd nvmf_reject && go test -timeout "${timeout}s") +(cd iscsi_reject && go test -timeout "${timeout}s") + +(cd nvmf_reject_idle && go test -timeout "${timeout}s") +(cd iscsi_reject_idle && go test -timeout "${timeout}s") # These tests currently fail -# (cd node_disconnect_nvmf_drop && go test -timeout "${timeout}s") -# (cd node_disconnect_iscsi_drop && go test -timeout "${timeout}s") +# (cd nvmf_drop && go test -timeout "${timeout}s") +# (cd iscsi_drop && go test -timeout "${timeout}s") -(cd node_disconnect_teardown && go test -timeout "${timeout}s") +(cd teardown && go test -timeout "${timeout}s") From db8dbae28d84b160d72d50faf558f78bd87baf63 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Mon, 14 Dec 2020 11:38:42 +0000 Subject: [PATCH 30/85] Allow Mayastor container to access udev database Mayastor requires access to the udev database that resides in /run/udev in order for the ListBlockDevices() gRPC 
request to function correctly. This access must be provided via an explicit volume mount when Mayastor is run from within a container. Without such access, the gRPC call returns partial and (highly misleading) incorrect information. cjones1024 carried out the investigation and made the original changes. I have moved the changes from their original place in deploy/mayastor-daemonset.yaml to chart/templates/mayastor-daemonset.yaml as this is now used to construct the YAML file in the deploy directory. --- chart/templates/mayastor-daemonset.yaml | 6 ++++++ deploy/mayastor-daemonset.yaml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/chart/templates/mayastor-daemonset.yaml b/chart/templates/mayastor-daemonset.yaml index aae72087b..9ee09fbe3 100644 --- a/chart/templates/mayastor-daemonset.yaml +++ b/chart/templates/mayastor-daemonset.yaml @@ -65,6 +65,8 @@ spec: volumeMounts: - name: device mountPath: /dev + - name: udev + mountPath: /run/udev - name: dshm mountPath: /dev/shm - name: configlocation @@ -92,6 +94,10 @@ spec: hostPath: path: /dev type: Directory + - name: udev + hostPath: + path: /run/udev + type: Directory - name: dshm emptyDir: medium: Memory diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index 1dc62a797..dc4f54484 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -67,6 +67,8 @@ spec: volumeMounts: - name: device mountPath: /dev + - name: udev + mountPath: /run/udev - name: dshm mountPath: /dev/shm - name: configlocation @@ -94,6 +96,10 @@ spec: hostPath: path: /dev type: Directory + - name: udev + hostPath: + path: /run/udev + type: Directory - name: dshm emptyDir: medium: Memory From f4d1d48f19c50d61165376fba7e43da2fc25367a Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Fri, 11 Dec 2020 14:29:07 +0000 Subject: [PATCH 31/85] Add support for testing on volumes with more than 3 nodes. Ensure node to isolate is one hosting a replica for the volume. Use "test -c" to only compile disabled tests. Better test descriptions. Check msv is fully populated. 
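
Many of the assertion changes below replace bare Gomega Expect() calls:
without a matcher, Expect registers no check at all, so an expression
like Expect(len(nodeList) >= 3) always "passes". A minimal sketch of the
pitfall and the corrected form (illustrative function name; assumes the
Gomega fail handler is registered, as it is in these suites):

    package sketch

    import (
        . "github.com/onsi/gomega"
    )

    func checkClusterSize(nodeList []string) {
        // No-op: wraps the bool but applies no matcher, so it never fails.
        Expect(len(nodeList) >= 3)

        // Corrected form: the matcher actually runs the check.
        Expect(len(nodeList)).To(BeNumerically(">=", 3))

        // Likewise for strings:
        // Expect(s != "") becomes Expect(s).NotTo(Equal(""))
    }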
--- mayastor-test/e2e/common/util.go | 38 ++++++++++++++----- .../node_disconnect_iscsi_drop_test.go | 2 +- .../node_disconnect_iscsi_reject_test.go | 2 +- .../node_disconnect_iscsi_reject_idle_test.go | 2 +- .../lib/node_disconnect_lib.go | 27 +++++++------ .../node_disconnect_nvmf_drop_test.go | 2 +- .../node_disconnect_nvmf_reject_test.go | 2 +- .../node_disconnect_nvmf_reject_idle_test.go | 2 +- .../setup/node_disconnect_setup_test.go | 4 +- .../teardown/node_disconnect_teardown_test.go | 2 +- mayastor-test/e2e/node_disconnect/test.sh | 6 +-- 11 files changed, 54 insertions(+), 35 deletions(-) diff --git a/mayastor-test/e2e/common/util.go b/mayastor-test/e2e/common/util.go index 8569e7425..5b9ea7cf3 100644 --- a/mayastor-test/e2e/common/util.go +++ b/mayastor-test/e2e/common/util.go @@ -46,8 +46,9 @@ func DeleteDeployYaml(filename string) { // Status part of the mayastor volume CRD type MayastorVolStatus struct { - State string - Node string + State string + Node string + Replicas []string } func GetMSV(uuid string) *MayastorVolStatus { @@ -74,6 +75,9 @@ func GetMSV(uuid string) *MayastorVolStatus { return nil } msVol := MayastorVolStatus{} + + msVol.Replicas = make([]string, 0, 4) + v := reflect.ValueOf(status) if v.Kind() == reflect.Map { for _, key := range v.MapKeys() { @@ -82,18 +86,33 @@ func GetMSV(uuid string) *MayastorVolStatus { switch sKey { case "state": msVol.State = val.Interface().(string) - break case "nexus": nexusInt := val.Interface().(map[string]interface{}) if node, ok := nexusInt["node"].(string); ok { msVol.Node = node } - Expect(msVol.Node != "") - break + case "replicas": + replicas := val.Interface().([]interface{}) + for _, replica := range replicas { + replicaMap := reflect.ValueOf(replica) + if replicaMap.Kind() == reflect.Map { + for _, field := range replicaMap.MapKeys() { + switch field.Interface().(string) { + case "node": + value := replicaMap.MapIndex(field) + msVol.Replicas = append(msVol.Replicas, value.Interface().(string)) + } + } + } + } } } + // Note: msVol.Node can be unassigned here if the volume is not mounted + Expect(msVol.State).NotTo(Equal("")) + Expect(len(msVol.Replicas)).To(BeNumerically(">", 0)) + return &msVol } - return &msVol + return nil } // Check for a deleted Mayastor Volume, @@ -197,11 +216,12 @@ func GetMsvState(uuid string) string { return fmt.Sprintf("%s", msv.State) } -// Retrieve the nexus node hosting the Mayastor Volume -func GetMsvNode(uuid string) string { +// Retrieve the nexus node hosting the Mayastor Volume, +// and the names of the replica nodes +func GetMsvNodes(uuid string) (string, []string) { msv := GetMSV(uuid) Expect(msv).ToNot(BeNil()) - return fmt.Sprintf("%s", msv.Node) + return msv.Node, msv.Replicas } // Create a PVC and verify that diff --git a/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go b/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go index c575fadde..e2cea7062 100644 --- a/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go +++ b/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go @@ -30,7 +30,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { + It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go 
b/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go index 5f9cf4412..07d728263 100644 --- a/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go +++ b/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go @@ -30,7 +30,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { + It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go b/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go index 90553ebe0..c80ace128 100644 --- a/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go +++ b/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go @@ -31,7 +31,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible when no IO is received", func() { + It("should verify iscsi nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go index 0b5d89d07..60845096a 100644 --- a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -10,7 +10,8 @@ import ( ) var ( - defTimeoutSecs = "90s" + defTimeoutSecs = "90s" + disconnectionTimeoutSecs = "90s" ) // disconnect a node from the other nodes in the cluster @@ -41,20 +42,20 @@ func GetNodes(uuid string) (string, []string) { Expect(err).ToNot(HaveOccurred()) var nodeToIsolate = "" - nexusNode := common.GetMsvNode(uuid) - Expect(nexusNode != "") + nexusNode, replicas := common.GetMsvNodes(uuid) + Expect(nexusNode).NotTo(Equal("")) fmt.Printf("nexus node is \"%s\"\n", nexusNode) var otherAddresses []string - // find a node which is not the nexus - for _, node := range nodeList { - if node.NodeName != nexusNode && node.MayastorNode == true { - nodeToIsolate = node.NodeName + // find a node which is not the nexus and is a replica + for _, node := range replicas { + if node != nexusNode { + nodeToIsolate = node break } } - Expect(nodeToIsolate != "") + Expect(nodeToIsolate).NotTo(Equal("")) // get a list of the other ip addresses in the cluster for _, node := range nodeList { @@ -62,7 +63,7 @@ func GetNodes(uuid string) (string, []string) { otherAddresses = append(otherAddresses, node.IPAddress) } } - Expect(len(otherAddresses) != 0) + Expect(len(otherAddresses)).To(BeNumerically(">", 0)) fmt.Printf("node to isolate is \"%s\"\n", nodeToIsolate) return nodeToIsolate, otherAddresses @@ -104,14 +105,12 @@ func LossWhenIdleTest(nodeToIsolate string, otherNodes []string, disconnectionMe DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) - fmt.Printf("waiting up to 90s for disconnection to affect the nexus\n") - //time.Sleep(90 * time.Second) - + fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { return common.GetMsvState(uuid) }, - 90*time.Second, // timeout - "1s", // polling interval + disconnectionTimeoutSecs, // timeout + "1s", // 
polling interval ).Should(Equal("degraded")) fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) diff --git a/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go b/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go index c5c49bad5..006154316 100644 --- a/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go +++ b/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go @@ -30,7 +30,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { + It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go b/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go index ffa55f4ae..7da49c833 100644 --- a/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go +++ b/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go @@ -31,7 +31,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible", func() { + It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go b/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go index 92a6c3122..ecae43cf4 100644 --- a/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go +++ b/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go @@ -31,7 +31,7 @@ func TestNodeLoss(t *testing.T) { } var _ = Describe("Mayastor node loss test", func() { - It("should verify behaviour when a node becomes inaccessible when no IO is received", func() { + It("should verify nvmf nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { lossTest() }) }) diff --git a/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go b/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go index 1632e2ace..ad3f16fcb 100644 --- a/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go +++ b/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go @@ -28,7 +28,7 @@ func disconnectSetupTest() { nodeList, err := common.GetNodeLocs() Expect(err).ToNot(HaveOccurred()) - Expect(len(nodeList) >= 3) + Expect(len(nodeList)).To(BeNumerically(">=", 3)) // sort the nodes - that also means k8s-1 is the refuge on local clusters sort.Slice(nodeList, func(i, j int) bool { return nodeList[i].NodeName < nodeList[j].NodeName }) @@ -43,7 +43,7 @@ func disconnectSetupTest() { common.LabelNode(refugeNode, "openebs.io/podrefuge=true") } } - Expect(refugeNode != "") + Expect(refugeNode).NotTo(Equal("")) moacOnRefugeNode := common.PodPresentOnNode(moac_regexp, namespace, refugeNode) diff --git a/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go b/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go index da67c60b8..22ca26e1c 100644 --- a/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go +++ 
b/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go
index da67c60b8..22ca26e1c 100644
--- a/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go
+++ b/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go
@@ -23,7 +23,7 @@ func disconnectTeardownTest() {
 	nodeList, err := common.GetNodeLocs()
 	Expect(err).ToNot(HaveOccurred())
-	Expect(len(nodeList) >= 3)
+	Expect(len(nodeList)).To(BeNumerically(">=", 3))
 
 	// apply/remove the labels whether present or not
 	// An error will not occur if the label is already present/absent
diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/mayastor-test/e2e/node_disconnect/test.sh
index 6b666bdce..9c3f18896 100755
--- a/mayastor-test/e2e/node_disconnect/test.sh
+++ b/mayastor-test/e2e/node_disconnect/test.sh
@@ -11,9 +11,9 @@ timeout=200
 (cd nvmf_reject_idle && go test -timeout "${timeout}s")
 (cd iscsi_reject_idle && go test -timeout "${timeout}s")
 
-# These tests currently fail
-# (cd nvmf_drop && go test -timeout "${timeout}s")
-# (cd iscsi_drop && go test -timeout "${timeout}s")
+# These two tests currently fail so are run with -c (compile only)
+(cd nvmf_drop && go test -c -timeout "${timeout}s")
+(cd iscsi_drop && go test -c -timeout "${timeout}s")
 
 (cd teardown && go test -timeout "${timeout}s")

From 5cd8418b9651ca163f40e888eb3b66bb1f1cb7ef Mon Sep 17 00:00:00 2001
From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com>
Date: Mon, 14 Dec 2020 17:01:36 +0000
Subject: [PATCH 32/85] mayastor: Build against SPDK 21.01-pre

This includes spdk/spdk@6d9d3f87 to fix #555.

Sync with upstream changes to nvme_bdev_opts (kato configuration),
spdk_bdev_fn_table and iscsi_portal_grp_open. Remove a few members from
TcpTransportOpts that are unused and should have been removed in the
20.10 rebase.
---
 mayastor/src/bdev/nexus/nexus_fn_table.rs |  1 +
 mayastor/src/subsys/config/opts.rs        | 19 ++++++-------------
 mayastor/src/target/iscsi.rs              |  2 +-
 nix/pkgs/libspdk/default.nix              |  6 +++---
 4 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/mayastor/src/bdev/nexus/nexus_fn_table.rs b/mayastor/src/bdev/nexus/nexus_fn_table.rs
index 1d11b1a05..b53de5014 100644
--- a/mayastor/src/bdev/nexus/nexus_fn_table.rs
+++ b/mayastor/src/bdev/nexus/nexus_fn_table.rs
@@ -43,6 +43,7 @@ impl NexusFnTable {
             dump_info_json: Some(Self::dump_info_json),
             write_config_json: None,
             get_spin_time: None,
+            get_module_ctx: None,
         };
 
         NexusFnTable {
diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs
index 54c3f353d..1990a12e5 100644
--- a/mayastor/src/subsys/config/opts.rs
+++ b/mayastor/src/subsys/config/opts.rs
@@ -140,17 +140,8 @@ pub struct TcpTransportOpts {
     num_shared_buf: u32,
     /// cache size
     buf_cache_size: u32,
-    /// RDMA only
-    max_srq_depth: u32,
-    /// RDMA only
-    no_srq: bool,
-    /// optimize success
-    ch2_success: bool,
     /// dif
     dif_insert_or_strip: bool,
-    /// The socket priority of the connection owned by this transport (TCP
-    /// only)
-    sock_priority: u32,
     /// abort execution timeout
     abort_timeout_sec: u32,
 }
@@ -162,15 +153,11 @@ impl Default for TcpTransportOpts {
             in_capsule_data_size: 4096,
             max_io_size: 131_072,
             io_unit_size: 131_072,
-            ch2_success: true,
             max_qpairs_per_ctrl: 128,
             num_shared_buf: 2048,
             buf_cache_size: 64,
             dif_insert_or_strip: false,
             max_aq_depth: 128,
-            max_srq_depth: 0, // RDMA
-            no_srq: false, // RDMA
-            sock_priority: 0,
             abort_timeout_sec: 1,
         }
     }
@@ -194,6 +181,7 @@ impl From<TcpTransportOpts> for spdk_nvmf_transport_opts {
             abort_timeout_sec: o.abort_timeout_sec,
             association_timeout: 120000,
             transport_specific: std::ptr::null(),
+            opts_size: std::mem::size_of::<spdk_nvmf_transport_opts>() as u64,
         }
     }
 }
@@ -206,6 +194,8 @@ pub struct NvmeBdevOpts {
     pub action_on_timeout: u32,
     /// timeout for each command
     pub timeout_us: u64,
+    /// keep-alive timeout
+    pub keep_alive_timeout_ms: u32,
     /// retry count
     pub retry_count: u32,
     /// TODO
@@ -250,6 +240,7 @@ impl Default for NvmeBdevOpts {
         Self {
             action_on_timeout: SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT,
             timeout_us: 30_000_000,
+            keep_alive_timeout_ms: 10_000,
             retry_count: 3,
             arbitration_burst: 0,
             low_priority_weight: 0,
@@ -268,6 +259,7 @@ impl From<spdk_bdev_nvme_opts> for NvmeBdevOpts {
         Self {
             action_on_timeout: o.action_on_timeout,
             timeout_us: o.timeout_us,
+            keep_alive_timeout_ms: o.keep_alive_timeout_ms,
             retry_count: o.retry_count,
             arbitration_burst: o.arbitration_burst,
             low_priority_weight: o.low_priority_weight,
@@ -286,6 +278,7 @@ impl From<&NvmeBdevOpts> for spdk_bdev_nvme_opts {
         Self {
             action_on_timeout: o.action_on_timeout,
             timeout_us: o.timeout_us,
+            keep_alive_timeout_ms: o.keep_alive_timeout_ms,
             retry_count: o.retry_count,
             arbitration_burst: o.arbitration_burst,
             low_priority_weight: o.low_priority_weight,
diff --git a/mayastor/src/target/iscsi.rs b/mayastor/src/target/iscsi.rs
index 9f1a703c6..176e617b3 100644
--- a/mayastor/src/target/iscsi.rs
+++ b/mayastor/src/target/iscsi.rs
@@ -358,7 +358,7 @@ fn create_portal_group(
         return Err(Error::CreatePortal {});
     }
     iscsi_portal_grp_add_portal(pg, p);
-    if iscsi_portal_grp_open(pg) != 0 {
+    if iscsi_portal_grp_open(pg, false) != 0 {
         iscsi_portal_grp_release(pg);
         return Err(Error::AddPortal {});
     }
diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix
index fb29368e6..d7f4d18b4 100644
--- a/nix/pkgs/libspdk/default.nix
+++ b/nix/pkgs/libspdk/default.nix
@@ -19,13 +19,13 @@ let
   # Derivation attributes for production version of libspdk
   drvAttrs = rec {
-    version = "20.10";
+    version = "21.01-pre";
 
     src = fetchFromGitHub {
       owner = "openebs";
       repo = "spdk";
-      rev = "46b25360887c5d19433f575c7ad14259721abc6f";
-      sha256 = "0cjnpkqx95cgrk9kbm4drrd5piimprz7wsbiahsllm1j2avdzsfs";
+      rev = "285a96fb4bd5fb53876635ec86ebe55089b1ffde";
+      sha256 = "0bn40y28iafma19q7fh15ga651d7bcpx85ih5lyi4azvb0l0zjqv";
       #sha256 = stdenv.lib.fakeSha256;
       fetchSubmodules = true;
     };

From 5497d8dcbee2967e117ee95bc1bdfeee7df62699 Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Tue, 8 Dec 2020 09:17:33 +0000
Subject: [PATCH 33/85] Dynamic Provisioning CSI E2E tests

Dynamic provisioning tests ported from
https://github.com/kubernetes-csi/csi-driver-nfs
https://github.com/kubernetes-csi/csi-driver-smb

Workaround MSV leakage till CAS-566 is fixed.
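
The ported suite reads its tunables from the environment, falling back to
built-in defaults (the SMALL_CLAIM_SIZE and LARGE_CLAIM_SIZE variables
documented in the README below). A minimal sketch of that lookup pattern;
envOrDefault is an illustrative helper name, while the keys and defaults
are the suite's own:

    package main

    import (
        "fmt"
        "os"
    )

    // envOrDefault returns the environment value when set, otherwise
    // the supplied default.
    func envOrDefault(key, def string) string {
        if v := os.Getenv(key); v != "" {
            return v
        }
        return def
    }

    func main() {
        small := envOrDefault("SMALL_CLAIM_SIZE", "50Mi")  // suite default
        large := envOrDefault("LARGE_CLAIM_SIZE", "500Mi") // suite default
        fmt.Println("claim sizes:", small, large)
    }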
---
 mayastor-test/csi-e2e/README.md               |  41 +
 .../csi-e2e/check_driver_pods_restart.sh      |  30 +
 mayastor-test/csi-e2e/driver/driver.go        |  66 ++
 .../csi-e2e/driver/mayastor_driver.go         |  61 ++
 .../csi-e2e/dynamic_provisioning_test.go      | 270 ++++++
 mayastor-test/csi-e2e/e2e_suite_test.go       | 103 +++
 mayastor-test/csi-e2e/go.mod                  |  62 ++
 mayastor-test/csi-e2e/go.sum                  | 861 ++++++++++++++++++
 mayastor-test/csi-e2e/runtest.sh              |   8 +
 ...namically_provisioned_cmd_volume_tester.go |  50 +
 ...cally_provisioned_collocated_pod_tester.go |  57 ++
 ...namically_provisioned_delete_pod_tester.go |  70 ++
 ...ically_provisioned_pod_with_multiple_pv.go |  50 +
 ...lly_provisioned_read_only_volume_tester.go |  59 ++
 ...cally_provisioned_reclaim_policy_tester.go |  49 +
 mayastor-test/csi-e2e/testsuites/specs.go     | 161 ++++
 .../csi-e2e/testsuites/testsuites.go          | 560 ++++++++++++
 17 files changed, 2558 insertions(+)
 create mode 100644 mayastor-test/csi-e2e/README.md
 create mode 100755 mayastor-test/csi-e2e/check_driver_pods_restart.sh
 create mode 100644 mayastor-test/csi-e2e/driver/driver.go
 create mode 100644 mayastor-test/csi-e2e/driver/mayastor_driver.go
 create mode 100644 mayastor-test/csi-e2e/dynamic_provisioning_test.go
 create mode 100644 mayastor-test/csi-e2e/e2e_suite_test.go
 create mode 100644 mayastor-test/csi-e2e/go.mod
 create mode 100644 mayastor-test/csi-e2e/go.sum
 create mode 100755 mayastor-test/csi-e2e/runtest.sh
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/specs.go
 create mode 100644 mayastor-test/csi-e2e/testsuites/testsuites.go

diff --git a/mayastor-test/csi-e2e/README.md b/mayastor-test/csi-e2e/README.md
new file mode 100644
index 000000000..bbeb2304e
--- /dev/null
+++ b/mayastor-test/csi-e2e/README.md
@@ -0,0 +1,41 @@
+# CSI E2E Tests for Mayastor
+These tests have been ported from the Kubernetes CSI NFS driver at https://github.com/kubernetes-csi/csi-driver-nfs
+
+## Prerequisites
+`golang` must be installed on the system running the tests
+
+## Environment variables
+* `MAYASTOR_CSI_DRIVER` - Override for driver name, defaults to `io.openebs.csi-mayastor`
+* `SMALL_CLAIM_SIZE` - Size of small PVCs created by the testsuite, defaults to `50Mi`
+* `LARGE_CLAIM_SIZE` - Size of large PVCs created by the testsuite, defaults to `500Mi`
+
+## Changes for mayastor
+* Location of the test directory within the repo is `mayastor-test/csi-e2e`
+* Naming from `csi-nfs` to `csi-mayastor`
+* Claim sizes have been downsized from
+  * `10Gi` to `50Mi`
+  * `100Gi` to `500Mi`
+* Claim sizes have been made configurable through environment variables.
+
+## Running the testsuite
+Kubernetes config from `$HOME/.kube/config` is used.
+
+To run the tests, execute `runtest.sh` from this directory.
+ +## Storage requirements +`6 * LARGE_CLAIM_SIZE` + +## List of dynamic provisioning tests +* should create a volume on demand with mount options +* should create multiple PV objects, bind to PVCs and attach all to different pods on the same node +* should create a volume on demand and mount it as readOnly in a pod +* should create a deployment object, write to and read from it, delete the pod and write to and read from it again +* should delete PV with reclaimPolicy "Delete" +* should retain PV with reclaimPolicy "Retain" +* should create a pod with multiple volumes + +### TODO +Remove workaround for side effect of running this test, when CAS-566 is fixed. +In `mayastor-test/csi-e2e/runtest.sh` all Mayastor Volumes are deleted after +the test run. Until CAS-566 is fixed this is required as this will have an +impact on tests run subsequently in particular the uninstall test. diff --git a/mayastor-test/csi-e2e/check_driver_pods_restart.sh b/mayastor-test/csi-e2e/check_driver_pods_restart.sh new file mode 100755 index 000000000..9ace55bc3 --- /dev/null +++ b/mayastor-test/csi-e2e/check_driver_pods_restart.sh @@ -0,0 +1,30 @@ +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash + +set -e + +echo "check the driver pods if restarts ..." +restarts=$(kubectl get pods -n mayastor | grep -e mayastor -e moac | awk '{print $4}') +for num in $restarts +do + if [ "$num" -ne "0" ] + then + echo "there is a driver pod which has restarted" + exit 3 + fi +done +echo "no driver pods have restarted" +echo "======================================================================================" diff --git a/mayastor-test/csi-e2e/driver/driver.go b/mayastor-test/csi-e2e/driver/driver.go new file mode 100644 index 000000000..7aab4d2ca --- /dev/null +++ b/mayastor-test/csi-e2e/driver/driver.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver + +import ( + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PVTestDriver interface { + DynamicPVTestDriver + // VolumeSnapshotTestDriver +} + +// DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV +type DynamicPVTestDriver interface { + // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume + GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass +} + +func getStorageClass( + generateName string, + provisioner string, + parameters map[string]string, + mountOptions []string, + reclaimPolicy *v1.PersistentVolumeReclaimPolicy, + bindingMode *storagev1.VolumeBindingMode, + allowedTopologies []v1.TopologySelectorTerm, +) *storagev1.StorageClass { + if reclaimPolicy == nil { + defaultReclaimPolicy := v1.PersistentVolumeReclaimDelete + reclaimPolicy = &defaultReclaimPolicy + } + if bindingMode == nil { + defaultBindingMode := storagev1.VolumeBindingImmediate + bindingMode = &defaultBindingMode + } + allowVolumeExpansion := false + return &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: generateName, + }, + Provisioner: provisioner, + Parameters: parameters, + MountOptions: mountOptions, + ReclaimPolicy: reclaimPolicy, + VolumeBindingMode: bindingMode, + AllowedTopologies: allowedTopologies, + AllowVolumeExpansion: &allowVolumeExpansion, + } +} diff --git a/mayastor-test/csi-e2e/driver/mayastor_driver.go b/mayastor-test/csi-e2e/driver/mayastor_driver.go new file mode 100644 index 000000000..0be68bfff --- /dev/null +++ b/mayastor-test/csi-e2e/driver/mayastor_driver.go @@ -0,0 +1,61 @@ +package driver + +import ( + "fmt" + "os" + "strings" + + // "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + // "k8s.io/apimachinery/pkg/api/resource" + // metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +// MayastorDriverNameVar is the environment variable use to switch the driver to be used. +const MayastorDriverNameVar = "MAYASTOR_CSI_DRIVER" + +// MayastorDriver implements DynamicPVTestDriver interface +type MayastorDriver struct { + driverName string +} + +// InitMayastorDriver returns MayastorDriver that implements DynamicPVTestDriver interface +func InitMayastorDriver() PVTestDriver { + driverName := os.Getenv(MayastorDriverNameVar) + if driverName == "" { + driverName = "io.openebs.csi-mayastor" + } + + klog.Infof("Using Mayastor driver: %s", driverName) + return &MayastorDriver{ + driverName: driverName, + } +} + +// normalizeProvisioner replaces any '/' character in the provisioner name to '-'. +// StorageClass name cannot contain '/' character. 
+func normalizeProvisioner(provisioner string) string { + return strings.ReplaceAll(provisioner, "/", "-") +} + +func (d *MayastorDriver) GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass { + provisioner := d.driverName + generateName := fmt.Sprintf("%s-%s-dynamic-sc-", namespace, normalizeProvisioner(provisioner)) + return getStorageClass(generateName, provisioner, parameters, mountOptions, reclaimPolicy, bindingMode, nil) +} + +/* +func (d *MayastorDriver) GetVolumeSnapshotClass(namespace string) *v1beta1.VolumeSnapshotClass { + provisioner := d.driverName + generateName := fmt.Sprintf("%s-%s-dynamic-sc-", namespace, normalizeProvisioner(provisioner)) + return getVolumeSnapshotClass(generateName, provisioner) +} +*/ + +func GetParameters() map[string]string { + return map[string]string{ + "skuName": "Standard_LRS", + } +} diff --git a/mayastor-test/csi-e2e/dynamic_provisioning_test.go b/mayastor-test/csi-e2e/dynamic_provisioning_test.go new file mode 100644 index 000000000..d071855a7 --- /dev/null +++ b/mayastor-test/csi-e2e/dynamic_provisioning_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + clientset "k8s.io/client-go/kubernetes" + restclientset "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + "mayastor-csi-e2e/driver" + "mayastor-csi-e2e/testsuites" +) + +// TODO: Make configurable +// was 10Gi +var smallClaimSize = "50Mi" + +// was 100Gi +var largeClaimSize = "500Mi" + +var _ = ginkgo.Describe("Dynamic Provisioning", func() { + f := framework.NewDefaultFramework("mayastor") + + tmp := os.Getenv("SMALL_CLAIM_SIZE") + if tmp != "" { + smallClaimSize = tmp + } + tmp = os.Getenv("LARGE_CLAIM_SIZE") + if tmp != "" { + largeClaimSize = tmp + } + + var ( + cs clientset.Interface + ns *v1.Namespace + testDriver driver.PVTestDriver + ) + + ginkgo.BeforeEach(func() { + checkPodsRestart := testCmd{ + command: "sh", + args: []string{"mayastor-test/csi-e2e/check_driver_pods_restart.sh"}, + startLog: "Check driver pods for restarts", + endLog: "Check successful", + } + execTestCmd([]testCmd{checkPodsRestart}) + + cs = f.ClientSet + ns = f.Namespace + + var err error + _, err = restClient(testsuites.SnapshotAPIGroup, testsuites.APIVersionv1beta1) + if err != nil { + ginkgo.Fail(fmt.Sprintf("could not get rest clientset: %v", err)) + } + }) + + testDriver = driver.InitMayastorDriver() + + ginkgo.It("should create a volume on demand with mount options [mayastor-csi.openebs.io]", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedCmdVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: defaultStorageClassParameters, + } + + test.Run(cs, ns) + }) + + ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [mayastor-csi.openebs.io]", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + { + Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedCollocatedPodTest{ + CSIDriver: testDriver, + Pods: pods, + ColocatePods: true, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) + + // Track issue https://github.com/kubernetes/kubernetes/issues/70505 + ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [mayastor-csi.openebs.io]", func() { + pods := []testsuites.PodDetails{ + { + Cmd: "touch /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + ReadOnly: true, + }, + }, + }, + }, + } + test 
:= testsuites.DynamicallyProvisionedReadOnlyVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) + + ginkgo.It("should create a deployment object, write to and read from it, delete the pod and write to and read from it again [mayastor-csi.openebs.io]", func() { + pod := testsuites.PodDetails{ + Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + } + + podCheckCmd := []string{"cat", "/mnt/test-1/data"} + expectedString := "hello world\n" + + test := testsuites.DynamicallyProvisionedDeletePodTest{ + CSIDriver: testDriver, + Pod: pod, + PodCheck: &testsuites.PodExecCheck{ + Cmd: podCheckCmd, + ExpectedString: expectedString, // pod will be restarted so expect to see 2 instances of string + }, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) + + ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [mayastor-csi.openebs.io]", v1.PersistentVolumeReclaimDelete), func() { + reclaimPolicy := v1.PersistentVolumeReclaimDelete + volumes := []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + ReclaimPolicy: &reclaimPolicy, + }, + } + test := testsuites.DynamicallyProvisionedReclaimPolicyTest{ + CSIDriver: testDriver, + Volumes: volumes, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) + + ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [mayastor-csi.openebs.io]", v1.PersistentVolumeReclaimRetain), func() { + reclaimPolicy := v1.PersistentVolumeReclaimRetain + volumes := []testsuites.VolumeDetails{ + { + ClaimSize: smallClaimSize, + ReclaimPolicy: &reclaimPolicy, + }, + } + test := testsuites.DynamicallyProvisionedReclaimPolicyTest{ + CSIDriver: testDriver, + Volumes: volumes, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) + + ginkgo.It("should create a pod with multiple volumes [mayastor-csi.openebs.io]", func() { + var cmds []string + volumes := []testsuites.VolumeDetails{} + for i := 1; i <= 6; i++ { + volume := testsuites.VolumeDetails{ + ClaimSize: largeClaimSize, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + } + volumes = append(volumes, volume) + cmds = append(cmds, + fmt.Sprintf("echo 'helloWorld' > /mnt/test-%d/data && grep 'helloWorld' /mnt/test-%d/data", + i, i)) + } + + pods := []testsuites.PodDetails{ + { + Cmd: strings.Join(cmds, " && "), + Volumes: volumes, + }, + } + test := testsuites.DynamicallyProvisionedPodWithMultiplePVsTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: defaultStorageClassParameters, + } + test.Run(cs, ns) + }) +}) + +func restClient(group string, version string) (restclientset.Interface, error) { + config, err := framework.LoadConfig() + if err != nil { + ginkgo.Fail(fmt.Sprintf("could not load config: %v", err)) + } + gv := schema.GroupVersion{Group: group, Version: version} + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: serializer.NewCodecFactory(runtime.NewScheme())} + return restclientset.RESTClientFor(config) +} diff --git a/mayastor-test/csi-e2e/e2e_suite_test.go b/mayastor-test/csi-e2e/e2e_suite_test.go new file mode 100644 
index 000000000..835ad4443
--- /dev/null
+++ b/mayastor-test/csi-e2e/e2e_suite_test.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"flag"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
+	// "github.com/pborman/uuid"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/config"
+)
+
+const (
+	kubeconfigEnvVar = "KUBECONFIG"
+)
+
+var (
+	defaultStorageClassParameters = map[string]string{
+		"repl":     "1",
+		"protocol": "nvmf",
+	}
+)
+
+type testCmd struct {
+	command  string
+	args     []string
+	startLog string
+	endLog   string
+}
+
+var _ = ginkgo.BeforeSuite(func() {
+	// k8s.io/kubernetes/test/e2e/framework requires env KUBECONFIG to be set
+	// it does not fall back to defaults
+	if os.Getenv(kubeconfigEnvVar) == "" {
+		kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
+		_ = os.Setenv(kubeconfigEnvVar, kubeconfig)
+	}
+	handleFlags()
+	framework.AfterReadingAllFlags(&framework.TestContext)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+
+})
+
+// handleFlags sets up all flags and parses the command line.
+func handleFlags() {
+	config.CopyFlags(config.Flags, flag.CommandLine)
+	framework.RegisterCommonFlags(flag.CommandLine)
+	framework.RegisterClusterFlags(flag.CommandLine)
+	flag.Parse()
+}
+
+func execTestCmd(cmds []testCmd) {
+	err := os.Chdir("../..")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	defer func() {
+		err := os.Chdir("mayastor-test/csi-e2e")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	}()
+
+	projectRoot, err := os.Getwd()
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(strings.HasSuffix(projectRoot, "Mayastor")).To(gomega.Equal(true))
+
+	for _, cmd := range cmds {
+		log.Println(cmd.startLog)
+		cmdSh := exec.Command(cmd.command, cmd.args...)
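+		// Run the command from the repository root, streaming its output
+		// straight to the test's stdout/stderr; a non-zero exit status
+		// fails the suite via the gomega assertion below.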
+		cmdSh.Dir = projectRoot
+		cmdSh.Stdout = os.Stdout
+		cmdSh.Stderr = os.Stderr
+		err = cmdSh.Run()
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		log.Println(cmd.endLog)
+	}
+}
+
+func TestE2E(t *testing.T) {
+	gomega.RegisterFailHandler(ginkgo.Fail)
+	ginkgo.RunSpecs(t, "E2E Suite")
+}
diff --git a/mayastor-test/csi-e2e/go.mod b/mayastor-test/csi-e2e/go.mod
new file mode 100644
index 000000000..86a46a848
--- /dev/null
+++ b/mayastor-test/csi-e2e/go.mod
@@ -0,0 +1,62 @@
+module mayastor-csi-e2e
+
+go 1.15
+
+require (
+	github.com/container-storage-interface/spec v1.2.0
+	github.com/onsi/ginkgo v1.12.1
+	github.com/onsi/gomega v1.10.1
+	github.com/stretchr/testify v1.5.1 // indirect
+	google.golang.org/protobuf v1.25.0 // indirect
+	k8s.io/api v0.19.0
+	k8s.io/apimachinery v0.19.0
+	k8s.io/client-go v0.19.0
+	k8s.io/klog/v2 v2.4.0
+	k8s.io/kubernetes v1.19.0
+)
+
+replace k8s.io/api => k8s.io/api v0.19.0
+
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.0
+
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.19.0
+
+replace k8s.io/apiserver => k8s.io/apiserver v0.19.0
+
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.0
+
+replace k8s.io/client-go => k8s.io/client-go v0.19.0
+
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.0
+
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.0
+
+replace k8s.io/code-generator => k8s.io/code-generator v0.19.0
+
+replace k8s.io/component-base => k8s.io/component-base v0.19.0
+
+replace k8s.io/cri-api => k8s.io/cri-api v0.19.0
+
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.0
+
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.0
+
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.0
+
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.0
+
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0
+
+replace k8s.io/kubectl => k8s.io/kubectl v0.19.0
+
+replace k8s.io/kubelet => k8s.io/kubelet v0.19.0
+
+replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0
+
+replace k8s.io/metrics => k8s.io/metrics v0.19.0
+
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0
+
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.19.0
+
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.19.0
diff --git a/mayastor-test/csi-e2e/go.sum b/mayastor-test/csi-e2e/go.sum
new file mode 100644
index 000000000..70b8ff4c2
--- /dev/null
+++ b/mayastor-test/csi-e2e/go.sum
@@ -0,0 +1,861 @@
+bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agnivade/levenshtein v1.0.1/go.mod 
h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= +github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod 
h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= +github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 
h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= +github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt 
v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cadvisor v0.37.0/go.mod h1:OhDE+goNVel0eGY8mR7Ifq1QUI1in5vJBIgIpcajK/I= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= 
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= +github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= +github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= +github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= +github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0/go.mod 
h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= +github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
+github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper 
v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko= +go.etcd.io/etcd 
v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod 
h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc= +k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= +k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= +k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= +k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.19.0 h1:jLhrL06wGAADbLUUQm8glSLnAGP6c7y5R3p19grkBoY= +k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= +k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= +k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= +k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= +k8s.io/cloud-provider v0.19.0 h1:Ae09nHr6BVPEzmAWbZedYC0gjsIPbt7YsIY0V/NHGr0= +k8s.io/cloud-provider 
v0.19.0/go.mod h1:TYh7b7kQ6wiqF7Ftb+u3lN4IwvgOPbBrcvC3TDAW4cw= +k8s.io/cluster-bootstrap v0.19.0/go.mod h1:kBn1DKyqoM245wzz+AAnGkuysJ+9GqVbPYveTo4KiaA= +k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.0 h1:OueXf1q3RW7NlLlUCj2Dimwt7E1ys6ZqRnq53l2YuoE= +k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= +k8s.io/cri-api v0.19.0/go.mod h1:UN/iU9Ua0iYdDREBXNE9vqCJ7MIh/FW3VIL0d8pw7Fw= +k8s.io/csi-translation-lib v0.19.0/go.mod h1:zGS1YqV8U2So/t4Hz8SoRXMx5y5/KSKnA6BXXxGuo4A= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= +k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= +k8s.io/kube-controller-manager v0.19.0/go.mod h1:uGZyiHK73NxNEN5EZv/Esm3fbCOzeq4ndttMexVZ1L0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-proxy v0.19.0/go.mod h1:7NoJCFgsWb7iiMB1F6bW1St5rEXC+ir2aWiJehASmTU= +k8s.io/kube-scheduler v0.19.0/go.mod h1:1XGjJUgstM0/0x8to+bSGSyCs3Dp3dbCEr3Io/mvd4s= +k8s.io/kubectl v0.19.0 h1:t9uxaZzGvqc2jY96mjnPSjFHtaKOxoUegeGZdaGT6aw= +k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= +k8s.io/kubelet v0.19.0/go.mod h1:cGds22piF/LnFzfAaIT+efvOYBHVYdunqka6NVuNw9g= +k8s.io/kubernetes v1.19.0 h1:ir53YuXsfsuVABmtYHCTUa3xjD41Htxv3o+xoQjJdUo= +k8s.io/kubernetes v1.19.0/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A= +k8s.io/legacy-cloud-providers v0.19.0/go.mod h1:Q5czDCPnStdpFohMpcbnqL+MLR75kUhIDIsnmwEm0/o= +k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= +k8s.io/sample-apiserver v0.19.0/go.mod h1:Bq9UulNoKnT72JqlkWF2JS14cXxJqcmvLtb5+EcwiNA= +k8s.io/system-validators v1.1.2/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= +k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/mayastor-test/csi-e2e/runtest.sh b/mayastor-test/csi-e2e/runtest.sh new file mode 100755 index 000000000..379fcf005 --- /dev/null +++ b/mayastor-test/csi-e2e/runtest.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +GINKGO_FLAGS="-ginkgo.v -ginkgo.progress" +go test -v -timeout=0 . ${GINKGO_FLAGS} + +# Required until CAS-566 +# "Mayastor volumes not destroyed when PV is destroyed if storage class reclaim policy is Retain" +# is fixed. +kubectl -n mayastor delete msv --all diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go new file mode 100644 index 000000000..ca9ae8c58 --- /dev/null +++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go @@ -0,0 +1,50 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package testsuites
+
+import (
+	"mayastor-csi-e2e/driver"
+
+	"github.com/onsi/ginkgo"
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+)
+
+// DynamicallyProvisionedCmdVolumeTest will provision the required StorageClass(es), PVC(s) and Pod(s),
+// wait for the PV provisioner to create a new PV,
+// and test that the Pod's Cmd runs with a 0 exit code
+type DynamicallyProvisionedCmdVolumeTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pods                   []PodDetails
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, pod := range t.Pods {
+		tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+		// cleanup is deferred here so that the resources are not removed before we have finished using them
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+
+		ginkgo.By("deploying the pod")
+		tpod.Create()
+		defer tpod.Cleanup()
+		ginkgo.By("checking that the pod's command exits with no error")
+		tpod.WaitForSuccess()
+	}
+}
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go
new file mode 100644
index 000000000..dd9415b4f
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"github.com/onsi/ginkgo"
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"mayastor-csi-e2e/driver"
+)
+
+// DynamicallyProvisionedCollocatedPodTest will provision the required StorageClass(es), PVC(s) and Pod(s),
+// wait for the PV provisioner to create a new PV,
+// and test that multiple Pods can write simultaneously
+type DynamicallyProvisionedCollocatedPodTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pods                   []PodDetails
+	ColocatePods           bool
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	nodeName := ""
+	for _, pod := range t.Pods {
+		tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+		if t.ColocatePods && nodeName != "" {
+			tpod.SetNodeSelector(map[string]string{"name": nodeName})
+		}
+		// cleanup is deferred here so that the resources are not removed before we have finished using them
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+
+		ginkgo.By("deploying the pod")
+		tpod.Create()
+		defer tpod.Cleanup()
+
+		ginkgo.By("checking that the pod is running")
+		tpod.WaitForRunning()
+		nodeName = tpod.pod.Spec.NodeName
+	}
+
+}
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go
new file mode 100644
index 000000000..f9c1dcb20
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"github.com/onsi/ginkgo"
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"mayastor-csi-e2e/driver"
+)
+
+// DynamicallyProvisionedDeletePodTest will provision the required StorageClass and Deployment,
+// test that the Pod can write to and read from the mounted volumes,
+// then delete the pod and test the volumes again
+type DynamicallyProvisionedDeletePodTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pod                    PodDetails
+	PodCheck               *PodExecCheck
+	StorageClassParameters map[string]string
+}
+
+type PodExecCheck struct {
+	Cmd            []string
+	ExpectedString string
+}
+
+func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	tDeployment, cleanup := t.Pod.SetupDeployment(client, namespace, t.CSIDriver, t.StorageClassParameters)
+	// cleanup is deferred here so that the resources are not removed before we have finished using them
+	for i := range cleanup {
+		defer cleanup[i]()
+	}
+
+	ginkgo.By("deploying the deployment")
+	tDeployment.Create()
+
+	ginkgo.By("checking that the pod is running")
+	tDeployment.WaitForPodReady()
+
+	if t.PodCheck != nil {
+		ginkgo.By("checking pod exec")
+		tDeployment.Exec(t.PodCheck.Cmd, t.PodCheck.ExpectedString)
+	}
+
+	ginkgo.By("deleting the pod for deployment")
+	tDeployment.DeletePodAndWait()
+
+	ginkgo.By("checking again that the pod is running")
+	tDeployment.WaitForPodReady()
+
+	if t.PodCheck != nil {
+		ginkgo.By("checking pod exec")
+		// the pod has been restarted, so expect to see the string twice
+		tDeployment.Exec(t.PodCheck.Cmd, t.PodCheck.ExpectedString+t.PodCheck.ExpectedString)
+	}
+}
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
new file mode 100644
index 000000000..6129d0664
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"github.com/onsi/ginkgo"
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"mayastor-csi-e2e/driver"
+)
+
+// DynamicallyProvisionedPodWithMultiplePVsTest will provision
+// one pod with multiple PVs,
+// wait for the PV provisioner to create the new PVs,
+// and test that the Pod's Cmd runs with a 0 exit code
+type DynamicallyProvisionedPodWithMultiplePVsTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pods                   []PodDetails
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedPodWithMultiplePVsTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, pod := range t.Pods {
+		tpod, cleanup := pod.SetupWithDynamicMultipleVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+		// cleanup is deferred here so that the resources are not removed before we have finished using them
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+
+		ginkgo.By("deploying the pod")
+		tpod.Create()
+		defer tpod.Cleanup()
+		ginkgo.By("checking that the pod's command exits with no error")
+		tpod.WaitForSuccess()
+	}
+}
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
new file mode 100644
index 000000000..a40c74666
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"fmt"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"mayastor-csi-e2e/driver"
+)
+
+// DynamicallyProvisionedReadOnlyVolumeTest will provision the required StorageClass(es), PVC(s) and Pod(s),
+// wait for the PV provisioner to create a new PV,
+// and test that the Pod cannot write to the volume when it is mounted read-only
+type DynamicallyProvisionedReadOnlyVolumeTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pods                   []PodDetails
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, pod := range t.Pods {
+		expectedReadOnlyLog := "Read-only file system"
+
+		tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+		// cleanup is deferred here so that the resources are not removed before we have finished using them
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+
+		ginkgo.By("deploying the pod")
+		tpod.Create()
+		defer tpod.Cleanup()
+		ginkgo.By("checking that the pod's command exits with an error")
+		tpod.WaitForFailure()
+		ginkgo.By("checking that the pod logs contain the expected message")
+		body, err := tpod.Logs()
+		framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err))
+		gomega.Expect(string(body)).To(gomega.ContainSubstring(expectedReadOnlyLog))
+	}
+}
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
new file mode 100644
index 000000000..7e8d1074a
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"mayastor-csi-e2e/driver"
+
+	v1 "k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
+)
+
+// DynamicallyProvisionedReclaimPolicyTest will provision the required PV(s) and PVC(s)
+// and test the correct behavior of the different reclaim policies
+type DynamicallyProvisionedReclaimPolicyTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Volumes                []VolumeDetails
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+	for _, volume := range t.Volumes {
+		tPvc, cleanup := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters)
+		for i := range cleanup {
+			defer cleanup[i]()
+		}
+		// Cleanup deletes the PVC and, when reclaimPolicy=Delete,
+		// also waits for the PV to be deleted
+		tPvc.Cleanup()
+		// first check that the PV still exists, then manually delete it
+		if tPvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
+			tPvc.WaitForPersistentVolumePhase(v1.VolumeReleased)
+			tPvc.DeleteBoundPersistentVolume()
+		}
+	}
+}
diff --git a/mayastor-test/csi-e2e/testsuites/specs.go b/mayastor-test/csi-e2e/testsuites/specs.go
new file mode 100644
index 000000000..c546fd19a
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/specs.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package testsuites + +import ( + "fmt" + + "github.com/onsi/ginkgo" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + clientset "k8s.io/client-go/kubernetes" + "mayastor-csi-e2e/driver" +) + +const ( + FileSystem VolumeMode = iota + Block +) + +const ( + VolumeSnapshotKind = "VolumeSnapshot" + VolumePVCKind = "PersistentVolumeClaim" + APIVersionv1beta1 = "v1beta1" + SnapshotAPIVersion = "snapshot.storage.k8s.io/" + APIVersionv1beta1 +) + +var ( + SnapshotAPIGroup = "snapshot.storage.k8s.io" +) + +type PodDetails struct { + Cmd string + Volumes []VolumeDetails +} + +type VolumeMode int + +type VolumeMountDetails struct { + NameGenerate string + MountPathGenerate string + ReadOnly bool +} + +type VolumeDeviceDetails struct { + NameGenerate string + DevicePath string +} + +type DataSource struct { + Name string +} + +type VolumeDetails struct { + VolumeType string + FSType string + Encrypted bool + MountOptions []string + ClaimSize string + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy + VolumeBindingMode *storagev1.VolumeBindingMode + AllowedTopologyValues []string + VolumeMode VolumeMode + VolumeMount VolumeMountDetails + VolumeDevice VolumeDeviceDetails + // Optional, used with pre-provisioned volumes + VolumeID string + // Optional, used with PVCs created from snapshots + DataSource *DataSource + ShareName string + NodeStageSecretRef string +} + +func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func()) { + cleanupFuncs := make([]func(), 0) + ginkgo.By("setting up the StorageClass") + storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) + tsc := NewTestStorageClass(client, namespace, storageClass) + createdStorageClass := tsc.Create() + cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) + ginkgo.By("setting up the PVC and PV") + var tpvc *TestPersistentVolumeClaim + if volume.DataSource != nil { + dataSource := &v1.TypedLocalObjectReference{ + Name: volume.DataSource.Name, + } + tpvc = NewTestPersistentVolumeClaimWithDataSource(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass, dataSource) + } else { + tpvc = NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass) + } + tpvc.Create() + cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup) + // PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer + if volume.VolumeBindingMode == nil || *volume.VolumeBindingMode == storagev1.VolumeBindingImmediate { + tpvc.WaitForBound() + tpvc.ValidateProvisionedPersistentVolume() + } + + return tpvc, cleanupFuncs +} + +func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) { + tpod := NewTestPod(client, namespace, pod.Cmd) + cleanupFuncs := make([]func(), 0) + for n, v := range pod.Volumes { + tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters) + cleanupFuncs = append(cleanupFuncs, funcs...) 
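+		// Illustrative note on the naming convention implemented just below (the
+		// example values here are hypothetical, not defined in this file): with
+		// NameGenerate "volume-" and MountPathGenerate "/mnt/test-", volume n is
+		// mounted as "volume-<n+1>" at "/mnt/test-<n+1>", e.g. "volume-1" at "/mnt/test-1".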
+		tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+	}
+	return tpod, cleanupFuncs
+}
+
+func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func()) {
+	cleanupFuncs := make([]func(), 0)
+	volume := pod.Volumes[0]
+	ginkgo.By("setting up the StorageClass")
+	storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
+	tsc := NewTestStorageClass(client, namespace, storageClass)
+	createdStorageClass := tsc.Create()
+	cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
+	ginkgo.By("setting up the PVC")
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
+	tpvc.Create()
+	tpvc.WaitForBound()
+	tpvc.ValidateProvisionedPersistentVolume()
+	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
+	ginkgo.By("setting up the Deployment")
+	tDeployment := NewTestDeployment(client, namespace, pod.Cmd, tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", volume.VolumeMount.NameGenerate, 1), fmt.Sprintf("%s%d", volume.VolumeMount.MountPathGenerate, 1), volume.VolumeMount.ReadOnly)
+
+	cleanupFuncs = append(cleanupFuncs, tDeployment.Cleanup)
+	return tDeployment, cleanupFuncs
+}
+
+// SetupWithDynamicMultipleVolumes mounts multiple volumes into each pod
+func (pod *PodDetails) SetupWithDynamicMultipleVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) {
+	tpod := NewTestPod(client, namespace, pod.Cmd)
+	cleanupFuncs := make([]func(), 0)
+	for n, v := range pod.Volumes {
+		tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters)
+		cleanupFuncs = append(cleanupFuncs, funcs...)
+		if v.VolumeMode == Block {
+			tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath)
+		} else {
+			tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+		}
+	}
+	return tpod, cleanupFuncs
+}
diff --git a/mayastor-test/csi-e2e/testsuites/testsuites.go b/mayastor-test/csi-e2e/testsuites/testsuites.go
new file mode 100644
index 000000000..f12e0ce1b
--- /dev/null
+++ b/mayastor-test/csi-e2e/testsuites/testsuites.go
@@ -0,0 +1,560 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
+	apps "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+)
+
+const (
+	execTimeout = 10 * time.Second
+	// Some pods can take much longer to get ready due to volume attach/detach latency.
+	slowPodStartTimeout = 15 * time.Minute
+	// Description that will be printed during tests
+	failedConditionDescription = "Error status code"
+)
+
+type TestStorageClass struct {
+	client       clientset.Interface
+	storageClass *storagev1.StorageClass
+	namespace    *v1.Namespace
+}
+
+// Ideally this would be in "k8s.io/kubernetes/test/e2e/framework"
+// Similar to framework.WaitForPodSuccessInNamespaceSlow
+var podFailedCondition = func(pod *v1.Pod) (bool, error) {
+	switch pod.Status.Phase {
+	case v1.PodFailed:
+		ginkgo.By("Saw pod failure")
+		return true, nil
+	case v1.PodSucceeded:
+		return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
+	default:
+		return false, nil
+	}
+}
+
+type TestPersistentVolumeClaim struct {
+	client                         clientset.Interface
+	claimSize                      string
+	volumeMode                     v1.PersistentVolumeMode
+	storageClass                   *storagev1.StorageClass
+	namespace                      *v1.Namespace
+	persistentVolume               *v1.PersistentVolume
+	persistentVolumeClaim          *v1.PersistentVolumeClaim
+	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
+	dataSource                     *v1.TypedLocalObjectReference
+}
+
+type TestPod struct {
+	client    clientset.Interface
+	pod       *v1.Pod
+	namespace *v1.Namespace
+}
+
+func (t *TestStorageClass) Cleanup() {
+	e2elog.Logf("deleting StorageClass %s", t.storageClass.Name)
+	err := t.client.StorageV1().StorageClasses().Delete(context.TODO(), t.storageClass.Name, metav1.DeleteOptions{})
+	framework.ExpectNoError(err)
+}
+
+func (t *TestStorageClass) Create() storagev1.StorageClass {
+	var err error
+
+	ginkgo.By("creating a StorageClass " + t.storageClass.Name)
+	t.storageClass, err = t.client.StorageV1().StorageClasses().Create(context.TODO(), t.storageClass, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
+	return *t.storageClass
+}
+
+func NewTestStorageClass(c clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass) *TestStorageClass {
+	return &TestStorageClass{
+		client:       c,
+		storageClass: sc,
+		namespace:    ns,
+	}
+}
+
+func NewTestPersistentVolumeClaimWithDataSource(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass, dataSource *v1.TypedLocalObjectReference) *TestPersistentVolumeClaim {
+	mode := v1.PersistentVolumeFilesystem
+	if volumeMode == Block {
+		mode = v1.PersistentVolumeBlock
+	}
+	return &TestPersistentVolumeClaim{
+		client:       c,
+		claimSize:    claimSize,
+		volumeMode:   mode,
+		namespace:    ns,
+		storageClass: sc,
+		dataSource:   dataSource,
+	}
+}
+
+func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
+	mode := v1.PersistentVolumeFilesystem
+	if volumeMode == Block {
+		mode = v1.PersistentVolumeBlock
+	}
+	return &TestPersistentVolumeClaim{
+		client:       c,
+		claimSize:    claimSize,
+		volumeMode:   mode,
+		namespace:    ns,
+		storageClass: sc,
+	}
+}
+
+func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.PersistentVolumeMode, dataSource *v1.TypedLocalObjectReference) *v1.PersistentVolumeClaim {
+	return &v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "pvc-",
+			Namespace:    namespace,
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			StorageClassName: &storageClassName,
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				v1.ReadWriteOnce,
+			},
+			Resources: v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
+				},
+			},
+			VolumeMode: &volumeMode,
+			DataSource: dataSource,
+		},
+	}
+}
+
+func (t *TestPersistentVolumeClaim) Create() {
+	var err error
+
+	ginkgo.By("creating a PVC")
+	storageClassName := ""
+	if t.storageClass != nil {
+		storageClassName = t.storageClass.Name
+	}
+	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
+	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(context.TODO(), t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
+}
+
+// removeFinalizers is a workaround for the problem that a PV gets stuck terminating after its PVC is deleted.
+// Related issue: https://github.com/kubernetes/kubernetes/issues/69697
+func (t *TestPersistentVolumeClaim) removeFinalizers() {
+	pv, err := t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolume.Name, metav1.GetOptions{})
+	// The PV might already have been deleted successfully; if so, ignore the error.
+	if err != nil && strings.Contains(err.Error(), "not found") {
+		return
+	}
+	framework.ExpectNoError(err)
+
+	pvClone := pv.DeepCopy()
+
+	oldData, err := json.Marshal(pvClone)
+	framework.ExpectNoError(err)
+
+	pvClone.ObjectMeta.Finalizers = nil
+
+	newData, err := json.Marshal(pvClone)
+	framework.ExpectNoError(err)
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvClone)
+	framework.ExpectNoError(err)
+
+	_, err = t.client.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	// The PV might already have been deleted before it could be patched; if so, ignore the error.
+	if err != nil && strings.Contains(err.Error(), "not found") {
+		return
+	}
+	framework.ExpectNoError(err)
+}
+
+func (t *TestPersistentVolumeClaim) Cleanup() {
+	e2elog.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
+	err := e2epv.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
+	framework.ExpectNoError(err)
+	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
+	// Retain, there's no use waiting because the PV won't be auto-deleted and
+	// it's expected for the caller to do it.) Technically, the first few delete
+	// attempts may fail, as the volume is still attached to a node because
+	// kubelet is slowly cleaning up the previous pod, but it should succeed
+	// in a couple of minutes.
+ if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+ if t.persistentVolume.Spec.CSI != nil {
+ // only workaround in CSI driver tests
+ t.removeFinalizers()
+ }
+ ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
+ err := e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+ framework.ExpectNoError(err)
+ }
+ // Wait for the PVC to be deleted
+ err = waitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
+ framework.ExpectNoError(err)
+}
+
+// waitForPersistentVolumeClaimDeleted waits until the named PersistentVolumeClaim is removed from the system or the timeout expires, whichever comes first.
+func waitForPersistentVolumeClaimDeleted(c clientset.Interface, pvcName string, ns string, Poll, timeout time.Duration) error {
+ framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
+ for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
+ _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
+ if err != nil {
+ if apierrs.IsNotFound(err) {
+ framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
+ return nil
+ }
+ framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
+ }
+ }
+ return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
+}
+
+func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
+ var err error
+
+ ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
+ err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+ framework.ExpectNoError(err)
+
+ ginkgo.By("checking the PVC")
+ // Get new copy of the claim
+ t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(context.TODO(), t.persistentVolumeClaim.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err)
+
+ return *t.persistentVolumeClaim
+}
+
+func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
+ var err error
+
+ // Get the bound PersistentVolume
+ ginkgo.By("validating provisioned PV")
+ t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
+ framework.ExpectNoError(err)
+
+ // Check sizes
+ expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+ claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+ gomega.Expect(claimCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
+
+ pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
+ gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to requestedCapacity")
+
+ // Check PV properties
+ ginkgo.By("checking the PV")
+ expectedAccessModes := t.requestedPersistentVolumeClaim.Spec.AccessModes
+ gomega.Expect(t.persistentVolume.Spec.AccessModes).To(gomega.Equal(expectedAccessModes))
+ gomega.Expect(t.persistentVolume.Spec.ClaimRef.Name).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Name))
+
gomega.Expect(t.persistentVolume.Spec.ClaimRef.Namespace).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Namespace)) + // If storageClass is nil, PV was pre-provisioned with these values already set + if t.storageClass != nil { + gomega.Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*t.storageClass.ReclaimPolicy)) + gomega.Expect(t.persistentVolume.Spec.MountOptions).To(gomega.Equal(t.storageClass.MountOptions)) + if *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values). + To(gomega.HaveLen(1)) + } + if len(t.storageClass.AllowedTopologies) > 0 { + gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key). + To(gomega.Equal(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Key)) + for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values { + gomega.Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(gomega.ContainElement(v)) + } + + } + } +} + +func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) { + t.pod.Spec.NodeSelector = nodeSelector +} + +func (t *TestPod) WaitForFailure() { + err := e2epod.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition) + framework.ExpectNoError(err) +} + +func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string) *TestPod { + testPod := &TestPod{ + client: c, + namespace: ns, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "mayastor-volume-tester-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "volume-tester", + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{"/bin/sh"}, + Args: []string{"-c", command}, + VolumeMounts: make([]v1.VolumeMount, 0), + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: make([]v1.Volume, 0), + }, + }, + } + + return testPod +} + +func (t *TestPod) SetupRawBlockVolume(pvc *v1.PersistentVolumeClaim, name, devicePath string) { + volumeDevice := v1.VolumeDevice{ + Name: name, + DevicePath: devicePath, + } + t.pod.Spec.Containers[0].VolumeDevices = append(t.pod.Spec.Containers[0].VolumeDevices, volumeDevice) + + volume := v1.Volume{ + Name: name, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + } + t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume) +} + +func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string, readOnly bool) { + volumeMount := v1.VolumeMount{ + Name: name, + MountPath: mountPath, + ReadOnly: readOnly, + } + t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount) + + volume := v1.Volume{ + Name: name, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + } + t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume) +} + +func (t *TestPod) Logs() ([]byte, error) { + return podLogs(t.client, t.pod.Name, t.namespace.Name) +} + +func cleanupPodOrFail(client clientset.Interface, name, namespace string) { + e2elog.Logf("deleting Pod %q/%q", namespace, name) + body, err := podLogs(client, name, namespace) + if err != nil { + e2elog.Logf("Error getting logs for pod %s: %v", name, err) + } else { + e2elog.Logf("Pod %s has 
the following logs: %s", name, body) + } + e2epod.DeletePodOrFail(client, namespace, name) +} + +func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) { + return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() +} + +func (t *TestPod) Create() { + var err error + + t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(context.TODO(), t.pod, metav1.CreateOptions{}) + framework.ExpectNoError(err) +} + +func (t *TestPod) WaitForSuccess() { + err := e2epod.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name) + framework.ExpectNoError(err) +} + +func (t *TestPod) WaitForRunning() { + err := e2epod.WaitForPodRunningInNamespace(t.client, t.pod) + framework.ExpectNoError(err) +} + +func (t *TestPod) Cleanup() { + cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name) +} + +type TestDeployment struct { + client clientset.Interface + deployment *apps.Deployment + namespace *v1.Namespace + podName string +} + +func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly bool) *TestDeployment { + generateName := "mayastor-volume-tester-" + selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int()) + replicas := int32(1) + testDeployment := &TestDeployment{ + client: c, + namespace: ns, + deployment: &apps.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: generateName, + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": selectorValue}, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": selectorValue}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "volume-tester", + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{"/bin/sh"}, + Args: []string{"-c", command}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: mountPath, + ReadOnly: readOnly, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyAlways, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + + return testDeployment +} + +func (t *TestDeployment) Create() { + var err error + t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(context.TODO(), t.deployment, metav1.CreateOptions{}) + framework.ExpectNoError(err) + err = deployment.WaitForDeploymentComplete(t.client, t.deployment) + framework.ExpectNoError(err) + pods, err := deployment.GetPodsForDeployment(t.client, t.deployment) + framework.ExpectNoError(err) + // always get first pod as there should only be one + t.podName = pods.Items[0].Name +} + +func (t *TestDeployment) WaitForPodReady() { + pods, err := deployment.GetPodsForDeployment(t.client, t.deployment) + framework.ExpectNoError(err) + // always get first pod as there should only be one + pod := pods.Items[0] + t.podName = pod.Name + err = e2epod.WaitForPodRunningInNamespace(t.client, &pod) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) Exec(command []string, expectedString string) { + _, err := framework.LookForStringInPodExec(t.namespace.Name, t.podName, command, expectedString, execTimeout) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) DeletePodAndWait() { + e2elog.Logf("Deleting pod %q in namespace %q", t.podName, 
t.namespace.Name) + err := t.client.CoreV1().Pods(t.namespace.Name).Delete(context.TODO(), t.podName, metav1.DeleteOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err)) + } + return + } + e2elog.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name) + err = e2epod.WaitForPodNoLongerRunningInNamespace(t.client, t.podName, t.namespace.Name) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %v", t.podName, err)) + } + } +} + +func (t *TestDeployment) Cleanup() { + e2elog.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name) + body, err := t.Logs() + if err != nil { + e2elog.Logf("Error getting logs for pod %s: %v", t.podName, err) + } else { + e2elog.Logf("Pod %s has the following logs: %s", t.podName, body) + } + err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(context.TODO(), t.deployment.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err) +} + +func (t *TestDeployment) Logs() ([]byte, error) { + return podLogs(t.client, t.podName, t.namespace.Name) +} + +func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy { + return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy +} + +func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) { + err := e2epv.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) + framework.ExpectNoError(err) +} + +func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() { + ginkgo.By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name)) + err := e2epv.DeletePersistentVolume(t.client, t.persistentVolume.Name) + framework.ExpectNoError(err) + ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name)) + err = e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute) + framework.ExpectNoError(err) +} + From 80fc7839eac51c378e125a63cea89bbb7af465e2 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Fri, 11 Dec 2020 14:29:07 +0000 Subject: [PATCH 34/85] Add replica re-assignment test. This test causes a replica node to be faulted by IO packet rejection, and then waits for the control plane to provision a new replica. The test requires 4 mayastor nodes. --- mayastor-test/e2e/node_disconnect/README.md | 3 +- .../lib/node_disconnect_lib.go | 52 +++++++++++++++++- .../nvmf_reject_reassign_test.go | 55 +++++++++++++++++++ mayastor-test/e2e/node_disconnect/test.sh | 4 +- 4 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go diff --git a/mayastor-test/e2e/node_disconnect/README.md b/mayastor-test/e2e/node_disconnect/README.md index a235bf702..ae3a0ce39 100644 --- a/mayastor-test/e2e/node_disconnect/README.md +++ b/mayastor-test/e2e/node_disconnect/README.md @@ -2,8 +2,9 @@ The tests in this folder are not currently deployable by the CI system as the test assumes a vagrant installation -## Pre-requisites for this test +## Pre-requisites for these tests * A Kubernetes cluster with at least 3 nodes, with mayastor installed. 
+* The re-assignment test requires at least 4 nodes * The cluster is deployed using vagrant and KUBESPRAY_REPO is correctly defined in ./lib/io_connect_node.sh diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go index 60845096a..b36c6c07d 100644 --- a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -12,6 +12,7 @@ import ( var ( defTimeoutSecs = "90s" disconnectionTimeoutSecs = "90s" + repairTimeoutSecs = "90s" ) // disconnect a node from the other nodes in the cluster @@ -79,12 +80,12 @@ func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod str DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) - fmt.Printf("waiting up to 90s for disconnection to affect the nexus\n") + fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { return common.GetMsvState(uuid) }, - 90*time.Second, // timeout - "1s", // polling interval + disconnectionTimeoutSecs, // timeout + "1s", // polling interval ).Should(Equal("degraded")) fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) @@ -125,6 +126,51 @@ func LossWhenIdleTest(nodeToIsolate string, otherNodes []string, disconnectionMe common.RunFio("fio", 20) } +// Run fio against the cluster while a replica node is being removed, +// wait for the volume to become degraded, then wait for it to be repaired. +// Run fio against repaired volume, and again after node is reconnected. +func ReplicaReassignTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { + // This test needs at least 4 nodes, a refuge node, a mayastor node to isolate, and 2 other mayastor nodes + Expect(len(otherNodes)).To(BeNumerically(">=", 3)) + + fmt.Printf("running spawned fio\n") + go common.RunFio("fio", 20) + + time.Sleep(5 * time.Second) + fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) + + DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + + fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) + Eventually(func() string { + return common.GetMsvState(uuid) + }, + disconnectionTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal("degraded")) + + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + + fmt.Printf("waiting up to %s for the volume to be repaired\n", repairTimeoutSecs) + Eventually(func() string { + return common.GetMsvState(uuid) + }, + repairTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal("healthy")) + + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + + fmt.Printf("running fio while node is disconnected\n") + common.RunFio("fio", 20) + + fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) + ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) + + fmt.Printf("running fio when node is reconnected\n") + common.RunFio("fio", 20) +} + // Common steps required when setting up the test func Setup(pvc_name string, storage_class_name string) string { uuid := common.MkPVC(fmt.Sprintf(pvc_name), storage_class_name) diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go b/mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go new file mode 100644 index 000000000..2a7c76037 --- /dev/null +++ 
b/mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go @@ -0,0 +1,55 @@ +package node_disconnect_nvmf_reassign_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + g_nodeToIsolate = "" + g_otherNodes []string + g_uuid = "" + g_disconnectMethod = "REJECT" +) + +func lossTest() { + g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) + disconnect_lib.ReplicaReassignTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) +} + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Loss NVMF assign new replica") +} + +var _ = Describe("Mayastor node loss test", func() { + It("should verify nvmf nexus repair of volume when a node becomes inaccessible", func() { + lossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) + disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") + common.TeardownTestEnv() +}) diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/mayastor-test/e2e/node_disconnect/test.sh index 9c3f18896..c50bd0da7 100755 --- a/mayastor-test/e2e/node_disconnect/test.sh +++ b/mayastor-test/e2e/node_disconnect/test.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -e -timeout=200 +timeout=500 (cd setup && go test -timeout "${timeout}s") @@ -15,5 +15,7 @@ timeout=200 (cd nvmf_drop && go test -c -timeout "${timeout}s") (cd iscsi_drop && go test -c -timeout "${timeout}s") +(cd nvmf_reject_reassign && go test -timeout "${timeout}s") + (cd teardown && go test -timeout "${timeout}s") From c3381b48ded7646b088541ea43369e6c751c57d0 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Thu, 10 Dec 2020 14:21:04 +0000 Subject: [PATCH 35/85] Integrate install & uninstall e2e tests to jenkins It is just a starting point. There are many things to improve. For example: * reworking test directory structure, * dynamic spawn of k8s cluster, * making it possible to run multiple e2e tests in parallel, * eliminate pool template file in install test * generate deployment yaml files to a different dir than deploy/ * adding more e2e tests --- Jenkinsfile | 7 +++-- mayastor-test/e2e/install/README.md | 2 +- mayastor-test/e2e/install/install_test.go | 27 +++++++++-------- scripts/e2e-test.sh | 36 +++++++++++++++++++++++ shell.nix | 3 ++ 5 files changed, 60 insertions(+), 15 deletions(-) create mode 100755 scripts/e2e-test.sh diff --git a/Jenkinsfile b/Jenkinsfile index 877958c2d..9368e0a57 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -138,12 +138,15 @@ pipeline { stage('e2e tests') { agent { label 'nixos-mayastor' } steps { - // build images (REGISTRY is set in jenkin's global configuration) - sh "./scripts/release.sh --debug --alias-tag ci --registry ${env.REGISTRY}" + // Build images (REGISTRY is set in jenkin's global configuration). 
+ // Note: We might want to build and test dev images that have more + // assertions instead but that complicates e2e tests a bit. + sh "./scripts/release.sh --alias-tag ci --registry ${env.REGISTRY}" // save space by removing docker images that are never reused sh 'nix-store --delete /nix/store/*docker-image*' withCredentials([file(credentialsId: 'kubeconfig', variable: 'KUBECONFIG')]) { sh 'kubectl get nodes -o wide' + sh "nix-shell --run './scripts/e2e-test.sh ${env.REGISTRY}'" } } } diff --git a/mayastor-test/e2e/install/README.md b/mayastor-test/e2e/install/README.md index 99fe347cd..ab6956237 100644 --- a/mayastor-test/e2e/install/README.md +++ b/mayastor-test/e2e/install/README.md @@ -33,5 +33,5 @@ go test Or ```sh cd Maystor/e2e/install -e2e_docker_registry='192.168.122.1:5000' e2e_pool_yaml_files='/e2e/pools.yaml' go test +e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' go test ``` \ No newline at end of file diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go index db9ecd02a..3e1df91f6 100644 --- a/mayastor-test/e2e/install/install_test.go +++ b/mayastor-test/e2e/install/install_test.go @@ -113,8 +113,8 @@ func getDeployYamlDir() string { func applyDeployYaml(filename string) { cmd := exec.Command("kubectl", "apply", "-f", filename) cmd.Dir = getDeployYamlDir() - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } // Encapsulate the logic to find where the templated yamls are @@ -130,8 +130,8 @@ func makeImageName(registryAddress string, imagename string, imageversion string func generateYamls(registryAddress string) { bashcmd := "../../../scripts/generate-deploy-yamls.sh ci " + registryAddress cmd := exec.Command("bash", "-c", bashcmd) - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } func applyTemplatedYaml(filename string, imagename string, registryAddress string) { @@ -139,8 +139,8 @@ func applyTemplatedYaml(filename string, imagename string, registryAddress strin bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -" cmd := exec.Command("bash", "-c", bashcmd) cmd.Dir = getTemplateYamlDir() - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } // We expect this to fail a few times before it succeeds, @@ -164,6 +164,9 @@ func moacReadyPodCount() int { } // create pools for the cluster +// +// TODO: Ideally there should be one way how to create pools without using +// two env variables to do a similar thing. 
func createPools(mayastorNodes []string) { envPoolYamls := os.Getenv("e2e_pool_yaml_files") poolDevice := os.Getenv("e2e_pool_device") @@ -186,11 +189,11 @@ func createPools(mayastorNodes []string) { bashcmd := "NODE_NAME=" + mayastorNode + " POOL_DEVICE=" + poolDevice + " envsubst < " + "pool.yaml.template" + " | kubectl apply -f -" cmd := exec.Command("bash", "-c", bashcmd) cmd.Dir = getTemplateYamlDir() - _, err := cmd.CombinedOutput() - Expect(err).ToNot(HaveOccurred()) + out, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred(), "%s", out) } } else { - // No pools created + Expect(false).To(BeTrue(), "Neither e2e_pool_yaml_files nor e2e_pool_device specified") } } @@ -211,13 +214,13 @@ func installMayastor() { applyDeployYaml("nats-deployment.yaml") generateYamls(registryAddress) applyDeployYaml("csi-daemonset.yaml") - applyDeployYaml("moac-deployment") - applyDeployYaml("mayastor-daemonset") + applyDeployYaml("moac-deployment.yaml") + applyDeployYaml("mayastor-daemonset.yaml") // Given the yamls and the environment described in the test readme, // we expect mayastor to be running on exactly 2 nodes. Eventually(mayastorReadyPodCount, - "120s", // timeout + "180s", // timeout "1s", // polling interval ).Should(Equal(numMayastorInstances)) diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh new file mode 100755 index 000000000..d8b9ae841 --- /dev/null +++ b/scripts/e2e-test.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env sh + +set -e + +SCRIPTDIR=$(dirname "$(realpath "$0")") +REGISTRY=$1 +TESTS="install" + +# TODO: Add proper argument parser +if [ -z "$REGISTRY" ]; then + echo "Missing parameter registry" + exit 1 +fi + +test_failed= +export e2e_docker_registry="$REGISTRY" +export e2e_pool_device=/dev/nvme1n1 + +for dir in $TESTS; do + cd "$SCRIPTDIR/../mayastor-test/e2e/$dir" + if ! go test; then + test_failed=1 + break + fi +done + +# must always run uninstall test in order to clean up the cluster +cd "$SCRIPTDIR/../mayastor-test/e2e/uninstall" +go test + +if [ -n "$test_failed" ]; then + exit 1 +fi + +echo "All tests have passed" +exit 0 \ No newline at end of file diff --git a/shell.nix b/shell.nix index 1f1b0a36d..0f59be465 100644 --- a/shell.nix +++ b/shell.nix @@ -26,8 +26,11 @@ mkShell { cowsay e2fsprogs fio + envsubst # for e2e tests gdb + go gptfdisk + kubernetes-helm libaio libiscsi libiscsi.bin From 4e0066392d37807fed50d43b67ba0b671c936629 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Mon, 14 Dec 2020 16:33:06 +0000 Subject: [PATCH 36/85] Rework directory structure of the tests. NodeJS mayastor tests are in test/grpc directory. The name grpc was chosen because they test gRPC API of mayastor. E2E tests are in test/e2e directory. Third-party CSI E2E tests are in test/csi-e2e directory. 
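For orientation, a sketch of how each suite is invoked under the new layout
(the commands mirror scripts/grpc-test.sh, scripts/e2e-test.sh and the
csi-e2e runtest.sh; the choice of test_replica.js and the install test is
illustrative):

```sh
# gRPC API tests (NodeJS/mocha):
(cd test/grpc && npm install && ./node_modules/mocha/bin/mocha test_replica.js)

# e2e tests (Go), one directory per test:
(cd test/e2e/install && go test)

# third-party CSI e2e tests:
(cd test/csi-e2e && ./runtest.sh)
```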
--- .dockerignore | 2 +- Jenkinsfile | 4 +- doc/build.md | 2 +- doc/mayastor-client.md | 2 +- doc/vscode.md | 4 +- mayastor-test/e2e/install/README.md | 37 ----------- scripts/e2e-test.sh | 4 +- scripts/{node-test.sh => grpc-test.sh} | 4 +- scripts/js-check.sh | 4 +- scripts/test.sh | 6 -- {mayastor-test => test}/csi-e2e/README.md | 4 +- .../csi-e2e/check_driver_pods_restart.sh | 0 .../csi-e2e/driver/driver.go | 0 .../csi-e2e/driver/mayastor_driver.go | 0 .../csi-e2e/dynamic_provisioning_test.go | 2 +- .../csi-e2e/e2e_suite_test.go | 2 +- {mayastor-test => test}/csi-e2e/go.mod | 0 {mayastor-test => test}/csi-e2e/go.sum | 0 {mayastor-test => test}/csi-e2e/runtest.sh | 0 ...namically_provisioned_cmd_volume_tester.go | 0 ...cally_provisioned_collocated_pod_tester.go | 0 ...namically_provisioned_delete_pod_tester.go | 0 ...ically_provisioned_pod_with_multiple_pv.go | 0 ...lly_provisioned_read_only_volume_tester.go | 0 ...cally_provisioned_reclaim_policy_tester.go | 0 .../csi-e2e/testsuites/specs.go | 0 .../csi-e2e/testsuites/testsuites.go | 0 test/e2e/README.md | 50 +++++++++++++++ {mayastor-test => test}/e2e/common/test.go | 0 {mayastor-test => test}/e2e/common/util.go | 0 .../e2e/example-parallel.sh | 0 {mayastor-test => test}/e2e/example-simple.sh | 0 {mayastor-test => test}/e2e/go.mod | 0 {mayastor-test => test}/e2e/go.sum | 0 test/e2e/install/README.md | 15 +++++ .../e2e/install/deploy/README.md | 0 .../e2e/install/deploy/pool.yaml.template | 0 .../e2e/install/install_test.go | 0 {mayastor-test => test}/e2e/nightly/README.md | 0 .../e2e/nightly/pvc_stress/pvc_stress_test.go | 0 .../pvc_stress_fio/pvc_stress_fio_test.go | 0 {mayastor-test => test}/e2e/nightly/test.sh | 0 .../e2e/node_disconnect/README.md | 0 .../node_disconnect_iscsi_drop_test.go | 0 .../node_disconnect_iscsi_reject_test.go | 0 .../node_disconnect_iscsi_reject_idle_test.go | 0 .../node_disconnect/lib/io_connect_node.sh | 0 .../lib/node_disconnect_lib.go | 0 .../node_disconnect_nvmf_drop_test.go | 0 .../node_disconnect_nvmf_reject_test.go | 0 .../node_disconnect_nvmf_reject_idle_test.go | 0 .../nvmf_reject_reassign_test.go | 0 .../setup/node_disconnect_setup_test.go | 0 .../teardown/node_disconnect_teardown_test.go | 0 .../e2e/node_disconnect/test.sh | 0 .../e2e/nvmf_vol/deploy/fio_nvmf.yaml | 0 .../e2e/nvmf_vol/deploy/pvc_nvmf.yaml | 0 .../e2e/nvmf_vol/nvmf_vol_test.go | 0 {mayastor-test => test}/e2e/setup/README.md | 0 .../e2e/setup/bringup-cluster.sh | 0 .../e2e/setup/test-registry.yaml | 1 - .../e2e/uninstall/uninstall_test.go | 0 {mayastor-test => test/grpc}/.gitignore | 0 {mayastor-test => test/grpc}/README.md | 6 +- {mayastor-test => test/grpc}/grpc_enums.js | 2 +- .../grpc}/multi_reporter.js | 0 .../grpc}/package-lock.json | 0 {mayastor-test => test/grpc}/package.json | 0 {mayastor-test => test/grpc}/sudo.js | 24 +++---- {mayastor-test => test/grpc}/test_cli.js | 4 +- {mayastor-test => test/grpc}/test_common.js | 19 +++--- {mayastor-test => test/grpc}/test_csi.js | 64 +++++++++---------- {mayastor-test => test/grpc}/test_nats.js | 13 ++-- {mayastor-test => test/grpc}/test_nexus.js | 15 +++-- {mayastor-test => test/grpc}/test_rebuild.js | 12 ++-- {mayastor-test => test/grpc}/test_replica.js | 17 +++-- {mayastor-test => test/grpc}/test_snapshot.js | 10 +-- 77 files changed, 178 insertions(+), 151 deletions(-) delete mode 100644 mayastor-test/e2e/install/README.md rename scripts/{node-test.sh => grpc-test.sh} (72%) delete mode 100755 scripts/test.sh rename {mayastor-test => test}/csi-e2e/README.md (91%) rename 
{mayastor-test => test}/csi-e2e/check_driver_pods_restart.sh (100%) rename {mayastor-test => test}/csi-e2e/driver/driver.go (100%) rename {mayastor-test => test}/csi-e2e/driver/mayastor_driver.go (100%) rename {mayastor-test => test}/csi-e2e/dynamic_provisioning_test.go (99%) rename {mayastor-test => test}/csi-e2e/e2e_suite_test.go (98%) rename {mayastor-test => test}/csi-e2e/go.mod (100%) rename {mayastor-test => test}/csi-e2e/go.sum (100%) rename {mayastor-test => test}/csi-e2e/runtest.sh (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/specs.go (100%) rename {mayastor-test => test}/csi-e2e/testsuites/testsuites.go (100%) create mode 100644 test/e2e/README.md rename {mayastor-test => test}/e2e/common/test.go (100%) rename {mayastor-test => test}/e2e/common/util.go (100%) rename {mayastor-test => test}/e2e/example-parallel.sh (100%) rename {mayastor-test => test}/e2e/example-simple.sh (100%) rename {mayastor-test => test}/e2e/go.mod (100%) rename {mayastor-test => test}/e2e/go.sum (100%) create mode 100644 test/e2e/install/README.md rename {mayastor-test => test}/e2e/install/deploy/README.md (100%) rename {mayastor-test => test}/e2e/install/deploy/pool.yaml.template (100%) rename {mayastor-test => test}/e2e/install/install_test.go (100%) rename {mayastor-test => test}/e2e/nightly/README.md (100%) rename {mayastor-test => test}/e2e/nightly/pvc_stress/pvc_stress_test.go (100%) rename {mayastor-test => test}/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go (100%) rename {mayastor-test => test}/e2e/nightly/test.sh (100%) rename {mayastor-test => test}/e2e/node_disconnect/README.md (100%) rename {mayastor-test => test}/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/lib/io_connect_node.sh (100%) rename {mayastor-test => test}/e2e/node_disconnect/lib/node_disconnect_lib.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/setup/node_disconnect_setup_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go (100%) rename {mayastor-test => test}/e2e/node_disconnect/test.sh (100%) rename {mayastor-test => test}/e2e/nvmf_vol/deploy/fio_nvmf.yaml (100%) rename {mayastor-test => 
test}/e2e/nvmf_vol/deploy/pvc_nvmf.yaml (100%) rename {mayastor-test => test}/e2e/nvmf_vol/nvmf_vol_test.go (100%) rename {mayastor-test => test}/e2e/setup/README.md (100%) rename {mayastor-test => test}/e2e/setup/bringup-cluster.sh (100%) rename {mayastor-test => test}/e2e/setup/test-registry.yaml (98%) rename {mayastor-test => test}/e2e/uninstall/uninstall_test.go (100%) rename {mayastor-test => test/grpc}/.gitignore (100%) rename {mayastor-test => test/grpc}/README.md (98%) rename {mayastor-test => test/grpc}/grpc_enums.js (89%) rename {mayastor-test => test/grpc}/multi_reporter.js (100%) rename {mayastor-test => test/grpc}/package-lock.json (100%) rename {mayastor-test => test/grpc}/package.json (100%) rename {mayastor-test => test/grpc}/sudo.js (80%) rename {mayastor-test => test/grpc}/test_cli.js (99%) rename {mayastor-test => test/grpc}/test_common.js (96%) rename {mayastor-test => test/grpc}/test_csi.js (96%) rename {mayastor-test => test/grpc}/test_nats.js (94%) rename {mayastor-test => test/grpc}/test_nexus.js (99%) rename {mayastor-test => test/grpc}/test_rebuild.js (96%) rename {mayastor-test => test/grpc}/test_replica.js (97%) rename {mayastor-test => test/grpc}/test_snapshot.js (99%) diff --git a/.dockerignore b/.dockerignore index 460138963..9daeafb98 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1 @@ -mayastor-test +test diff --git a/Jenkinsfile b/Jenkinsfile index 9368e0a57..249061219 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -109,11 +109,11 @@ pipeline { } } } - stage('mocha api tests') { + stage('grpc tests') { agent { label 'nixos-mayastor' } steps { sh 'printenv' - sh 'nix-shell --run "./scripts/node-test.sh"' + sh 'nix-shell --run "./scripts/grpc-test.sh"' } post { always { diff --git a/doc/build.md b/doc/build.md index c9b512c86..b129ed728 100644 --- a/doc/build.md +++ b/doc/build.md @@ -133,7 +133,7 @@ echo 512 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages Then, for example: ```bash -root@gilakubuntu:/MayaStor/mayastor-test# ./node_modules/mocha/bin/mocha test_csi.js +root@gilakubuntu:/MayaStor/test/grpc# ./node_modules/mocha/bin/mocha test_csi.js csi identity ✓ probe diff --git a/doc/mayastor-client.md b/doc/mayastor-client.md index 2a19ee417..6fd9e2750 100644 --- a/doc/mayastor-client.md +++ b/doc/mayastor-client.md @@ -95,7 +95,7 @@ NAME PATH SIZE STATE REBUILDS CHILDREN Now this is not all that exciting, but as we you can see in [pool.rs](../mayastor/src/pool.rs) we can actually thin provision volumes out of the disks. You can also have a look into our test case that demonstrates -that [here](../mayastor-test/test_cli.js). We can also add files to the mix and the Nexus would be +that [here](../test/grpc/test_cli.js). We can also add files to the mix and the Nexus would be fine writing to it as it were a local disk. ```bash diff --git a/doc/vscode.md b/doc/vscode.md index 45fce577b..95653e24a 100644 --- a/doc/vscode.md +++ b/doc/vscode.md @@ -27,7 +27,7 @@ be mounted at /workspace from the host. There are two components in mayastor project that are written in JS: * moac -* mayastor-test +* test/grpc For both of them the same vscode configuration applies. We will be using [standardjs vscode plugin](https://marketplace.visualstudio.com/items?itemName=chenxsan.vscode-standardjs). 
@@ -45,7 +45,7 @@ used to configure the plugin: "standard.semistandard": true, "standard.workingDirectories": [ "csi/moac", - "mayastor-test" + "test/grpc" ] } ``` diff --git a/mayastor-test/e2e/install/README.md b/mayastor-test/e2e/install/README.md deleted file mode 100644 index ab6956237..000000000 --- a/mayastor-test/e2e/install/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Pre-requisites - -The test doesn't yet manage the lifecycle of the cluster being tested, -therefore the test hosts' kubeconfig must point to a Kubernetes cluster. -You can verify that the kubeconfig is setup correctly simply with -`kubectl get nodes`. - -The cluster under test must meet the following requirements: -* Have 3 nodes -* Each node must be configured per the quick start: - * At least 512 2MiB hugepages available - * Each node must be labelled for use by mayastor (ie "openebs.io/engine=mayastor") - -The test host must have the following installed: -* go (>= v1.15) -* ginkgo (tested with v1.2) -* kubectl (tested with v1.18) - -# Running the tests -Environment variables -* e2e_docker_registry - * The IP address:port of the registry to be used. - * If unspecified then the assumption is that test registry has been deployed in the cluster on port 30291, a suitable IP address is selected. -* e2e_pool_yaml_files - * The list of yaml files defining pools for the cluster, comma separated, absolute paths. -* e2e_pool_device - * This environment variable is used if `e2e_pool_yaml_files` is undefined. - * pools are created for each node running mayastor, using the template file and the specified pool device. -```sh -cd Mayastor/e2e/install -go test -``` -Or -```sh -cd Maystor/e2e/install -e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' go test -``` \ No newline at end of file diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index d8b9ae841..282017850 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -17,7 +17,7 @@ export e2e_docker_registry="$REGISTRY" export e2e_pool_device=/dev/nvme1n1 for dir in $TESTS; do - cd "$SCRIPTDIR/../mayastor-test/e2e/$dir" + cd "$SCRIPTDIR/../test/e2e/$dir" if ! 
go test; then test_failed=1 break @@ -25,7 +25,7 @@ for dir in $TESTS; do done # must always run uninstall test in order to clean up the cluster -cd "$SCRIPTDIR/../mayastor-test/e2e/uninstall" +cd "$SCRIPTDIR/../test/e2e/uninstall" go test if [ -n "$test_failed" ]; then diff --git a/scripts/node-test.sh b/scripts/grpc-test.sh similarity index 72% rename from scripts/node-test.sh rename to scripts/grpc-test.sh index 06cfc147f..097a7ed92 100755 --- a/scripts/node-test.sh +++ b/scripts/grpc-test.sh @@ -5,11 +5,11 @@ set -euxo pipefail export PATH=$PATH:${HOME}/.cargo/bin cargo build --all -cd mayastor-test +cd test/grpc npm install for ts in cli replica nexus csi rebuild snapshot nats; do ./node_modules/mocha/bin/mocha test_${ts}.js \ --reporter ./multi_reporter.js \ - --reporter-options reporters="xunit spec",output=../${ts}-xunit-report.xml + --reporter-options reporters="xunit spec",output=../../${ts}-xunit-report.xml done \ No newline at end of file diff --git a/scripts/js-check.sh b/scripts/js-check.sh index 3b7b3c4d9..4828927d4 100755 --- a/scripts/js-check.sh +++ b/scripts/js-check.sh @@ -14,7 +14,7 @@ for path in "$@"; do if [ "$rel_path" != "$path" ]; then MOAC_FILES="$MOAC_FILES $rel_path" else - rel_path=`echo $path | sed 's,mayastor-test/,,'` + rel_path=`echo $path | sed 's,test/grpc/,,'` if [ "$rel_path" != "$path" ]; then MS_TEST_FILES="$MS_TEST_FILES $rel_path" fi @@ -27,5 +27,5 @@ if [ -n "$MOAC_FILES" ]; then fi if [ -n "$MS_TEST_FILES" ]; then - ( cd $SCRIPTDIR/../mayastor-test && npx semistandard --fix $MS_TEST_FILES ) + ( cd $SCRIPTDIR/../test/grpc && npx semistandard --fix $MS_TEST_FILES ) fi diff --git a/scripts/test.sh b/scripts/test.sh deleted file mode 100755 index 867dd2c14..000000000 --- a/scripts/test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env sh -set -euxo pipefail -export PATH=$PATH:${HOME}/.cargo/bin -cargo build --all -./scripts/cargo-test.sh -./scripts/node-test.sh diff --git a/mayastor-test/csi-e2e/README.md b/test/csi-e2e/README.md similarity index 91% rename from mayastor-test/csi-e2e/README.md rename to test/csi-e2e/README.md index bbeb2304e..6668a2392 100644 --- a/mayastor-test/csi-e2e/README.md +++ b/test/csi-e2e/README.md @@ -10,7 +10,7 @@ These tests have been ported from kubernetes CSI NFS driver at https://github.co * `LARGE_CLAIM_SIZE` - Size of large PVCs created by the testsuite, defaults to `500Mi` ## Changes for mayastor -* Location of the test directory within the repo is `mayastor-test/csi-e2e` +* Location of the test directory within the repo is `test/csi-e2e` * Naming from `csi-nfs` to `csi-mayastor` * Claim sizes have been downsized from * `10Gi` to `50Mi` @@ -36,6 +36,6 @@ To run the tests execute `runtests.sh` from this directory. ### TODO Remove workaround for side effect of running this test, when CAS-566 is fixed. -In `mayastor-test/csi-e2e/runtest.sh` all Mayastor Volumes are deleted after +In `test/csi-e2e/runtest.sh` all Mayastor Volumes are deleted after the test run. Until CAS-566 is fixed this is required as this will have an impact on tests run subsequently in particular the uninstall test. 
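For context, a minimal sketch of the kind of cleanup runtest.sh performs after
a run (assuming the MayastorVolume CRD is registered with the short name `msv`
in the `mayastor` namespace - an assumption about the deployment, not shown in
this patch):

```sh
# Delete any MayastorVolumes left behind by the test run so that tests run
# afterwards, in particular the uninstall test, start from a clean cluster.
kubectl -n mayastor delete msv --all
```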
diff --git a/mayastor-test/csi-e2e/check_driver_pods_restart.sh b/test/csi-e2e/check_driver_pods_restart.sh similarity index 100% rename from mayastor-test/csi-e2e/check_driver_pods_restart.sh rename to test/csi-e2e/check_driver_pods_restart.sh diff --git a/mayastor-test/csi-e2e/driver/driver.go b/test/csi-e2e/driver/driver.go similarity index 100% rename from mayastor-test/csi-e2e/driver/driver.go rename to test/csi-e2e/driver/driver.go diff --git a/mayastor-test/csi-e2e/driver/mayastor_driver.go b/test/csi-e2e/driver/mayastor_driver.go similarity index 100% rename from mayastor-test/csi-e2e/driver/mayastor_driver.go rename to test/csi-e2e/driver/mayastor_driver.go diff --git a/mayastor-test/csi-e2e/dynamic_provisioning_test.go b/test/csi-e2e/dynamic_provisioning_test.go similarity index 99% rename from mayastor-test/csi-e2e/dynamic_provisioning_test.go rename to test/csi-e2e/dynamic_provisioning_test.go index d071855a7..5ef395609 100644 --- a/mayastor-test/csi-e2e/dynamic_provisioning_test.go +++ b/test/csi-e2e/dynamic_provisioning_test.go @@ -61,7 +61,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { ginkgo.BeforeEach(func() { checkPodsRestart := testCmd{ command: "sh", - args: []string{"mayastor-test/csi-e2e/check_driver_pods_restart.sh"}, + args: []string{"test/csi-e2e/check_driver_pods_restart.sh"}, startLog: "Check driver pods for restarts", endLog: "Check successful", } diff --git a/mayastor-test/csi-e2e/e2e_suite_test.go b/test/csi-e2e/e2e_suite_test.go similarity index 98% rename from mayastor-test/csi-e2e/e2e_suite_test.go rename to test/csi-e2e/e2e_suite_test.go index 835ad4443..ce3eeae0f 100644 --- a/mayastor-test/csi-e2e/e2e_suite_test.go +++ b/test/csi-e2e/e2e_suite_test.go @@ -77,7 +77,7 @@ func execTestCmd(cmds []testCmd) { err := os.Chdir("../..") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := os.Chdir("mayastor-test/csi-e2e") + err := os.Chdir("test/csi-e2e") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() diff --git a/mayastor-test/csi-e2e/go.mod b/test/csi-e2e/go.mod similarity index 100% rename from mayastor-test/csi-e2e/go.mod rename to test/csi-e2e/go.mod diff --git a/mayastor-test/csi-e2e/go.sum b/test/csi-e2e/go.sum similarity index 100% rename from mayastor-test/csi-e2e/go.sum rename to test/csi-e2e/go.sum diff --git a/mayastor-test/csi-e2e/runtest.sh b/test/csi-e2e/runtest.sh similarity index 100% rename from mayastor-test/csi-e2e/runtest.sh rename to test/csi-e2e/runtest.sh diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go b/test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go similarity index 100% rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go rename to test/csi-e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go similarity index 100% rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go rename to test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go similarity index 100% rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go rename to 
test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go b/test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
similarity index 100%
rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
rename to test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
similarity index 100%
rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
rename to test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
diff --git a/mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
similarity index 100%
rename from mayastor-test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
rename to test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go
diff --git a/mayastor-test/csi-e2e/testsuites/specs.go b/test/csi-e2e/testsuites/specs.go
similarity index 100%
rename from mayastor-test/csi-e2e/testsuites/specs.go
rename to test/csi-e2e/testsuites/specs.go
diff --git a/mayastor-test/csi-e2e/testsuites/testsuites.go b/test/csi-e2e/testsuites/testsuites.go
similarity index 100%
rename from mayastor-test/csi-e2e/testsuites/testsuites.go
rename to test/csi-e2e/testsuites/testsuites.go
diff --git a/test/e2e/README.md b/test/e2e/README.md
new file mode 100644
index 000000000..ddf4fd4e1
--- /dev/null
+++ b/test/e2e/README.md
@@ -0,0 +1,50 @@
+# Pre-requisites
+
+The tests don't yet manage the lifecycle of the cluster being tested,
+therefore the test host's kubeconfig must point to a Kubernetes cluster.
+You can verify that the kubeconfig is set up correctly simply with
+`kubectl get nodes`.
+
+The cluster under test must meet the following requirements:
+* Have 3 nodes (4 nodes if running the reassignment test)
+* Each node must be configured per the quick start:
+ * At least 1GiB of hugepages available (e.g. 512 2MiB hugepages)
+ * Each node must be labelled for use by mayastor (i.e. "openebs.io/engine=mayastor")
+
+The test host must have the following installed:
+* go (>= v1.15)
+* kubectl (tested with v1.18)
+* helm
+
+# Setting up the cluster
+
+### Create a cluster in public cloud
+
+Use the terraform script in `aws-kubeadm/` from the
+[terraform repo](https://github.com/mayadata-io/mayastor-terraform-playground) to
+set up a cluster in AWS suitable for running the tests. The configuration file for
+terraform could look like this (replace the docker registry value with your own - it must
+be reachable from the cluster):
+
+```
+cluster_name = "-e2e-test-cluster"
+deploy_mayastor = false
+num_workers = 3
+ebs_volume_size = 5
+mayastor_use_develop_images = true
+aws_instance_root_size_gb = 10
+docker_insecure_registry = "52.58.174.24:5000"
+```
+
+### Create a local cluster
+
+There are many possibilities here. You could use a libvirt-based cluster created by
+terraform (in terraform/ in this repo), or you could use the script from
+the `setup` subdirectory using vagrant and kubespray.
+
+# Running the tests
+
+If you'd like to run the tests as a whole (as they are run in our CI/CD
+pipeline) then use the script `./scripts/e2e-test.sh`.
+
+To run a particular test, cd to its directory and type `go test`.
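For example, a typical full run (the registry address is illustrative; the
script exports it as `e2e_docker_registry` and always finishes with the
uninstall test to leave the cluster clean):

```sh
./scripts/e2e-test.sh 192.168.122.1:5000
```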
diff --git a/mayastor-test/e2e/common/test.go b/test/e2e/common/test.go
similarity index 100%
rename from mayastor-test/e2e/common/test.go
rename to test/e2e/common/test.go
diff --git a/mayastor-test/e2e/common/util.go b/test/e2e/common/util.go
similarity index 100%
rename from mayastor-test/e2e/common/util.go
rename to test/e2e/common/util.go
diff --git a/mayastor-test/e2e/example-parallel.sh b/test/e2e/example-parallel.sh
similarity index 100%
rename from mayastor-test/e2e/example-parallel.sh
rename to test/e2e/example-parallel.sh
diff --git a/mayastor-test/e2e/example-simple.sh b/test/e2e/example-simple.sh
similarity index 100%
rename from mayastor-test/e2e/example-simple.sh
rename to test/e2e/example-simple.sh
diff --git a/mayastor-test/e2e/go.mod b/test/e2e/go.mod
similarity index 100%
rename from mayastor-test/e2e/go.mod
rename to test/e2e/go.mod
diff --git a/mayastor-test/e2e/go.sum b/test/e2e/go.sum
similarity index 100%
rename from mayastor-test/e2e/go.sum
rename to test/e2e/go.sum
diff --git a/test/e2e/install/README.md b/test/e2e/install/README.md
new file mode 100644
index 000000000..a00cd5eb8
--- /dev/null
+++ b/test/e2e/install/README.md
@@ -0,0 +1,15 @@
+# Running the install test
+
+Environment variables:
+* `e2e_docker_registry`
+ * The IP address:port of the registry to be used.
+ * If unspecified then the assumption is that the test registry has been deployed in the cluster on port 30291, and a suitable IP address is selected.
+* `e2e_pool_yaml_files`
+ * The list of yaml files defining pools for the cluster, comma separated, absolute paths.
+* `e2e_pool_device`
+ * This environment variable is used if `e2e_pool_yaml_files` is undefined.
+ * Pools are created for each node running mayastor, using the template file and the specified pool device.
+ +```sh +e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' go test +``` diff --git a/mayastor-test/e2e/install/deploy/README.md b/test/e2e/install/deploy/README.md similarity index 100% rename from mayastor-test/e2e/install/deploy/README.md rename to test/e2e/install/deploy/README.md diff --git a/mayastor-test/e2e/install/deploy/pool.yaml.template b/test/e2e/install/deploy/pool.yaml.template similarity index 100% rename from mayastor-test/e2e/install/deploy/pool.yaml.template rename to test/e2e/install/deploy/pool.yaml.template diff --git a/mayastor-test/e2e/install/install_test.go b/test/e2e/install/install_test.go similarity index 100% rename from mayastor-test/e2e/install/install_test.go rename to test/e2e/install/install_test.go diff --git a/mayastor-test/e2e/nightly/README.md b/test/e2e/nightly/README.md similarity index 100% rename from mayastor-test/e2e/nightly/README.md rename to test/e2e/nightly/README.md diff --git a/mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go b/test/e2e/nightly/pvc_stress/pvc_stress_test.go similarity index 100% rename from mayastor-test/e2e/nightly/pvc_stress/pvc_stress_test.go rename to test/e2e/nightly/pvc_stress/pvc_stress_test.go diff --git a/mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go similarity index 100% rename from mayastor-test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go rename to test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go diff --git a/mayastor-test/e2e/nightly/test.sh b/test/e2e/nightly/test.sh similarity index 100% rename from mayastor-test/e2e/nightly/test.sh rename to test/e2e/nightly/test.sh diff --git a/mayastor-test/e2e/node_disconnect/README.md b/test/e2e/node_disconnect/README.md similarity index 100% rename from mayastor-test/e2e/node_disconnect/README.md rename to test/e2e/node_disconnect/README.md diff --git a/mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go b/test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go rename to test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go diff --git a/mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go b/test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go rename to test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go diff --git a/mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go b/test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go rename to test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go diff --git a/mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh b/test/e2e/node_disconnect/lib/io_connect_node.sh similarity index 100% rename from mayastor-test/e2e/node_disconnect/lib/io_connect_node.sh rename to test/e2e/node_disconnect/lib/io_connect_node.sh diff --git a/mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go b/test/e2e/node_disconnect/lib/node_disconnect_lib.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/lib/node_disconnect_lib.go 
rename to test/e2e/node_disconnect/lib/node_disconnect_lib.go diff --git a/mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go b/test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go rename to test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go b/test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go rename to test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go b/test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go rename to test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go diff --git a/mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go b/test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go rename to test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go diff --git a/mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go b/test/e2e/node_disconnect/setup/node_disconnect_setup_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/setup/node_disconnect_setup_test.go rename to test/e2e/node_disconnect/setup/node_disconnect_setup_test.go diff --git a/mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go b/test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go similarity index 100% rename from mayastor-test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go rename to test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go diff --git a/mayastor-test/e2e/node_disconnect/test.sh b/test/e2e/node_disconnect/test.sh similarity index 100% rename from mayastor-test/e2e/node_disconnect/test.sh rename to test/e2e/node_disconnect/test.sh diff --git a/mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml b/test/e2e/nvmf_vol/deploy/fio_nvmf.yaml similarity index 100% rename from mayastor-test/e2e/nvmf_vol/deploy/fio_nvmf.yaml rename to test/e2e/nvmf_vol/deploy/fio_nvmf.yaml diff --git a/mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml b/test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml similarity index 100% rename from mayastor-test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml rename to test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml diff --git a/mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go b/test/e2e/nvmf_vol/nvmf_vol_test.go similarity index 100% rename from mayastor-test/e2e/nvmf_vol/nvmf_vol_test.go rename to test/e2e/nvmf_vol/nvmf_vol_test.go diff --git a/mayastor-test/e2e/setup/README.md b/test/e2e/setup/README.md similarity index 100% rename from mayastor-test/e2e/setup/README.md rename to test/e2e/setup/README.md diff --git a/mayastor-test/e2e/setup/bringup-cluster.sh b/test/e2e/setup/bringup-cluster.sh similarity index 100% rename from mayastor-test/e2e/setup/bringup-cluster.sh rename to test/e2e/setup/bringup-cluster.sh diff --git 
a/mayastor-test/e2e/setup/test-registry.yaml b/test/e2e/setup/test-registry.yaml similarity index 98% rename from mayastor-test/e2e/setup/test-registry.yaml rename to test/e2e/setup/test-registry.yaml index c4ab7eae0..ede267c91 100644 --- a/mayastor-test/e2e/setup/test-registry.yaml +++ b/test/e2e/setup/test-registry.yaml @@ -29,4 +29,3 @@ spec: containers: - name: test-registry image: registry:2 - \ No newline at end of file diff --git a/mayastor-test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go similarity index 100% rename from mayastor-test/e2e/uninstall/uninstall_test.go rename to test/e2e/uninstall/uninstall_test.go diff --git a/mayastor-test/.gitignore b/test/grpc/.gitignore similarity index 100% rename from mayastor-test/.gitignore rename to test/grpc/.gitignore diff --git a/mayastor-test/README.md b/test/grpc/README.md similarity index 98% rename from mayastor-test/README.md rename to test/grpc/README.md index a1d4cfa6e..3ab8050a2 100644 --- a/mayastor-test/README.md +++ b/test/grpc/README.md @@ -11,21 +11,20 @@ nix-shell The within the Nix shell you opened above: ```sh -git submodule update --init cargo build --all ``` ## Running All Tests ```sh -./test.sh +./scripts/grpc-test.sh ``` ## Running Individual Tests Test use mocha. To set this up: ```sh -cd mayastor-test +cd test/grpc npm install ``` @@ -51,7 +50,6 @@ note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace. Failures like that above are generally because you're building outside of a Nix shell. ```sh -cd Mayastor nix-shell ``` diff --git a/mayastor-test/grpc_enums.js b/test/grpc/grpc_enums.js similarity index 89% rename from mayastor-test/grpc_enums.js rename to test/grpc/grpc_enums.js index 8dfb21ee3..5403977a9 100644 --- a/mayastor-test/grpc_enums.js +++ b/test/grpc/grpc_enums.js @@ -9,7 +9,7 @@ const constants = {}; const defs = Object.values( grpc.loadPackageDefinition( protoLoader.loadSync( - path.join(__dirname, '..', 'rpc', 'proto', 'mayastor.proto'), + path.join(__dirname, '..', '..', 'rpc', 'proto', 'mayastor.proto'), { // this is to load google/descriptor.proto includeDirs: ['./node_modules/protobufjs'] diff --git a/mayastor-test/multi_reporter.js b/test/grpc/multi_reporter.js similarity index 100% rename from mayastor-test/multi_reporter.js rename to test/grpc/multi_reporter.js diff --git a/mayastor-test/package-lock.json b/test/grpc/package-lock.json similarity index 100% rename from mayastor-test/package-lock.json rename to test/grpc/package-lock.json diff --git a/mayastor-test/package.json b/test/grpc/package.json similarity index 100% rename from mayastor-test/package.json rename to test/grpc/package.json diff --git a/mayastor-test/sudo.js b/test/grpc/sudo.js similarity index 80% rename from mayastor-test/sudo.js rename to test/grpc/sudo.js index 05685ae15..1c8f23442 100644 --- a/mayastor-test/sudo.js +++ b/test/grpc/sudo.js @@ -1,27 +1,27 @@ 'use strict'; -var spawn = require('child_process').spawn; -var path = require('path'); -var read = require('read'); -var inpathSync = require('inpath').sync; -var pidof = require('pidof'); +const spawn = require('child_process').spawn; +const path = require('path'); +const read = require('read'); +const inpathSync = require('inpath').sync; +const pidof = require('pidof'); const sudoBin = inpathSync('sudo', process.env.PATH.split(':')); -var cachedPassword; +let cachedPassword; function sudo (command, options, nameInPs) { - var prompt = '#node-sudo-passwd#'; - var prompts = 0; + const prompt = '#node-sudo-passwd#'; + let 
prompts = 0; nameInPs = nameInPs || path.basename(command[0]); - var args = ['-S', '-E', '-p', prompt]; + const args = ['-S', '-E', '-p', prompt]; args.push.apply(args, command); options = options || {}; - var spawnOptions = options.spawnOptions || {}; + const spawnOptions = options.spawnOptions || {}; spawnOptions.stdio = 'pipe'; - var child = spawn(sudoBin, args, spawnOptions); + const child = spawn(sudoBin, args, spawnOptions); // Wait for the sudo:d binary to start up function waitForStartup (err, pid) { @@ -43,7 +43,7 @@ function sudo (command, options, nameInPs) { // FIXME: Remove this handler when the child has successfully started child.stderr.on('data', function (data) { - var lines = data + const lines = data .toString() .trim() .split('\n'); diff --git a/mayastor-test/test_cli.js b/test/grpc/test_cli.js similarity index 99% rename from mayastor-test/test_cli.js rename to test/grpc/test_cli.js index dca3e1ee7..3e1561711 100644 --- a/mayastor-test/test_cli.js +++ b/test/grpc/test_cli.js @@ -23,13 +23,14 @@ const UUID3 = '753b391c-9b04-4ce3-9c74-9d949152e543'; const CLIENT_CMD = path.join( __dirname, '..', + '..', 'target', 'debug', 'mayastor-client' ); const EGRESS_CMD = CLIENT_CMD + ' -p ' + EGRESS_PORT; -var mayastorMockServer; +let mayastorMockServer; // Here we initialize gRPC mock server with predefined replies for requests // we use in the tests below. Note that the request must exactly match the @@ -40,6 +41,7 @@ function runMockServer (rules) { protoPath: path.join( __dirname, '..', + '..', 'rpc', 'proto', 'mayastor.proto' diff --git a/mayastor-test/test_common.js b/test/grpc/test_common.js similarity index 96% rename from mayastor-test/test_common.js rename to test/grpc/test_common.js index 519cf9274..45028ed65 100644 --- a/mayastor-test/test_common.js +++ b/test/grpc/test_common.js @@ -20,15 +20,15 @@ const CSI_ENDPOINT = '/tmp/mayastor_csi_test.sock'; const CSI_ID = 'test-node-id'; const LOCALHOST = '127.0.0.1'; -var testPort = process.env.TEST_PORT || GRPC_PORT; -var myIp = getMyIp() || LOCALHOST; -var grpcEndpoint = myIp + ':' + testPort; +const testPort = process.env.TEST_PORT || GRPC_PORT; +const myIp = getMyIp() || LOCALHOST; +const grpcEndpoint = myIp + ':' + testPort; // started processes indexed by the program name -var procs = {}; +let procs = {}; // Construct path to a rust binary in target/debug/... dir. function getCmdPath (name) { - return path.join(__dirname, '..', 'target', 'debug', name); + return path.join(__dirname, '..', '..', 'target', 'debug', name); } // Run the command as root. We use sudo to gain root privileges. @@ -211,7 +211,7 @@ function startMayastorCsi () { function killSudoedProcess (name, pid, done) { find('name', name).then((res) => { - var whichPid; + let whichPid; if (process.geteuid() === 0) { whichPid = 'pid'; } else { @@ -330,7 +330,7 @@ function jsonrpcCommand (sock, method, args, done) { done = args; args = null; } - var cmd = getCmdPath('jsonrpc') + ' -s ' + sock + ' raw' + ' ' + method; + let cmd = getCmdPath('jsonrpc') + ' -s ' + sock + ' raw' + ' ' + method; if (args !== null && args !== undefined) { cmd += " '" + JSON.stringify(args) + "'"; } @@ -346,11 +346,12 @@ function jsonrpcCommand (sock, method, args, done) { // Create mayastor grpc client. Must be closed by the user when not used anymore. 
function createGrpcClient (endpoint) { endpoint = endpoint || grpcEndpoint; - var client = createClient( + const client = createClient( { protoPath: path.join( __dirname, '..', + '..', 'rpc', 'proto', 'mayastor.proto' @@ -375,7 +376,7 @@ function createGrpcClient (endpoint) { // Create mayastor grpc client, call a method and return the result of it. function callGrpcMethod (method, args, done) { - var client; + let client; try { client = createGrpcClient(); } catch (err) { diff --git a/mayastor-test/test_csi.js b/test/grpc/test_csi.js similarity index 96% rename from mayastor-test/test_csi.js rename to test/grpc/test_csi.js index bb26ac338..592eaaddc 100644 --- a/mayastor-test/test_csi.js +++ b/test/grpc/test_csi.js @@ -26,7 +26,7 @@ const grpc = require('grpc-uds'); const common = require('./test_common'); const enums = require('./grpc_enums'); -var csiSock = common.CSI_ENDPOINT; +const csiSock = common.CSI_ENDPOINT; // One big malloc bdev which we put lvol store on. const CONFIG = ` @@ -50,7 +50,7 @@ const UUID5 = BASE_UUID + '4'; function createCsiClient (service) { const pkgDef = grpc.loadPackageDefinition( protoLoader.loadSync( - path.join(__dirname, '..', 'csi', 'proto', 'csi.proto'), + path.join(__dirname, '..', '..', 'csi', 'proto', 'csi.proto'), { // this is to load google/descriptor.proto includeDirs: ['./node_modules/protobufjs'], @@ -152,7 +152,7 @@ describe('csi', function () { common.startMayastor(CONFIG); common.startMayastorCsi(); - var client = common.createGrpcClient(); + const client = common.createGrpcClient(); async.series( [ @@ -209,7 +209,7 @@ describe('csi', function () { describe('general', function () { it('should start even if there is a stale csi socket file', (done) => { - var client = createCsiClient('Identity'); + const client = createCsiClient('Identity'); async.series( [ @@ -233,7 +233,7 @@ describe('csi', function () { }); describe('identity', function () { - var client; + let client; before(() => { client = createCsiClient('Identity'); @@ -282,7 +282,7 @@ describe('csi', function () { }); describe('node', function () { - var client; + let client; before(() => { client = createCsiClient('Node'); @@ -336,7 +336,7 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { // NOTE: Don't use mayastor in setup - we test CSI interface and we don't want // to depend on correct function of mayastor iface in order to test CSI. before((done) => { - var client = common.createGrpcClient(); + const client = common.createGrpcClient(); async.times( 5, (n, next) => { @@ -355,7 +355,7 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { if (err) { return done(err); } - for (var n in results) { + for (const n in results) { const uuid = BASE_UUID + n; // stash the published URIs in a map indexed // on the uuid of the volume. 
@@ -368,7 +368,7 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { // stop mayastor server if it was started by us after((done) => { - var client = common.createGrpcClient(); + const client = common.createGrpcClient(); async.times( 5, function (n, next) { @@ -385,8 +385,8 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('stage and unstage xfs volume', function () { - var client; - var mountTarget = '/tmp/target0'; + let client; + const mountTarget = '/tmp/target0'; // get default args for stage op with xfs fs function getDefaultArgs () { @@ -526,8 +526,8 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('stage and unstage ext4 volume', function () { - var client; - var mountTarget = '/tmp/target1'; + let client; + const mountTarget = '/tmp/target1'; before((done) => { client = createCsiClient('Node'); @@ -586,8 +586,8 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('stage misc', function () { - var client; - var mountTarget = '/tmp/target2'; + let client; + const mountTarget = '/tmp/target2'; before((done) => { client = createCsiClient('Node'); @@ -628,7 +628,7 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { // The combinations of ro/rw and access mode flags are quite confusing. // See the source code for more info on how this should work. describe('publish and unpublish', function () { - var client; + let client; before(() => { client = createCsiClient('Node'); @@ -641,9 +641,9 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('MULTI_NODE_READER_ONLY staged volume', function () { - var mountTarget = '/tmp/target3'; - var bindTarget1 = '/tmp/bind1'; - var bindTarget2 = '/tmp/bind2'; + const mountTarget = '/tmp/target3'; + const bindTarget1 = '/tmp/bind1'; + const bindTarget2 = '/tmp/bind2'; before((done) => { const stageArgs = { @@ -822,9 +822,9 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('MULTI_NODE_SINGLE_WRITER staged volume', function () { - var mountTarget = '/tmp/target4'; - var bindTarget1 = '/tmp/bind1'; - var bindTarget2 = '/tmp/bind2'; + const mountTarget = '/tmp/target4'; + const bindTarget1 = '/tmp/bind1'; + const bindTarget2 = '/tmp/bind2'; before((done) => { const stageArgs = { @@ -956,8 +956,8 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('stage and unstage block volume', function () { - var client; - var mountTarget = '/tmp/target2'; + let client; + const mountTarget = '/tmp/target2'; before((done) => { client = createCsiClient('Node'); @@ -1017,7 +1017,7 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { // The combinations of ro/rw and access mode flags are quite confusing. // See the source code for more info on how this should work. 
describe('publish and unpublish block volumes', function () { - var client; + let client; before(() => { client = createCsiClient('Node'); @@ -1030,10 +1030,10 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('MULTI_NODE_READER_ONLY staged volume', function () { - var stagingPath = '/tmp/target3'; - var stagingPath2 = '/tmp/target4'; - var publishPath1 = '/tmp/blockvol1'; - var publishPath2 = '/tmp/blockvol2'; + const stagingPath = '/tmp/target3'; + const stagingPath2 = '/tmp/target4'; + const publishPath1 = '/tmp/blockvol1'; + const publishPath2 = '/tmp/blockvol2'; before((done) => { const stageArgs = { @@ -1276,9 +1276,9 @@ function csiProtocolTest (protoname, shareType, timeoutMillis) { }); describe('MULTI_NODE_SINGLE_WRITER staged volume', function () { - var stagingPath = '/tmp/target4'; - var publishPath1 = '/tmp/blockvol1'; - var publishPath2 = '/tmp/blockvol2'; + const stagingPath = '/tmp/target4'; + const publishPath1 = '/tmp/blockvol1'; + const publishPath2 = '/tmp/blockvol2'; before((done) => { const stageArgs = { diff --git a/mayastor-test/test_nats.js b/test/grpc/test_nats.js similarity index 94% rename from mayastor-test/test_nats.js rename to test/grpc/test_nats.js index f56d0d8f7..717e4a8cc 100644 --- a/mayastor-test/test_nats.js +++ b/test/grpc/test_nats.js @@ -6,20 +6,19 @@ const assert = require('chai').assert; const { spawn } = require('child_process'); const common = require('./test_common'); const nats = require('nats'); -const util = require('util') const HB_INTERVAL = 1; const NATS_PORT = 14222; const NATS_ENDPOINT = common.getMyIp() + ':' + NATS_PORT; const NODE_NAME = 'weird-node-name'; -var natsProc; +let natsProc; // start nats server function startNats (done) { natsProc = spawn('nats-server', ['-a', common.getMyIp(), '-p', NATS_PORT]); - var doneCalled = false; - var stderr = ''; + let doneCalled = false; + let stderr = ''; natsProc.stderr.on('data', (data) => { stderr += data.toString(); @@ -53,7 +52,7 @@ function stopNats (done) { } function assertRegisterMessage (msg) { - assert(JSON.parse(msg).id == "v0/register" ); + assert.strictEqual(JSON.parse(msg).id, 'v0/register'); const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id', 'grpcEndpoint']); assert.strictEqual(args.id, NODE_NAME); @@ -64,7 +63,7 @@ function assertRegisterMessage (msg) { // of the tests and setting the right environment for each test would be // tedious. 
describe('nats', function () { - var client; + let client; // longer timeout - the tests wait for register messages this.timeout(5000); @@ -126,7 +125,7 @@ describe('nats', function () { it('should send a deregistration message when mayastor is shut down', (done) => { const sid = client.subscribe('v0/registry', (msg) => { client.unsubscribe(sid); - assert(JSON.parse(msg).id == "v0/deregister" ); + assert.strictEqual(JSON.parse(msg).id, 'v0/deregister'); const args = JSON.parse(msg).data; assert.hasAllKeys(args, ['id']); assert.strictEqual(args.id, NODE_NAME); diff --git a/mayastor-test/test_nexus.js b/test/grpc/test_nexus.js similarity index 99% rename from mayastor-test/test_nexus.js rename to test/grpc/test_nexus.js index 8d068dde2..47fab8de7 100644 --- a/mayastor-test/test_nexus.js +++ b/test/grpc/test_nexus.js @@ -66,7 +66,7 @@ iscsi_tgt_conf: implicit_share_base: true `; -var client; +let client; function controlPlaneTest (thisProtocol) { it('should publish the nexus', (done) => { @@ -173,9 +173,9 @@ function controlPlaneTest (thisProtocol) { }); } -var doUring = (function () { - var executed = false; - var supportsUring = false; +const doUring = (function () { + let executed = false; + let supportsUring = false; return function () { if (!executed) { executed = true; @@ -183,6 +183,7 @@ var doUring = (function () { const URING_SUPPORT_CMD = path.join( __dirname, '..', + '..', 'target', 'debug', 'uring-support' @@ -519,7 +520,7 @@ describe('nexus', function () { }); // End describe('nbd control') describe('nbd datapath', function () { - var nbdDeviceUri; + let nbdDeviceUri; it('should publish the nexus', (done) => { client.publishNexus( @@ -572,7 +573,7 @@ describe('nexus', function () { }); // End describe('iscsi control') describe('iscsi datapath', function () { - var uri; + let uri; it('should publish the nexus', (done) => { client.publishNexus( @@ -635,7 +636,7 @@ describe('nexus', function () { ); }); - var uri; + let uri; it('should publish the nexus', (done) => { client.publishNexus( { diff --git a/mayastor-test/test_rebuild.js b/test/grpc/test_rebuild.js similarity index 96% rename from mayastor-test/test_rebuild.js rename to test/grpc/test_rebuild.js index ad77ba924..ef1d4437f 100644 --- a/mayastor-test/test_rebuild.js +++ b/test/grpc/test_rebuild.js @@ -50,7 +50,7 @@ const childOfflineArgs = { }; function createGrpcClient () { - const PROTO_PATH = path.join(__dirname, '/../rpc/proto/mayastor.proto'); + const PROTO_PATH = path.join(__dirname, '/../../rpc/proto/mayastor.proto'); // Load mayastor proto file with mayastor service const packageDefinition = protoLoader.loadSync(PROTO_PATH, { @@ -72,11 +72,11 @@ function createGrpcClient () { } describe('rebuild tests', function () { - var client; + let client; this.timeout(10000); // for network tests we need long timeouts - var ObjectType = { + const ObjectType = { NEXUS: 0, SOURCE_CHILD: 1, DESTINATION_CHILD: 2 @@ -183,10 +183,10 @@ describe('rebuild tests', function () { common.stopAll, common.restoreNbdPerms, (next) => { - fs.unlink(child1, (err) => next()); // eslint-disable-line handle-callback-err + fs.unlink(child1, () => next()); }, (next) => { - fs.unlink(child2, (err) => next()); // eslint-disable-line handle-callback-err + fs.unlink(child2, () => next()); }, (next) => { client @@ -195,7 +195,7 @@ describe('rebuild tests', function () { .then(() => { next(); }) - .catch((err) => { // eslint-disable-line handle-callback-err + .catch(() => { done(); }) .catch(done); diff --git a/mayastor-test/test_replica.js 
b/test/grpc/test_replica.js similarity index 97% rename from mayastor-test/test_replica.js rename to test/grpc/test_replica.js index e570c450c..b3b50ba34 100644 --- a/mayastor-test/test_replica.js +++ b/test/grpc/test_replica.js @@ -24,10 +24,15 @@ const BASE_UUID = 'c35fa4dd-d527-4b7b-9cf0-436b8bb0ba7'; const NVMF_URI = /^nvmf:\/\/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d{1,5}\/nqn.2019-05.io.openebs:/; // tunables of the test suite -var endpoint = process.env.MAYASTOR_ENDPOINT; -var disks = process.env.MAYASTOR_DISKS; +// +// TODO: This is more or less dead functionality that would probably be +// better removed. It was originally meant to allow running the tests +// against any mayastor instance; currently the test suite always starts +// mayastor and provides the necessary resources (e.g. disks) itself. +let endpoint = process.env.MAYASTOR_ENDPOINT; +let disks = process.env.MAYASTOR_DISKS; -var implicitDisk; +let implicitDisk; // Create fake disk device used for testing (size 100M) function createTestDisk (diskFile, done) { @@ -58,7 +63,7 @@ function destroyTestDisk (diskFile, loopDev, done) { } describe('replica', function () { - var client; + let client; this.timeout(10000); // for network tests we need long timeouts @@ -78,7 +83,6 @@ describe('replica', function () { if (!endpoint) { endpoint = common.grpcEndpoint; common.startMayastor(); - } else { } }); @@ -436,11 +440,12 @@ describe('replica', function () { const URING_SUPPORT_CMD = path.join( __dirname, '..', + '..', 'target', 'debug', 'uring-support' ); - var self = this; + const self = this; exec(URING_SUPPORT_CMD, (error) => { if (error) { self.skip(); diff --git a/mayastor-test/test_snapshot.js b/test/grpc/test_snapshot.js similarity index 99% rename from mayastor-test/test_snapshot.js rename to test/grpc/test_snapshot.js index 2ffc8768d..d06870e38 100644 --- a/mayastor-test/test_snapshot.js +++ b/test/grpc/test_snapshot.js @@ -39,11 +39,11 @@ pools: replicas: [] `; -var client, client2; -var disks, disks2; +let client, client2; +let disks, disks2; // URI of Nexus published over NVMf -var nexusUri; +let nexusUri; describe('snapshot', function () { this.timeout(10000); // for network tests we need long timeouts @@ -314,7 +314,7 @@ describe('snapshot', function () { res = res.replicas.filter((ent) => ent.pool === poolName); assert.lengthOf(res, 3); - var i; + let i; for (i = 1; i < 3; i++) { assert.equal(res[i].uuid.startsWith(replicaUuid + '-snap-'), true); assert.equal(res[i].share, 'REPLICA_NONE'); @@ -330,7 +330,7 @@ describe('snapshot', function () { res = res.replicas.filter((ent) => ent.pool === pool2Name); assert.lengthOf(res, 3); - var i; + let i; for (i = 1; i < 3; i++) { assert.equal(res[i].uuid.startsWith(replicaUuid + '-snap-'), true); assert.equal(res[i].share, 'REPLICA_NONE'); From 885445467613d34e634822f2fc74a6454464b61a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Dec 2020 21:32:58 +0000 Subject: [PATCH 37/85] Bump systeminformation from 4.30.5 to 4.31.1 in /test/grpc Bumps [systeminformation](https://github.com/sebhildebrandt/systeminformation) from 4.30.5 to 4.31.1.
- [Release notes](https://github.com/sebhildebrandt/systeminformation/releases) - [Changelog](https://github.com/sebhildebrandt/systeminformation/blob/master/CHANGELOG.md) - [Commits](https://github.com/sebhildebrandt/systeminformation/commits) Signed-off-by: dependabot[bot] --- test/grpc/package-lock.json | 6 +++--- test/grpc/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/grpc/package-lock.json b/test/grpc/package-lock.json index 3e84020c7..b804c188b 100644 --- a/test/grpc/package-lock.json +++ b/test/grpc/package-lock.json @@ -2763,9 +2763,9 @@ } }, "systeminformation": { - "version": "4.30.5", - "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.30.5.tgz", - "integrity": "sha512-aYWs8yttl8ePpr6VOQ/Ak8cznuc9L/NQODVhbOKhInX73ZMLvV2BS86Mzr7LLfmUteVFR36CTDNQgiJgRqq+SQ==" + "version": "4.31.1", + "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-4.31.1.tgz", + "integrity": "sha512-dVCDWNMN8ncMZo5vbMCA5dpAdMgzafK2ucuJy5LFmGtp1cG6farnPg8QNvoOSky9SkFoEX1Aw0XhcOFV6TnLYA==" }, "table": { "version": "5.4.6", diff --git a/test/grpc/package.json b/test/grpc/package.json index d8bb02e9f..624b7067e 100644 --- a/test/grpc/package.json +++ b/test/grpc/package.json @@ -24,7 +24,7 @@ "read": "^1.0.7", "semistandard": "^14.2.0", "sleep-promise": "^8.0.1", - "systeminformation": "^4.30.5", + "systeminformation": "^4.31.1", "wtfnode": "^0.8.1" }, "author": "Jan Kryl ", From a006480cb167a496328a6dfe2bbb9f584b155279 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Wed, 16 Dec 2020 16:24:47 +0000 Subject: [PATCH 38/85] Set nexus state to degraded when adding replica When adding a replica to a nexus, ensure that the nexus state in the MSV is set to degraded. --- csi/moac/nexus.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/csi/moac/nexus.ts b/csi/moac/nexus.ts index 58a5fb2be..f351295d1 100644 --- a/csi/moac/nexus.ts +++ b/csi/moac/nexus.ts @@ -262,6 +262,7 @@ export class Nexus { // confirmation back from the nexus, set it as pending this.children.push(new Child(childInfo.uri, childInfo.state)); this.children.sort(compareChildren); + this.state = "NEXUS_DEGRADED"; log.info(`Replica uri "${uri}" added to the nexus "${this}"`); this._emitMod(); } From f0aa3e1a3e016dd7a913d5e8227232bacb1017db Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Thu, 17 Dec 2020 08:01:09 +0000 Subject: [PATCH 39/85] Volume IO Tests CAS-505 CAS-506 Add basic volume IO tests for mayastor volumes shared over NVMe-oF TCP and iSCSI Remove redundant test in nvmf_vol Add basic-volume-io test to the e2e test regimen Make check for MSV impervious to liveness temporal jitter.
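For reference, a sketch of how the new suite can be invoked on its own,
mirroring what the updated scripts/e2e-test.sh does (the registry address
and pool device below are illustrative, and mayastor is assumed to have
already been deployed by the install test):

  cd test/e2e/basic_volume_io
  e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' \
    go test -v . -ginkgo.v -ginkgo.progress -timeout 0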
--- scripts/e2e-test.sh | 10 +- test/e2e/basic_volume_io/README.md | 33 +++++++ .../basic_volume_io/basic_volume_io_test.go | 93 +++++++++++++++++++ test/e2e/common/util.go | 9 +- test/e2e/nvmf_vol/deploy/fio_nvmf.yaml | 19 ---- test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml | 12 --- test/e2e/nvmf_vol/nvmf_vol_test.go | 65 ------------- 7 files changed, 138 insertions(+), 103 deletions(-) create mode 100644 test/e2e/basic_volume_io/README.md create mode 100644 test/e2e/basic_volume_io/basic_volume_io_test.go delete mode 100644 test/e2e/nvmf_vol/deploy/fio_nvmf.yaml delete mode 100644 test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml delete mode 100644 test/e2e/nvmf_vol/nvmf_vol_test.go diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 282017850..c7c3e26d0 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -1,10 +1,10 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash -set -e +set -eux SCRIPTDIR=$(dirname "$(realpath "$0")") REGISTRY=$1 -TESTS="install" +TESTS="install basic_volume_io" # TODO: Add proper argument parser if [ -z "$REGISTRY" ]; then @@ -18,7 +18,7 @@ export e2e_pool_device=/dev/nvme1n1 for dir in $TESTS; do cd "$SCRIPTDIR/../test/e2e/$dir" - if ! go test; then + if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0 ; then test_failed=1 break fi @@ -33,4 +33,4 @@ if [ -n "$test_failed" ]; then fi echo "All tests have passed" -exit 0 \ No newline at end of file +exit 0 diff --git a/test/e2e/basic_volume_io/README.md b/test/e2e/basic_volume_io/README.md new file mode 100644 index 000000000..3d1adc96f --- /dev/null +++ b/test/e2e/basic_volume_io/README.md @@ -0,0 +1,33 @@ +## About +Basic volume IO tests + +## Tests +### E2E: Volume IO Test (iSCSI) + +Feature: Export of Mayastor Volume over iSCSI + +Scenario: An application pod mounts a Mayastor Volume + +Given: Mayastor is deployed on a k8s cluster + And: One or more Storage Pools are configured and all have the status 'Healthy' +When: A PVC for a Mayastor-provisioned StorageClass is declared via a k8s client + And: The StorageClass parameters section has a key named protocol with the value iscsi + And: A Pod resource which consumes that PVC is declared via a k8s client +Then: The Pod is able to mount the volume + And: The Pod enters the 'Running' state + And: It can be verified that an application in the Pod can perform R/W I/O on the Mayastor Volume + +### E2E: Volume IO Test (NVMeF-TCP) + +Feature: Export of Mayastor Volume over NVMeF-TCP + +Scenario: An application pod can mount and use a Mayastor Volume exported over NVMF + +Given: Mayastor is deployed on a k8s cluster + And: One or more Storage Pools are configured and all have the status 'Healthy' +When: A PVC for a Mayastor-provisioned StorageClass is declared via a k8s client + And: The StorageClass parameters section has a key named protocol with the value nvmf + And: A Pod resource which consumes the PVC is declared via a k8s client +Then: The Pod is able to mount the volume + And: The Pod enters the 'Running' state + And: An application in the Pod can perform R/W I/O on the Mayastor Volume \ No newline at end of file diff --git a/test/e2e/basic_volume_io/basic_volume_io_test.go b/test/e2e/basic_volume_io/basic_volume_io_test.go new file mode 100644 index 000000000..afd21c15f --- /dev/null +++ b/test/e2e/basic_volume_io/basic_volume_io_test.go @@ -0,0 +1,93 @@ +// JIRA: CAS-505 +// JIRA: CAS-506 +package basic_volume_io_test + +import ( + "e2e-basic/common" + "testing" + + . "github.com/onsi/ginkgo" + .
"github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var defTimeoutSecs = "90s" + +type volSc struct { + volName string + scName string +} + +var podNames []string +var volNames []volSc + +func TestBasicVolumeIO(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI") +} + +func basicVolumeIOTest(scName string) { + volName := "basic-vol-io-test-" + scName + // Create the volume + common.MkPVC(volName, scName) + tmp := volSc{volName, scName} + volNames = append(volNames, tmp) + + // Create the fio Pod + fioPodName := "fio-" + volName + pod, err := common.CreateFioPod(fioPodName, volName) + Expect(err).ToNot(HaveOccurred()) + Expect(pod).ToNot(BeNil()) + podNames = append(podNames, fioPodName) + + // Wait for the fio Pod to transition to running + Eventually(func() bool { + return common.IsPodRunning(fioPodName) + }, + defTimeoutSecs, + "1s", + ).Should(Equal(true)) + + // Run the fio test + common.RunFio(fioPodName, 20) + podNames = podNames[:len(podNames)-1] + + // Delete the fio pod + err = common.DeletePod(fioPodName) + Expect(err).ToNot(HaveOccurred()) + + // Delete the volume + common.RmPVC(volName, scName) + volNames = volNames[:len(volNames)-1] +} + +var _ = Describe("Mayastor Volume IO test", func() { + It("should verify an NVMe-oF TCP volume can process IO", func() { + basicVolumeIOTest("mayastor-nvmf") + }) + It("should verify an iSCSI volume can process IO", func() { + basicVolumeIOTest("mayastor-iscsi") + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) + common.SetupTestEnv() + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // Cleanup resources leftover in the event of failure. + for _, pod := range podNames { + _ = common.DeletePod(pod) + } + for _, vol := range volNames { + common.RmPVC(vol.volName, vol.scName) + } + + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
By("tearing down the test environment") + common.TeardownTestEnv() +}) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 5b9ea7cf3..4b756a657 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -291,8 +291,13 @@ func MkPVC(volName string, scName string) string { "1s", // polling interval ).Should(Equal(corev1.VolumeBound)) - msv := GetMSV(string(pvc.ObjectMeta.UID)) - Expect(msv).ToNot(BeNil()) + Eventually(func() *MayastorVolStatus { + return GetMSV(string(pvc.ObjectMeta.UID)) + }, + defTimeoutSecs, + "1s", + ).Should(Not(BeNil())) + return string(pvc.ObjectMeta.UID) } diff --git a/test/e2e/nvmf_vol/deploy/fio_nvmf.yaml b/test/e2e/nvmf_vol/deploy/fio_nvmf.yaml deleted file mode 100644 index 9cef19e7b..000000000 --- a/test/e2e/nvmf_vol/deploy/fio_nvmf.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: Pod -apiVersion: v1 -metadata: - name: fio -spec: - volumes: - - name: ms-volume - persistentVolumeClaim: - claimName: vol-test-pvc-nvmf - containers: - - name: fio - image: nixery.dev/shell/fio/tini - command: [ "tini", "--" ] - args: - - sleep - - "1000000" - volumeMounts: - - mountPath: "/volume" - name: ms-volume diff --git a/test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml b/test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml deleted file mode 100644 index 7f80e30ae..000000000 --- a/test/e2e/nvmf_vol/deploy/pvc_nvmf.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: vol-test-pvc-nvmf -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 64Mi - # storageClassName: mayastor-iscsi - storageClassName: mayastor-nvmf diff --git a/test/e2e/nvmf_vol/nvmf_vol_test.go b/test/e2e/nvmf_vol/nvmf_vol_test.go deleted file mode 100644 index 79e077a07..000000000 --- a/test/e2e/nvmf_vol/nvmf_vol_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package nvmf_vol_test - -import ( - "fmt" - "testing" - - "e2e-basic/common" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var defTimeoutSecs = "90s" - -func nvmfTest() { - fmt.Printf("running fio\n") - common.RunFio("fio", 20) -} - -func TestNvmfVol(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss Test Suite") -} - -var _ = Describe("Mayastor nvmf IO test", func() { - It("should verify an nvmf volume can process IO", func() { - nvmfTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - common.SetupTestEnv() - - common.MkPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf") - common.ApplyDeployYaml("deploy/fio_nvmf.yaml") - - fmt.Printf("waiting for fio\n") - Eventually(func() bool { - return common.FioReadyPod() - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - fmt.Printf("removing fio pod\n") - common.DeleteDeployYaml("deploy/fio_nvmf.yaml") - - fmt.Printf("removing pvc\n") - common.RmPVC(fmt.Sprintf("vol-test-pvc-nvmf"), "mayastor-nvmf") - - common.TeardownTestEnv() -}) From dbf253686b727a7c1bb651e4ba3b3fc06e877a3a Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Thu, 17 Dec 2020 12:41:13 +0000 Subject: [PATCH 40/85] CAS-574 and cas-509 Refactored e2e disconnect tests for more reliable clean up on error and reduction of duplicated code. 
--- test/e2e/common/util.go | 2 +- .../node_disconnect_iscsi_drop_test.go | 54 ------- .../node_disconnect_iscsi_reject_test.go | 54 ------- .../node_disconnect_iscsi_reject_idle_test.go | 55 ------- .../lib/node_disconnect_lib.go | 143 ++++++++++-------- .../node_disconnect_setup.go} | 73 ++++----- .../node_disconnect_nvmf_drop_test.go | 54 ------- .../node_disconnect_nvmf_reject_test.go | 55 ------- .../node_disconnect_nvmf_reject_idle_test.go | 55 ------- .../nvmf_reject_reassign_test.go | 55 ------- .../replica_disconnection_test.go | 101 +++++++++++++ .../replica_reassign/replica_reassign_test.go | 65 ++++++++ .../teardown/node_disconnect_teardown_test.go | 71 --------- test/e2e/node_disconnect/test.sh | 19 +-- 14 files changed, 286 insertions(+), 570 deletions(-) delete mode 100644 test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go delete mode 100644 test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go delete mode 100644 test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go rename test/e2e/node_disconnect/{setup/node_disconnect_setup_test.go => lib/node_disconnect_setup.go} (60%) delete mode 100644 test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go delete mode 100644 test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go delete mode 100644 test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go delete mode 100644 test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go create mode 100644 test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go create mode 100644 test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go delete mode 100644 test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 4b756a657..a875ab323 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -213,7 +213,7 @@ func GetPvStatusPhase(volname string) (phase corev1.PersistentVolumePhase) { func GetMsvState(uuid string) string { msv := GetMSV(uuid) Expect(msv).ToNot(BeNil()) - return fmt.Sprintf("%s", msv.State) + return msv.State } // Retrieve the nexus node hosting the Mayastor Volume, diff --git a/test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go b/test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go deleted file mode 100644 index e2cea7062..000000000 --- a/test/e2e/node_disconnect/iscsi_drop/node_disconnect_iscsi_drop_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package node_disconnect_iscsi_drop_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "DROP" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss iSCSI drop") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go b/test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go deleted file mode 100644 index 07d728263..000000000 --- a/test/e2e/node_disconnect/iscsi_reject/node_disconnect_iscsi_reject_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package node_disconnect_iscsi_reject_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "REJECT" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss iSCSI reject") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. 
- By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go b/test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go deleted file mode 100644 index c80ace128..000000000 --- a/test/e2e/node_disconnect/iscsi_reject_idle/node_disconnect_iscsi_reject_idle_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package node_disconnect_iscsi_reject_idle_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "REJECT" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossWhenIdleTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss iSCSI reject when idle") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify iscsi nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-iscsi", "mayastor-iscsi-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/lib/node_disconnect_lib.go b/test/e2e/node_disconnect/lib/node_disconnect_lib.go index b36c6c07d..9359f7767 100644 --- a/test/e2e/node_disconnect/lib/node_disconnect_lib.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_lib.go @@ -9,16 +9,34 @@ import ( . 
"github.com/onsi/gomega" ) -var ( +const ( defTimeoutSecs = "90s" disconnectionTimeoutSecs = "90s" repairTimeoutSecs = "90s" ) +type DisconnectEnv struct { + nodeToIsolate string + otherNodes []string + uuid string + disconnectMethod string + volToDelete string + storageClass string + fioPodName string +} + +// Deploy an instance of fio on a node labelled as "podrefuge" +func createFioOnRefugeNode(podName string, volClaimName string) { + podObj := common.CreateFioPodDef(podName, volClaimName) + common.ApplyNodeSelectorToPodObject(podObj, "openebs.io/podrefuge", "true") + _, err := common.CreatePod(podObj) + Expect(err).ToNot(HaveOccurred()) +} + // disconnect a node from the other nodes in the cluster -func DisconnectNode(vmname string, otherNodes []string, method string) { +func DisconnectNode(nodeName string, otherNodes []string, method string) { for _, targetIP := range otherNodes { - cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "DISCONNECT", method) + cmd := exec.Command("bash", "../lib/io_connect_node.sh", nodeName, targetIP, "DISCONNECT", method) cmd.Dir = "./" _, err := cmd.CombinedOutput() Expect(err).ToNot(HaveOccurred()) @@ -26,9 +44,9 @@ func DisconnectNode(vmname string, otherNodes []string, method string) { } // reconnect a node to the other nodes in the cluster -func ReconnectNode(vmname string, otherNodes []string, checkError bool, method string) { - for _, targetIP := range otherNodes { - cmd := exec.Command("bash", "../lib/io_connect_node.sh", vmname, targetIP, "RECONNECT", method) +func (env *DisconnectEnv) ReconnectNode(checkError bool) { + for _, targetIP := range env.otherNodes { + cmd := exec.Command("bash", "../lib/io_connect_node.sh", env.nodeToIsolate, targetIP, "RECONNECT", env.disconnectMethod) cmd.Dir = "./" _, err := cmd.CombinedOutput() if checkError { @@ -38,7 +56,7 @@ func ReconnectNode(vmname string, otherNodes []string, checkError bool, method s } // return the node name to isolate and a vector of IP addresses to isolate -func GetNodes(uuid string) (string, []string) { +func getNodes(uuid string) (string, []string) { nodeList, err := common.GetNodeLocs() Expect(err).ToNot(HaveOccurred()) @@ -71,111 +89,118 @@ func GetNodes(uuid string) (string, []string) { } // Run fio against the cluster while a replica is being removed and reconnected to the network -func LossTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { +func (env *DisconnectEnv) LossTest() { fmt.Printf("running spawned fio\n") - go common.RunFio("fio", 20) + go common.RunFio(env.fioPodName, 20) time.Sleep(5 * time.Second) - fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) + fmt.Printf("disconnecting \"%s\"\n", env.nodeToIsolate) - DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + DisconnectNode(env.nodeToIsolate, env.otherNodes, env.disconnectMethod) fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { - return common.GetMsvState(uuid) + return common.GetMsvState(env.uuid) }, disconnectionTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal("degraded")) - fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid)) fmt.Printf("running fio while node is disconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) - fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) - ReconnectNode(nodeToIsolate, otherNodes, true, 
disconnectionMethod) + fmt.Printf("reconnecting \"%s\"\n", env.nodeToIsolate) + env.ReconnectNode(true) fmt.Printf("running fio when node is reconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) } // Remove the replica without running IO and verify that the volume becomes degraded but is still functional -func LossWhenIdleTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { - fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) +func (env *DisconnectEnv) LossWhenIdleTest() { + fmt.Printf("disconnecting \"%s\"\n", env.nodeToIsolate) - DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + DisconnectNode(env.nodeToIsolate, env.otherNodes, env.disconnectMethod) fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { - return common.GetMsvState(uuid) + return common.GetMsvState(env.uuid) }, disconnectionTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal("degraded")) - fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid)) fmt.Printf("running fio while node is disconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) - fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) - ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) + fmt.Printf("reconnecting \"%s\"\n", env.nodeToIsolate) + env.ReconnectNode(true) fmt.Printf("running fio when node is reconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) } // Run fio against the cluster while a replica node is being removed, // wait for the volume to become degraded, then wait for it to be repaired. // Run fio against repaired volume, and again after node is reconnected. 
-func ReplicaReassignTest(nodeToIsolate string, otherNodes []string, disconnectionMethod string, uuid string) { +func (env *DisconnectEnv) ReplicaReassignTest() { // This test needs at least 4 nodes, a refuge node, a mayastor node to isolate, and 2 other mayastor nodes - Expect(len(otherNodes)).To(BeNumerically(">=", 3)) + Expect(len(env.otherNodes)).To(BeNumerically(">=", 3)) fmt.Printf("running spawned fio\n") - go common.RunFio("fio", 20) + go common.RunFio(env.fioPodName, 20) time.Sleep(5 * time.Second) - fmt.Printf("disconnecting \"%s\"\n", nodeToIsolate) + fmt.Printf("disconnecting \"%s\"\n", env.nodeToIsolate) - DisconnectNode(nodeToIsolate, otherNodes, disconnectionMethod) + DisconnectNode(env.nodeToIsolate, env.otherNodes, env.disconnectMethod) fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { - return common.GetMsvState(uuid) + return common.GetMsvState(env.uuid) }, disconnectionTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal("degraded")) - fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid)) fmt.Printf("waiting up to %s for the volume to be repaired\n", repairTimeoutSecs) Eventually(func() string { - return common.GetMsvState(uuid) + return common.GetMsvState(env.uuid) }, repairTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal("healthy")) - fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(uuid)) + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid)) fmt.Printf("running fio while node is disconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) - fmt.Printf("reconnecting \"%s\"\n", nodeToIsolate) - ReconnectNode(nodeToIsolate, otherNodes, true, disconnectionMethod) + fmt.Printf("reconnecting \"%s\"\n", env.nodeToIsolate) + env.ReconnectNode(true) fmt.Printf("running fio when node is reconnected\n") - common.RunFio("fio", 20) + common.RunFio(env.fioPodName, 20) } -// Common steps required when setting up the test -func Setup(pvc_name string, storage_class_name string) string { - uuid := common.MkPVC(fmt.Sprintf(pvc_name), storage_class_name) +// Common steps required when setting up the test. 
+// Creates the PVC, deploys fio, determines the nodes used by the volume +// and selects a non-nexus replica to isolate +func Setup(pvcName string, storageClassName string, fioPodName string, disconnectMethod string) DisconnectEnv { + env := DisconnectEnv{} - CreateFioOnRefugeNode("fio", pvc_name) + env.uuid = common.MkPVC(pvcName, storageClassName) + env.volToDelete = pvcName + env.storageClass = storageClassName + env.disconnectMethod = disconnectMethod + + createFioOnRefugeNode(fioPodName, pvcName) fmt.Printf("waiting for fio\n") Eventually(func() bool { @@ -184,24 +209,22 @@ func Setup(pvc_name string, storage_class_name string) string { defTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal(true)) - return uuid -} + env.fioPodName = fioPodName -// Common steps required when tearing down the test -func Teardown(pvcName string, storageClassName string) { - - fmt.Printf("removing fio pod\n") - err := common.DeletePod("fio") - Expect(err).ToNot(HaveOccurred()) - - fmt.Printf("removing pvc\n") - common.RmPVC(fmt.Sprintf(pvcName), storageClassName) + env.nodeToIsolate, env.otherNodes = getNodes(env.uuid) + return env } -// Deploy an instance of fio on a node labelled as "podrefuge" -func CreateFioOnRefugeNode(podName string, vol_claim_name string) { - podObj := common.CreateFioPodDef(podName, vol_claim_name) - common.ApplyNodeSelectorToPodObject(podObj, "openebs.io/podrefuge", "true") - _, err := common.CreatePod(podObj) - Expect(err).ToNot(HaveOccurred()) +// Common steps required when tearing down the test +func (env *DisconnectEnv) Teardown() { + if env.fioPodName != "" { + fmt.Printf("removing fio pod\n") + err := common.DeletePod(env.fioPodName) + Expect(err).ToNot(HaveOccurred()) + env.fioPodName = "" + } + if env.volToDelete != "" { + common.RmPVC(env.volToDelete, env.storageClass) + env.volToDelete = "" + } } diff --git a/test/e2e/node_disconnect/setup/node_disconnect_setup_test.go b/test/e2e/node_disconnect/lib/node_disconnect_setup.go similarity index 60% rename from test/e2e/node_disconnect/setup/node_disconnect_setup_test.go rename to test/e2e/node_disconnect/lib/node_disconnect_setup.go index ad3f16fcb..03aa23e6e 100644 --- a/test/e2e/node_disconnect/setup/node_disconnect_setup_test.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_setup.go @@ -1,31 +1,22 @@ -package node_disconnect_setup_test +package node_disconnect_lib import ( "e2e-basic/common" "fmt" "sort" - "testing" - - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -const mayastor_regexp = "^mayastor-.....$" -const moac_regexp = "^moac-..........-.....$" +const mayastorRegexp = "^mayastor-.....$" +const moacRegexp = "^moac-..........-.....$" const namespace = "mayastor" const timeoutSeconds = 100 +// DisconnectSetup // Set up for disconnection tests. 
Ensure moac is on the refuge node but // no mayastor instances are -func disconnectSetupTest() { - // ensure we are using 2 replicas - common.MkStorageClass("mayastor-iscsi-2", 2, "iscsi", "io.openebs.csi-mayastor") - common.MkStorageClass("mayastor-nvmf-2", 2, "nvmf", "io.openebs.csi-mayastor") - +func DisconnectSetup() { nodeList, err := common.GetNodeLocs() Expect(err).ToNot(HaveOccurred()) Expect(len(nodeList)).To(BeNumerically(">=", 3)) @@ -45,7 +36,7 @@ func disconnectSetupTest() { } Expect(refugeNode).NotTo(Equal("")) - moacOnRefugeNode := common.PodPresentOnNode(moac_regexp, namespace, refugeNode) + moacOnRefugeNode := common.PodPresentOnNode(moacRegexp, namespace, refugeNode) // Update moac to ensure it stays on the refuge node (even if it currently is) fmt.Printf("apply moac node selector for node \"%s\"\n", refugeNode) @@ -62,7 +53,7 @@ func disconnectSetupTest() { // wait for moac to disappear from the cluster for _, node := range nodeList { fmt.Printf("waiting for moac absence from %s\n", node.NodeName) - err = common.WaitForPodAbsentFromNode(moac_regexp, namespace, node.NodeName, timeoutSeconds) + err = common.WaitForPodAbsentFromNode(moacRegexp, namespace, node.NodeName, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } @@ -72,47 +63,45 @@ func disconnectSetupTest() { // wait for moac to be running on the refuge node fmt.Printf("waiting for moac presence on %s\n", refugeNode) - err = common.WaitForPodRunningOnNode(moac_regexp, namespace, refugeNode, timeoutSeconds) + err = common.WaitForPodRunningOnNode(moacRegexp, namespace, refugeNode, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } // wait until all mayastor pods are in state "Running" and only on the non-refuge nodes fmt.Printf("waiting for mayastor absence from %s\n", refugeNode) - err = common.WaitForPodAbsentFromNode(mayastor_regexp, namespace, refugeNode, timeoutSeconds) + err = common.WaitForPodAbsentFromNode(mayastorRegexp, namespace, refugeNode, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) for _, node := range nodeList { if node.NodeName != refugeNode { fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) - err = common.WaitForPodRunningOnNode(mayastor_regexp, namespace, node.NodeName, timeoutSeconds) + err = common.WaitForPodRunningOnNode(mayastorRegexp, namespace, node.NodeName, timeoutSeconds) Expect(err).ToNot(HaveOccurred()) } } } -func TestNodeLossSetup(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss Test Setup") -} - -var _ = Describe("Mayastor disconnect setup", func() { - It("should correctly set up the cluster for disconnection testing", func() { - disconnectSetupTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - common.SetupTestEnv() +// DisconnectTeardown +// Remove the node selector modifications done in DisconnectSetup +func DisconnectTeardown() { + nodeList, err := common.GetNodeLocs() + Expect(err).ToNot(HaveOccurred()) + Expect(len(nodeList)).To(BeNumerically(">=", 3)) - close(done) -}, 60) + // apply/remove the labels whether present or not + // An error will not occur if the label is already present/absent + for _, node := range nodeList { + common.LabelNode(node.NodeName, "openebs.io/engine=mayastor") + common.UnlabelNode(node.NodeName, "openebs.io/podrefuge") + } -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. 
- By("tearing down the test environment") + fmt.Printf("remove moac node affinity\n") + common.RemoveAllNodeSelectorsFromDeployment("moac", namespace) - common.TeardownTestEnv() -}) + // wait until all nodes have mayastor pods in state "Running" + for _, node := range nodeList { + fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) + err = common.WaitForPodRunningOnNode(mayastorRegexp, namespace, node.NodeName, timeoutSeconds) + Expect(err).ToNot(HaveOccurred()) + } +} diff --git a/test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go b/test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go deleted file mode 100644 index 006154316..000000000 --- a/test/e2e/node_disconnect/nvmf_drop/node_disconnect_nvmf_drop_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package node_disconnect_nvmf_drop_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "DROP" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss NVMF drop") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go b/test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go deleted file mode 100644 index 7da49c833..000000000 --- a/test/e2e/node_disconnect/nvmf_reject/node_disconnect_nvmf_reject_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package node_disconnect_nvmf_reject_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "REJECT" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss NVMF reject") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go b/test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go deleted file mode 100644 index ecae43cf4..000000000 --- a/test/e2e/node_disconnect/nvmf_reject_idle/node_disconnect_nvmf_reject_idle_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package node_disconnect_nvmf_reject_idle_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "REJECT" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.LossWhenIdleTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss NVMF reject when idle") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify nvmf nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. 
- By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go b/test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go deleted file mode 100644 index 2a7c76037..000000000 --- a/test/e2e/node_disconnect/nvmf_reject_reassign/nvmf_reject_reassign_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package node_disconnect_nvmf_reassign_test - -import ( - "e2e-basic/common" - disconnect_lib "e2e-basic/node_disconnect/lib" - - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - g_nodeToIsolate = "" - g_otherNodes []string - g_uuid = "" - g_disconnectMethod = "REJECT" -) - -func lossTest() { - g_nodeToIsolate, g_otherNodes = disconnect_lib.GetNodes(g_uuid) - disconnect_lib.ReplicaReassignTest(g_nodeToIsolate, g_otherNodes, g_disconnectMethod, g_uuid) -} - -func TestNodeLoss(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss NVMF assign new replica") -} - -var _ = Describe("Mayastor node loss test", func() { - It("should verify nvmf nexus repair of volume when a node becomes inaccessible", func() { - lossTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - common.SetupTestEnv() - g_uuid = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2") - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - // ensure node is reconnected in the event of a test failure - disconnect_lib.ReconnectNode(g_nodeToIsolate, g_otherNodes, false, g_disconnectMethod) - disconnect_lib.Teardown("loss-test-pvc-nvmf", "mayastor-nvmf-2") - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go new file mode 100644 index 000000000..0b42aaa63 --- /dev/null +++ b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go @@ -0,0 +1,101 @@ +package replica_disconnection_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var gStorageClasses []string + +var env disconnect_lib.DisconnectEnv + +const reject = "REJECT" +const drop = "DROP" +const run_drop = false + +func TestNodeLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Replica disconnection tests") +} + +var _ = Describe("Mayastor replica disconnection test", func() { + + It("should create a refuge node and wait for the pods to re-deploy", func() { + disconnect_lib.DisconnectSetup() + }) + + It("should define the storage classes to use", func() { + common.MkStorageClass("mayastor-iscsi-2", 2, "iscsi", "io.openebs.csi-mayastor") + gStorageClasses = append(gStorageClasses, "mayastor-iscsi-2") + common.MkStorageClass("mayastor-nvmf-2", 2, "nvmf", "io.openebs.csi-mayastor") + gStorageClasses = append(gStorageClasses, "mayastor-nvmf-2") + }) + + It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { + env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env.LossTest() + env.Teardown() + }) + + It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { + env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) + env.LossTest() + env.Teardown() + }) + + if run_drop { + It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { + env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", drop) + env.LossTest() + env.Teardown() + }) + + It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { + env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", drop) + env.LossTest() + env.Teardown() + }) + } + + It("should verify nvmf nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { + env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env.LossWhenIdleTest() + env.Teardown() + }) + + It("should verify iscsi nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { + env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) + env.LossWhenIdleTest() + env.Teardown() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + env.ReconnectNode(false) + env.Teardown() + + for _, sc := range gStorageClasses { + common.RmStorageClass(sc) + } + disconnect_lib.DisconnectTeardown() + common.TeardownTestEnv() +}) diff --git a/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go new file mode 100644 index 000000000..e21e306e8 --- /dev/null +++ b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go @@ -0,0 +1,65 @@ +package replica_reassignment_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var gStorageClass string + +var env disconnect_lib.DisconnectEnv + +const reject = "REJECT" + +func TestReplicaReassign(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Replica reassignment test") +} + +var _ = Describe("Mayastor replica reassignment test", func() { + + It("should create a refuge node and wait for the pods to re-deploy", func() { + disconnect_lib.DisconnectSetup() + }) + + It("should define the storage class to use", func() { + common.MkStorageClass("mayastor-nvmf-2", 2, "nvmf", "io.openebs.csi-mayastor") + gStorageClass = "mayastor-nvmf-2" + }) + + It("should verify nvmf nexus repair of volume when a node becomes inaccessible", func() { + env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env.ReplicaReassignTest() + env.Teardown() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. + By("tearing down the test environment") + + // ensure node is reconnected in the event of a test failure + env.ReconnectNode(false) + env.Teardown() + + if gStorageClass != "" { + common.RmStorageClass(gStorageClass) + } + disconnect_lib.DisconnectTeardown() + common.TeardownTestEnv() +}) diff --git a/test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go b/test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go deleted file mode 100644 index 22ca26e1c..000000000 --- a/test/e2e/node_disconnect/teardown/node_disconnect_teardown_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package node_disconnect_teardown_test - -import ( - "e2e-basic/common" - "fmt" - - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -const mayastor_regexp = "^mayastor-.....$" -const namespace = "mayastor" -const timeoutSeconds = 100 - -func disconnectTeardownTest() { - common.RmStorageClass("mayastor-iscsi-2") - common.RmStorageClass("mayastor-nvmf-2") - - nodeList, err := common.GetNodeLocs() - Expect(err).ToNot(HaveOccurred()) - Expect(len(nodeList)).To(BeNumerically(">=", 3)) - - // apply/remove the labels whether present or not - // An error will not occur if the label is already present/absent - for _, node := range nodeList { - common.LabelNode(node.NodeName, "openebs.io/engine=mayastor") - common.UnlabelNode(node.NodeName, "openebs.io/podrefuge") - } - - fmt.Printf("remove moac node affinity\n") - common.RemoveAllNodeSelectorsFromDeployment("moac", namespace) - - // wait until all nodes have mayastor pods in state "Running" - for _, node := range nodeList { - fmt.Printf("waiting for mayastor presence on %s\n", node.NodeName) - err = common.WaitForPodRunningOnNode(mayastor_regexp, namespace, node.NodeName, timeoutSeconds) - Expect(err).ToNot(HaveOccurred()) - } -} - -func TestNodeLossTeardown(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Loss Test Teardown") -} - -var _ = Describe("Mayastor disconnect setup", func() { - It("should correctly tear down the cluster after disconnection testing", func() { - disconnectTeardownTest() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - common.SetupTestEnv() - - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - - common.TeardownTestEnv() -}) diff --git a/test/e2e/node_disconnect/test.sh b/test/e2e/node_disconnect/test.sh index c50bd0da7..3f64cf1bb 100755 --- a/test/e2e/node_disconnect/test.sh +++ b/test/e2e/node_disconnect/test.sh @@ -1,21 +1,12 @@ #!/usr/bin/env bash set -e -timeout=500 -(cd setup && go test -timeout "${timeout}s") +cd "$(dirname ${BASH_SOURCE[0]})" -(cd nvmf_reject && go test -timeout "${timeout}s") -(cd iscsi_reject && go test -timeout "${timeout}s") +timeout=1000 -(cd nvmf_reject_idle && go test -timeout "${timeout}s") -(cd iscsi_reject_idle && go test -timeout "${timeout}s") - -# These two tests currently fail so are run with -c (compile only) -(cd nvmf_drop && go test -c -timeout "${timeout}s") -(cd iscsi_drop && go test -c -timeout "${timeout}s") - -(cd nvmf_reject_reassign && go test -timeout "${timeout}s") - -(cd teardown && go test -timeout "${timeout}s") +(cd replica_disconnect && go test -timeout "${timeout}s") +# the following test requires a cluster with at least 4 nodes +(cd replica_reassign && go test -timeout "${timeout}s") From 9e1e0a109d5508643d8e37607a31a105670074d7 Mon Sep 17 00:00:00 2001 From: Arne Rusek Date: Thu, 17 Dec 2020 19:57:44 +0100 Subject: [PATCH 41/85] CAS-595: Do not overwrite versioned files in tests --- .gitignore | 1 + scripts/generate-deploy-yamls.sh | 22 +++++++++++++++++----- test/e2e/install/install_test.go | 9 +++++---- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 05d9fc517..670502f6a 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ **/*.tfstate* mayastor/local-randrw-0-verify.state mayastor/local-write_verify-0-verify.state +test-yamls/* diff --git a/scripts/generate-deploy-yamls.sh 
b/scripts/generate-deploy-yamls.sh
index 8ac8d1d7a..21fb5d7b5 100755
--- a/scripts/generate-deploy-yamls.sh
+++ b/scripts/generate-deploy-yamls.sh
@@ -4,13 +4,27 @@ set -e
 
 if [ "x$1" = x ]; then
 	cat <<EOF
-USAGE: $0 <version> [<registry>]
+USAGE: $0 [-t <target dir>] <version> [<registry>]
 
 Generate (some) deployment YAMLs from the helm chart and store them to deploy/
-in the repo.
+in the repo. If -t is specified do not put them to deploy/ but rather to the
+directory given.
 EOF
 	exit 1
 fi
+
+SCRIPTDIR="$(realpath "$(dirname "$0")")"
+
+if [ "$1" = "-t" ]; then
+	TARGET_DIR="$2"
+	shift 2
+else
+	TARGET_DIR="$SCRIPTDIR/../deploy"
+fi
+if [ ! -d "$TARGET_DIR" ]; then
+	mkdir -p "$TARGET_DIR"
+fi
+
 if [ "x$2" = x ]; then
 	mayastor_images_repo="NONE"
 else
@@ -25,8 +39,6 @@ if ! which helm > /dev/null 2>&1; then
 	exit 1
 fi
 
-SCRIPTDIR="$(realpath "$(dirname "$0")")"
-
 tmpd=$(mktemp -d /tmp/generate-deploy.sh.XXXXXXXX)
 # shellcheck disable=SC2064
 trap "rm -fr '$tmpd'" HUP QUIT EXIT TERM INT
@@ -37,4 +49,4 @@ else
 	helm template --set "mayastorImagesTag=$1,mayastorImagesRepo=$mayastor_images_repo" mayastor "$SCRIPTDIR/../chart" --output-dir="$tmpd" --namespace mayastor
 fi
 
-mv "$tmpd"/mayastor/templates/*.yaml "$SCRIPTDIR/../deploy/"
+mv "$tmpd"/mayastor/templates/*.yaml "$TARGET_DIR"
diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go
index 3e1df91f6..b06570972 100644
--- a/test/e2e/install/install_test.go
+++ b/test/e2e/install/install_test.go
@@ -128,7 +128,7 @@ func makeImageName(registryAddress string, imagename string, imageversion string
 }
 
 func generateYamls(registryAddress string) {
-	bashcmd := "../../../scripts/generate-deploy-yamls.sh ci " + registryAddress
+	bashcmd := "../../../scripts/generate-deploy-yamls.sh -t ../../../test-yamls ci " + registryAddress
 	cmd := exec.Command("bash", "-c", bashcmd)
 	out, err := cmd.CombinedOutput()
 	Expect(err).ToNot(HaveOccurred(), "%s", out)
@@ -207,15 +207,16 @@ func installMayastor() {
 
 	fmt.Printf("registry address %v, number of mayastor instances=%v\n", registryAddress, numMayastorInstances)
 
+	// FIXME use absolute paths, do not depend on CWD
 	applyDeployYaml("namespace.yaml")
 	applyDeployYaml("storage-class.yaml")
 	applyDeployYaml("moac-rbac.yaml")
 	applyDeployYaml("mayastorpoolcrd.yaml")
 	applyDeployYaml("nats-deployment.yaml")
 	generateYamls(registryAddress)
-	applyDeployYaml("csi-daemonset.yaml")
-	applyDeployYaml("moac-deployment.yaml")
-	applyDeployYaml("mayastor-daemonset.yaml")
+	applyDeployYaml("../test-yamls/csi-daemonset.yaml")
+	applyDeployYaml("../test-yamls/moac-deployment.yaml")
+	applyDeployYaml("../test-yamls/mayastor-daemonset.yaml")
 
 	// Given the yamls and the environment described in the test readme,
 	// we expect mayastor to be running on exactly 2 nodes.
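As an illustration of the pattern the patch above (CAS-595) introduces, rendering chart output into a scratch directory so the versioned files under deploy/ are never overwritten, a standalone Go sketch might look like the following. The helper name applyGeneratedYamls, the temp-directory handling and the kubectl invocation are assumptions made for this example and are not part of the patch; the committed tests drive the script through generateYamls() as shown above.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

// applyGeneratedYamls is a hypothetical helper, not part of the patch.
// It renders the helm chart into a throwaway directory using the new
// -t option of generate-deploy-yamls.sh and applies the rendered files,
// leaving the versioned deploy/ directory untouched.
func applyGeneratedYamls(tag string, registry string) error {
	dir, err := ioutil.TempDir("", "test-yamls-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	// Equivalent of: scripts/generate-deploy-yamls.sh -t <dir> <tag> <registry>
	render := exec.Command("./scripts/generate-deploy-yamls.sh", "-t", dir, tag, registry)
	if out, err := render.CombinedOutput(); err != nil {
		return fmt.Errorf("render failed: %v: %s", err, out)
	}

	// Apply everything that was rendered into the scratch directory.
	apply := exec.Command("kubectl", "apply", "-f", dir)
	if out, err := apply.CombinedOutput(); err != nil {
		return fmt.Errorf("apply failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := applyGeneratedYamls("ci", "127.0.0.1:5000"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```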
From 84ecbb86ccd57d6f2830602a1b1e374ed4e8f0d8 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Wed, 16 Dec 2020 15:54:19 +0000
Subject: [PATCH 42/85] Be able to test a specific tag of images in e2e tests

---
 Jenkinsfile                      |  9 ++++-
 scripts/e2e-test.sh              | 63 +++++++++++++++++++++++++++++---
 test/e2e/README.md               |  2 +
 test/e2e/install/README.md       |  4 +-
 test/e2e/install/install_test.go | 35 ++++++++----------
 5 files changed, 85 insertions(+), 28 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 249061219..0dfb39f2b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -137,6 +137,13 @@ pipeline {
     }
     stage('e2e tests') {
       agent { label 'nixos-mayastor' }
+      environment {
+        GIT_COMMIT_SHORT = sh(
+          // using printf to get rid of trailing newline
+          script: "printf \$(git rev-parse --short ${GIT_COMMIT})",
+          returnStdout: true
+        )
+      }
       steps {
         // Build images (REGISTRY is set in Jenkins' global configuration).
         // Note: We might want to build and test dev images that have more
@@ -146,7 +153,7 @@
         sh 'nix-store --delete /nix/store/*docker-image*'
         withCredentials([file(credentialsId: 'kubeconfig', variable: 'KUBECONFIG')]) {
           sh 'kubectl get nodes -o wide'
-          sh "nix-shell --run './scripts/e2e-test.sh ${env.REGISTRY}'"
+          sh "nix-shell --run './scripts/e2e-test.sh --device /dev/nvme1n1 --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\"'"
         }
       }
     }
diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index c7c3e26d0..9a225b73f 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -3,18 +3,69 @@ set -eux
 
 SCRIPTDIR=$(dirname "$(realpath "$0")")
-REGISTRY=$1
 TESTS="install basic_volume_io"
+DEVICE=
+REGISTRY=
+TAG=
 
-# TODO: Add proper argument parser
-if [ -z "$REGISTRY" ]; then
-	echo "Missing parameter registry"
+help() {
+  cat <<EOF
+Usage: $0 [OPTIONS]
+
+Options:
+  --device <device>      Device path to use for storage pools.
+  --registry <registry>  Registry to pull the mayastor images from.
+  --tag <tag>            Docker image tag of mayastor images (default "ci")
+
+Examples:
+  $0 --registry 127.0.0.1:5000 --tag a80ce0c
+EOF
+}
+
+# Parse arguments
+while [ "$#" -gt 0 ]; do
+  case "$1" in
+    -d|--device)
+      shift
+      DEVICE=$1
+      shift
+      ;;
+    -r|--registry)
+      shift
+      REGISTRY=$1
+      shift
+      ;;
+    -t|--tag)
+      shift
+      TAG=$1
+      shift
+      ;;
+    -h|--help)
+      help
+      exit 0
+      ;;
+    *)
+      echo "Unknown option: $1"
+      help
+      exit 1
+      ;;
+  esac
+done
+
+if [ -z "$DEVICE" ]; then
+	echo "Device for storage pools must be specified"
+	help
 	exit 1
 fi
+export e2e_pool_device=$DEVICE
 
+if [ -n "$TAG" ]; then
+	export e2e_image_tag="$TAG"
+fi
+if [ -n "$REGISTRY" ]; then
+	export e2e_docker_registry="$REGISTRY"
+fi
 
 test_failed=
-export e2e_docker_registry="$REGISTRY"
-export e2e_pool_device=/dev/nvme1n1
 
 for dir in $TESTS; do
 	cd "$SCRIPTDIR/../test/e2e/$dir"
diff --git a/test/e2e/README.md b/test/e2e/README.md
index ddf4fd4e1..6edd3d19c 100644
--- a/test/e2e/README.md
+++ b/test/e2e/README.md
@@ -48,3 +48,5 @@ If you'd like to run the tests as a whole (as they are run in our CI/CD
 pipeline) then use the script `./scripts/e2e-test.sh`.
 
 To run a particular test, cd to the directory with the tests and type `go test`.
+Most of the tests assume that mayastor is already installed. The `install` test
+can be run to do that.
diff --git a/test/e2e/install/README.md b/test/e2e/install/README.md
index a00cd5eb8..bd1aa6422 100644
--- a/test/e2e/install/README.md
+++ b/test/e2e/install/README.md
@@ -1,6 +1,8 @@
 # Running the install test
 
 Environment variables
+* `e2e_image_tag`
+  * Docker image tag used for mayastor images (the default is "ci")
 * `e2e_docker_registry`
   * The IP address:port of the registry to be used.
  * If unspecified then the assumption is that the test registry has been deployed in the cluster on port 30291, and a suitable IP address is selected.
@@ -11,5 +13,5 @@ Environment variables
   * pools are created for each node running mayastor, using the template file and the specified pool device.
 
 ```sh
-e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' go test
+e2e_image_tag="ci" e2e_docker_registry='192.168.122.1:5000' e2e_pool_device='/dev/nvme1n1' go test
 ```
diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go
index b06570972..aaef6d5db 100644
--- a/test/e2e/install/install_test.go
+++ b/test/e2e/install/install_test.go
@@ -39,12 +39,12 @@ var testEnv *envtest.Environment
 /// The assumption is that the test-registry is accessible via the IP addr of the master,
 /// or any node in the cluster if the master node does not exist
 /// TODO Refine how we work out the address of the test-registry
-func getTestClusterDetails() (string, int, []string, error) {
+func getTestClusterDetails() (string, string, int, []string, error) {
 	var master = ""
 	var nme = 0
 	nodeList := coreV1.NodeList{}
 	if (k8sClient.List(context.TODO(), &nodeList, &client.ListOptions{}) != nil) {
-		return master, nme, nil, errors.New("failed to list nodes")
+		return "", "", 0, nil, errors.New("failed to list nodes")
 	}
 	nodeIPs := make([]string, len(nodeList.Items))
 	for ix, k8node := range nodeList.Items {
@@ -65,7 +65,7 @@ func getTestClusterDetails() (string, int, []string, error) {
 
 	// At least one node where mayastor can be deployed must exist
 	if nme == 0 {
-		return "", 0, nil, errors.New("no usable nodes found for the mayastor engine")
+		return "", "", 0, nil, errors.New("no usable nodes found for the mayastor engine")
 	}
 
 	mayastorNodes := make([]string, nme)
@@ -86,9 +86,13 @@ func getTestClusterDetails() (string, int, []string, error) {
 	// Redundant check, but keep it anyway, we are writing a test after all.
 	// We should have found at least one node!
 	if len(nodeIPs) == 0 {
-		return "", 0, nil, errors.New("no usable nodes found")
+		return "", "", 0, nil, errors.New("no usable nodes found")
 	}
 
+	tag := os.Getenv("e2e_image_tag")
+	if len(tag) == 0 {
+		tag = "ci"
+	}
 	registry := os.Getenv("e2e_docker_registry")
 	if len(registry) == 0 {
 		// a registry was not specified
@@ -100,7 +104,7 @@
 			registry = nodeIPs[0] + ":30291"
 		}
 	}
-	return registry, nme, mayastorNodes, nil
+	return tag, registry, nme, mayastorNodes, nil
 }
 
 // Encapsulate the logic to find where the deploy yamls are
@@ -127,22 +131,13 @@ func makeImageName(registryAddress string, imagename string, imageversion string
 	return registryAddress + "/mayadata/" + imagename + ":" + imageversion
 }
 
-func generateYamls(registryAddress string) {
-	bashcmd := "../../../scripts/generate-deploy-yamls.sh -t ../../../test-yamls ci " + registryAddress
+func generateYamls(imageTag string, registryAddress string) {
+	bashcmd := fmt.Sprintf("../../../scripts/generate-deploy-yamls.sh -t ../../../test-yamls %s %s", imageTag, registryAddress)
 	cmd := exec.Command("bash", "-c", bashcmd)
 	out, err := cmd.CombinedOutput()
 	Expect(err).ToNot(HaveOccurred(), "%s", out)
 }
 
-func applyTemplatedYaml(filename string, imagename string, registryAddress string) {
-	fullimagename := makeImageName(registryAddress, imagename, "ci")
-	bashcmd := "IMAGE_NAME=" + fullimagename + " envsubst < " + filename + " | kubectl apply -f -"
-	cmd := exec.Command("bash", "-c", bashcmd)
-	cmd.Dir = getTemplateYamlDir()
-	out, err := cmd.CombinedOutput()
-	Expect(err).ToNot(HaveOccurred(), "%s", out)
-}
-
 // We expect this to fail a few times before it succeeds,
 // so no throwing errors from here.
 func mayastorReadyPodCount() int {
@@ -201,11 +196,11 @@ func createPools(mayastorNodes []string) {
 // We deliberately call out to kubectl, rather than constructing the client-go
 // objects, so that we can verify the local deploy yamls are correct.
 func installMayastor() {
-	registryAddress, numMayastorInstances, mayastorNodes, err := getTestClusterDetails()
+	imageTag, registryAddress, numMayastorInstances, mayastorNodes, err := getTestClusterDetails()
 	Expect(err).ToNot(HaveOccurred())
 	Expect(numMayastorInstances).ToNot(Equal(0))
 
-	fmt.Printf("registry address %v, number of mayastor instances=%v\n", registryAddress, numMayastorInstances)
+	fmt.Printf("tag %v, registry %v, # of mayastor instances=%v\n", imageTag, registryAddress, numMayastorInstances)
 
 	// FIXME use absolute paths, do not depend on CWD
 	applyDeployYaml("namespace.yaml")
@@ -213,7 +208,7 @@
 	applyDeployYaml("nats-deployment.yaml")
-	generateYamls(registryAddress)
+	generateYamls(imageTag, registryAddress)
 	applyDeployYaml("../test-yamls/csi-daemonset.yaml")
 	applyDeployYaml("../test-yamls/moac-deployment.yaml")
 	applyDeployYaml("../test-yamls/mayastor-daemonset.yaml")
@@ -226,7 +221,7 @@
 	).Should(Equal(numMayastorInstances))
 
 	Eventually(moacReadyPodCount(),
-		"60s", // timeout
+		"180s", // timeout
 		"1s",  // polling interval
 	).Should(Equal(1))

From 27c875e0532c6d2735e34d9bc5419e91a545acf2 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Mon, 21 Dec 2020 12:39:16 +0000
Subject: [PATCH 43/85] Jenkins worker is running out of space

Jenkins worker was running out of space because docker images were not
pruned. I decided to put all space reclaiming into a single script that is
run before each e2e stage.
So nix GC is no longer run as a service on worker nodes. The advantage is
that we can synchronize GC with running jobs that way.
---
 Jenkinsfile              | 14 ++++++++++++--
 doc/jenkins.md           |  8 --------
 scripts/reclaim-space.sh | 30 ++++++++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 10 deletions(-)
 create mode 100755 scripts/reclaim-space.sh

diff --git a/Jenkinsfile b/Jenkinsfile
index 0dfb39f2b..c327d41a6 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -145,17 +145,27 @@ pipeline {
         )
       }
       steps {
+        // e2e tests are the most demanding step for space on the disk so we
+        // test the free space here rather than repeating the same code in all
+        // stages.
+        sh "./scripts/reclaim-space.sh 10"
        // Build images (REGISTRY is set in Jenkins' global configuration).
         // Note: We might want to build and test dev images that have more
         // assertions instead but that complicates e2e tests a bit.
         sh "./scripts/release.sh --alias-tag ci --registry ${env.REGISTRY}"
-        // save space by removing docker images that are never reused
-        sh 'nix-store --delete /nix/store/*docker-image*'
         withCredentials([file(credentialsId: 'kubeconfig', variable: 'KUBECONFIG')]) {
           sh 'kubectl get nodes -o wide'
           sh "nix-shell --run './scripts/e2e-test.sh --device /dev/nvme1n1 --tag \"${env.GIT_COMMIT_SHORT}\" --registry \"${env.REGISTRY}\"'"
         }
       }
+      // Always remove all docker images because they are usually used just once
+      // and underlying pkgs are already cached by nix so they can be easily
+      // recreated.
+      post {
+        always {
+          sh 'docker image prune --all --force'
+        }
+      }
     }
     stage('push images') {
       agent { label 'nixos-mayastor' }
diff --git a/doc/jenkins.md b/doc/jenkins.md
index 12044b27c..77ab0f574 100644
--- a/doc/jenkins.md
+++ b/doc/jenkins.md
@@ -150,14 +150,6 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.).
   boot.kernelModules = [ "nbd" "xfs" "nvme_tcp" "kvm_intel" ];
   boot.extraModprobeConfig = "options kvm_intel nested=1";
 
-  nix.gc = {
-    automatic = true;
-    dates = "daily";
-  };
-  nix.extraOptions = ''
-    min-free = ${toString (10 * 1024 * 1024 * 1024)}
-  '';
-
   virtualisation.docker.enable = true;
 
   networking.firewall.enable = false;
diff --git a/scripts/reclaim-space.sh b/scripts/reclaim-space.sh
new file mode 100755
index 000000000..c6f027d2d
--- /dev/null
+++ b/scripts/reclaim-space.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env sh

+#
+# The script tries to free as much space as possible by removing nix packages
+# and docker images that aren't used.
+#
+
+set -e
+
+MIN_FREE_GIB=$1
+
+get_avail_gib() {
+  echo $(( $(df --output=avail / | awk 'NR == 2 { print $1 }' ) / 1024 / 1024 ))
+}
+
+free=$(get_avail_gib)
+echo "Available space in root partition: $free GiB"
+
+if [ -n "$MIN_FREE_GIB" ]; then
+  if [ "$free" -gt "$MIN_FREE_GIB" ]; then
+    exit 0
+  fi
+fi
+
+set -x
+nix-collect-garbage
+docker image prune --force --all
+set +x
+
+echo "Available space after cleanup: $(get_avail_gib) GiB"
\ No newline at end of file

From d496b0267313f9f35230052a493ca41f2fc7188d Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Wed, 23 Dec 2020 13:07:05 +0100
Subject: [PATCH 44/85] CAS-597: zero-copy should be configurable

This change allows configuring zero-copy along with any other available
socket options.
---
 mayastor/src/subsys/config/mod.rs  |  5 ++
 mayastor/src/subsys/config/opts.rs | 75 ++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+)

diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs
index bdee98f91..cd9830416 100644
--- a/mayastor/src/subsys/config/mod.rs
+++ b/mayastor/src/subsys/config/mod.rs
@@ -53,6 +53,7 @@ use crate::{
         NexusOpts,
         NvmeBdevOpts,
         NvmfTgtConfig,
+        PosixSocketOpts,
     },
     NvmfSubsystem,
 },
@@ -169,6 +170,7 @@ pub struct Config {
     pub implicit_share_base: bool,
     /// flag to enable or disable config sync
     pub sync_disable: bool,
+    pub socket_opts: PosixSocketOpts,
 }
 
 impl Default for Config {
@@ -186,6 +188,7 @@ impl Default for Config {
             pools: None,
             implicit_share_base: false,
             sync_disable: false,
+            socket_opts: Default::default(),
         }
     }
 }
@@ -258,6 +261,7 @@ impl Config {
             implicit_share_base: self.implicit_share_base,
             err_store_opts: self.err_store_opts.get(),
             sync_disable: self.sync_disable,
+            socket_opts: self.socket_opts.get(),
         };
 
         // collect nexus bdevs and insert them into the config
@@ -352,6 +356,7 @@ impl Config {
     /// default trait for that structure.
     pub fn apply(&self) {
         info!("Applying Mayastor configuration settings");
+        assert_eq!(self.socket_opts.set(), true);
         // note: nvmf target does not have a set method
         assert_eq!(self.nvme_bdev_opts.set(), true);
         self.bdev_opts.set();
diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs
index 1990a12e5..b609c60f8 100644
--- a/mayastor/src/subsys/config/opts.rs
+++ b/mayastor/src/subsys/config/opts.rs
@@ -16,6 +16,9 @@ use spdk_sys::{
     spdk_iscsi_opts,
     spdk_nvmf_target_opts,
     spdk_nvmf_transport_opts,
+    spdk_sock_impl_get_opts,
+    spdk_sock_impl_opts,
+    spdk_sock_impl_set_opts,
     SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT,
 };
 
@@ -482,6 +485,78 @@ impl GetOpts for IscsiTgtOpts {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(default, deny_unknown_fields)]
+pub struct PosixSocketOpts {
+    recv_buf_size: u32,
+    send_buf_size: u32,
+    enable_recv_pipe: bool,
+    enable_zero_copy_send: bool,
+    enable_quickack: bool,
+    enable_placement_id: bool,
+}
+
+impl Default for PosixSocketOpts {
+    fn default() -> Self {
+        Self {
+            recv_buf_size: 2097152,
+            send_buf_size: 2097152,
+            enable_recv_pipe: true,
+            enable_zero_copy_send: true,
+            enable_quickack: true,
+            enable_placement_id: true,
+        }
+    }
+}
+
+impl GetOpts for PosixSocketOpts {
+    fn get(&self) -> Self {
+        let opts = spdk_sock_impl_opts::default();
+
+        unsafe {
+            let name = std::ffi::CString::new("posix").unwrap();
+            let mut size = std::mem::size_of::<spdk_sock_impl_opts>() as u64;
+            let rc = spdk_sock_impl_get_opts(
+                name.as_ptr(),
+                &opts as *const _ as *mut spdk_sock_impl_opts,
+                &mut size,
+            );
+            assert_eq!(rc, 0);
+        };
+
+        Self {
+            recv_buf_size: opts.recv_buf_size,
+            send_buf_size: opts.send_buf_size,
+            enable_recv_pipe: opts.enable_recv_pipe,
+            enable_zero_copy_send: opts.enable_zerocopy_send,
+            enable_quickack: opts.enable_quickack,
+            enable_placement_id: opts.enable_placement_id,
+        }
+    }
+
+    fn set(&self) -> bool {
+        let opts = spdk_sock_impl_opts {
+            recv_buf_size: self.recv_buf_size,
+            send_buf_size: self.send_buf_size,
+            enable_recv_pipe: self.enable_recv_pipe,
+            enable_zerocopy_send: self.enable_zero_copy_send,
+            enable_quickack: self.enable_quickack,
+            enable_placement_id: self.enable_placement_id,
+        };
+
+        let size = std::mem::size_of::<spdk_sock_impl_opts>() as u64;
+        unsafe {
+            let name = std::ffi::CString::new("posix").unwrap();
+            let rc = spdk_sock_impl_set_opts(
+                name.as_ptr(),
+                &opts as *const _ as *mut spdk_sock_impl_opts,
+                size,
+            );
+            rc == 0
+        }
+    }
+}
+
 #[serde(default, deny_unknown_fields)]
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ErrStoreOpts {

From ca1948965cbf40ec116e65a68fd2417ee4870594 Mon Sep 17 00:00:00 2001
From: Jeffry Molanus
Date: Mon, 28 Dec 2020 14:34:42 +0100
Subject: [PATCH 45/85] CAS-598: Want ENV support for config options

Setting environment variables is a common pattern within the container
ecosystem. This change allows for manipulating the most commonly tuned
variables by setting KEY=value in the environment. When a YAML file is
used, the configuration in the YAML file wins.
---
 mayastor/src/subsys/config/mod.rs  | 13 +++--
 mayastor/src/subsys/config/opts.rs | 76 +++++++++++++++++++++---------
 2 files changed, 62 insertions(+), 27 deletions(-)

diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs
index cd9830416..ecc7ffbc2 100644
--- a/mayastor/src/subsys/config/mod.rs
+++ b/mayastor/src/subsys/config/mod.rs
@@ -352,15 +352,20 @@ impl Config {
     }
 
     /// apply the hybrid configuration that is loaded from YAML. Hybrid in the
-    /// sense that options not defined, will default to values defined by the
-    /// default trait for that structure.
+    /// sense that options not defined, will default to the impl of Default.
+    ///
+    /// Note: for nvmf there is no set/get option. This is because of the way
+    /// transports are constructed. The target accepts an opt parameter, thus
+    /// it does not consult a global (mutable) data structure
     pub fn apply(&self) {
         info!("Applying Mayastor configuration settings");
         assert_eq!(self.socket_opts.set(), true);
-        // note: nvmf target does not have a set method
         assert_eq!(self.nvme_bdev_opts.set(), true);
-        self.bdev_opts.set();
+        assert_eq!(self.bdev_opts.set(), true);
+
+        // no way to validate this
         self.iscsi_tgt_conf.set();
+        debug!("{:#?}", self);
     }
 
     /// create any nexus bdevs; any failure will be logged, but we will silently
diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs
index b609c60f8..05cc97b73 100644
--- a/mayastor/src/subsys/config/opts.rs
+++ b/mayastor/src/subsys/config/opts.rs
@@ -23,6 +23,10 @@ use spdk_sys::{
 };
 
 use crate::bdev::ActionType;
+use std::{
+    fmt::{Debug, Display},
+    str::FromStr,
+};
 
 pub trait GetOpts {
     fn get(&self) -> Self;
@@ -89,7 +93,7 @@ pub struct NvmfTgtConfig {
     /// the max number of namespaces this target should allow for
     pub max_namespaces: u32,
     /// TCP transport options
-    pub opts: TcpTransportOpts,
+    pub opts: NvmfTcpTransportOpts,
 }
 
 impl From<NvmfTgtConfig> for Box<spdk_nvmf_target_opts> {
@@ -112,7 +116,7 @@ impl Default for NvmfTgtConfig {
         Self {
             name: "mayastor_target".to_string(),
             max_namespaces: 110,
-            opts: TcpTransportOpts::default(),
+            opts: NvmfTcpTransportOpts::default(),
         }
     }
 }
@@ -126,7 +130,7 @@ impl GetOpts for NvmfTgtConfig {
 /// Settings for the TCP transport
 #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
 #[serde(default, deny_unknown_fields)]
-pub struct TcpTransportOpts {
+pub struct NvmfTcpTransportOpts {
     /// max queue depth
     max_queue_depth: u16,
     /// max qpairs per controller
@@ -149,16 +153,39 @@ pub struct TcpTransportOpts {
     abort_timeout_sec: u32,
 }
 
+/// try to read an env variable or return the default when not found
+fn try_from_env<T>(name: &str, default: T) -> T
+where
+    T: FromStr + Display + Copy,
+    <T as FromStr>::Err: Debug + Display,
+{
+    std::env::var(name).map_or_else(
+        |_| default,
+        |v| {
+            match v.parse::<T>() {
+                Ok(val) => {
+                    info!("Overriding {} value to '{}'", name, val);
+                    val
+                },
+                Err(e) => {
+                    error!("Invalid value: {} (error {}) specified for {}. Reverting to default ({})", v, e, name, default);
+                    default
+                }
+            }
+        },
+    )
+}
+
 impl Default for NvmfTcpTransportOpts {
     fn default() -> Self {
         Self {
-            max_queue_depth: 64,
+            max_queue_depth: try_from_env("NVMF_TCP_MAX_QUEUE_DEPTH", 64),
             in_capsule_data_size: 4096,
             max_io_size: 131_072,
             io_unit_size: 131_072,
             max_qpairs_per_ctrl: 128,
-            num_shared_buf: 2048,
-            buf_cache_size: 64,
+            num_shared_buf: try_from_env("NVMF_TCP_NUM_SHARED_BUF", 2048),
+            buf_cache_size: try_from_env("NVMF_TCP_BUF_CACHE_SIZE", 64),
             dif_insert_or_strip: false,
             max_aq_depth: 128,
             abort_timeout_sec: 1,
@@ -169,8 +196,8 @@ impl Default for TcpTransportOpts {
 /// we cannot add derives for YAML to these structs directly, so we need to
 /// copy them. The upside though, is that if the FFI structures change, we will
 /// know about it during compile time.
-impl From<TcpTransportOpts> for spdk_nvmf_transport_opts {
-    fn from(o: TcpTransportOpts) -> Self {
+impl From<NvmfTcpTransportOpts> for spdk_nvmf_transport_opts {
+    fn from(o: NvmfTcpTransportOpts) -> Self {
         Self {
             max_queue_depth: o.max_queue_depth,
             max_qpairs_per_ctrlr: o.max_qpairs_per_ctrl,
@@ -242,15 +269,18 @@ impl Default for NvmeBdevOpts {
     fn default() -> Self {
         Self {
             action_on_timeout: SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT,
-            timeout_us: 30_000_000,
-            keep_alive_timeout_ms: 10_000,
-            retry_count: 3,
+            timeout_us: try_from_env("NVME_TIMEOUT_US", 30_000_000),
+            keep_alive_timeout_ms: try_from_env("NVME_KATO_MS", 10_000),
+            retry_count: try_from_env("NVME_RETRY_COUNT", 3),
             arbitration_burst: 0,
             low_priority_weight: 0,
             medium_priority_weight: 0,
             high_priority_weight: 0,
-            nvme_adminq_poll_period_us: 0,
-            nvme_ioq_poll_period_us: 0,
+            nvme_adminq_poll_period_us: try_from_env(
+                "NVME_ADMINQ_POLL_PERIOD_US",
+                0,
+            ),
+            nvme_ioq_poll_period_us: try_from_env("NVME_IOQ_POLL_PERIOD_US", 0),
             io_queue_requests: 0,
             delay_cmd_submit: true,
         }
@@ -327,8 +357,8 @@ impl GetOpts for BdevOpts {
 impl Default for BdevOpts {
     fn default() -> Self {
         Self {
-            bdev_io_pool_size: 65535,
-            bdev_io_cache_size: 512,
+            bdev_io_pool_size: try_from_env("BDEV_IO_POOL_SIZE", 65535),
+            bdev_io_cache_size: try_from_env("BDEV_IO_CACHE_SIZE", 512),
         }
     }
 }
@@ -401,7 +431,7 @@ impl Default for IscsiTgtOpts {
         Self {
             authfile: "".to_string(),
             nodebase: "iqn.2019-05.io.openebs".to_string(),
-            timeout: 5,
+            timeout: try_from_env("ISCSI_TIMEOUT_SEC", 30),
             nop_ininterval: 1,
             disable_chap: false,
             require_chap: false,
@@ -499,12 +529,12 @@ pub struct PosixSocketOpts {
 impl Default for PosixSocketOpts {
     fn default() -> Self {
         Self {
-            recv_buf_size: 2097152,
-            send_buf_size: 2097152,
-            enable_recv_pipe: true,
-            enable_zero_copy_send: true,
-            enable_quickack: true,
-            enable_placement_id: true,
+            recv_buf_size: try_from_env("SOCK_RECV_BUF_SIZE", 2097152),
+            send_buf_size: try_from_env("SOCK_SEND_BUF_SIZE", 2097152),
+            enable_recv_pipe: try_from_env("SOCK_ENABLE_RECV_PIPE", true),
+            enable_zero_copy_send: try_from_env("SOCK_ZERO_COPY_SEND", true),
+            enable_quickack: try_from_env("SOCK_ENABLE_QUICKACK", true),
+            enable_placement_id: try_from_env("SOCK_ENABLE_PLACEMENT_ID", true),
         }
     }
 }

From f94ad186d8f6ca5f33016cef6ed97ed2f597a091 Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Fri, 18 Dec 2020 14:20:42 +0000
Subject: [PATCH 46/85] Fixes for pvc stress test

Refactor to move tests for Create/Delete (CD) and
Create/Read/Update/Delete (CRUD) into a single file.
CD tests are performed prior to CRUD.
The number of cycles for the CD and CRUD tests can now be set using
environment variables.
Update README.md.
Remove the test runner script.
---
 test/e2e/nightly/README.md                    |  38 +++-
 .../e2e/nightly/pvc_stress/pvc_stress_test.go | 168 ------------------
 .../pvc_stress_fio/pvc_stress_fio_test.go     | 132 ++++++++++----
 test/e2e/nightly/test.sh                      |  19 --
 4 files changed, 136 insertions(+), 221 deletions(-)
 delete mode 100644 test/e2e/nightly/pvc_stress/pvc_stress_test.go
 delete mode 100755 test/e2e/nightly/test.sh

diff --git a/test/e2e/nightly/README.md b/test/e2e/nightly/README.md
index 513072921..0a1dbef1a 100644
--- a/test/e2e/nightly/README.md
+++ b/test/e2e/nightly/README.md
@@ -6,7 +6,7 @@ To run the tests use the `test.sh` file.
 When adding a test make sure to bump the timeout value suitably.
 
 ## Tests
-### pvc_stress
+### pvc_stress_fio
 ```
 Do {
@@ -31,3 +31,39 @@ Then: The PVC and its corresponding PV should be removed
 
 } While (<100 cycles)
 ```
+```
+Do {
+
+Scenario: A Mayastor deployment should respond correctly to new Mayastor PVC declarations
+  Given: Mayastor is deployed on a Kubernetes cluster
+  And: A StorageClass resource is defined for the Mayastor CSI plugin provisioner
+  When: A new, valid PVC resource which references that StorageClass is declared via a k8s client
+  Then: A corresponding PV should be dynamically provisioned
+  And: The reported status of the PVC and PV resources should become ‘Bound’
+  And: A corresponding MayastorVolume CR should be created
+  And: The reported status of the MayastorVolume should become 'healthy'
+  And: A test application can mount the provisioned volume successfully.
+  And: The test application can read and write to the mounted volume successfully.
+
+Scenario: A Mayastor deployment should respond correctly to the deletion of PVC resources
+
+Given: A Mayastor deployment with PVs which have been dynamically provisioned by the Mayastor CSI plugin
+When: A volume provisioned by Mayastor is mounted by an application
+Then: The provisioned volume can be unmounted
+  And: The PVC resource is deleted via a k8s client
+  And: The PVC is not mounted by a pod
+  And: The PVC references a StorageClass which is provisioned by Mayastor
+Then: The PVC and its corresponding PV should be removed
+  And: The MayastorVolume CR should be removed
+
+} While (<100 cycles)
+```
+
+Note: For development purposes the number of cycles for each test can be
+changed through environment variables:
+ 1. `e2e_pvc_stress_cd_cycles`
+ 2. `e2e_pvc_stress_crud_cycles`
+
+To run the tests from here use the command line below.
+```
+go test -v ./... -ginkgo.v -ginkgo.progress -timeout 0
+```
diff --git a/test/e2e/nightly/pvc_stress/pvc_stress_test.go b/test/e2e/nightly/pvc_stress/pvc_stress_test.go
deleted file mode 100644
index 7cd123b10..000000000
--- a/test/e2e/nightly/pvc_stress/pvc_stress_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// JIRA: CAS-500
-package pvc_stress_test
-
-import (
-	"fmt"
-	"testing"
-
-	Cmn "e2e-basic/common"
-
-	coreV1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-)
-
-var defTimeoutSecs = "30s"
-
-// Create a PVC and verify that
-// 1. The PVC status transitions to bound,
-// 2. The associated PV is created and its status transitions bound
-// 3.
The associated MV is created and has a State "healthy" -// then Delete the PVC and verify that -// 1. The PVC is deleted -// 2. The associated PV is deleted -// 3. The associated MV is deleted -func testPVC(volName string, scName string) { - fmt.Printf("%s, %s\n", volName, scName) - // PVC create options - createOpts := &coreV1.PersistentVolumeClaim{ - ObjectMeta: metaV1.ObjectMeta{ - Name: volName, - Namespace: "default", - }, - Spec: coreV1.PersistentVolumeClaimSpec{ - StorageClassName: &scName, - AccessModes: []coreV1.PersistentVolumeAccessMode{coreV1.ReadWriteOnce}, - Resources: coreV1.ResourceRequirements{ - Requests: coreV1.ResourceList{ - coreV1.ResourceStorage: resource.MustParse("64Mi"), - }, - }, - }, - } - - // Create the PVC. - _, createErr := Cmn.CreatePVC(createOpts) - Expect(createErr).To(BeNil()) - - // Confirm the PVC has been created. - pvc, getPvcErr := Cmn.GetPVC(volName) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - - // Wait for the PVC to be bound. - Eventually(func() coreV1.PersistentVolumeClaimPhase { - return Cmn.GetPvcStatusPhase(volName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(coreV1.ClaimBound)) - - // Refresh the PVC contents, so that we can get the PV name. - pvc, getPvcErr = Cmn.GetPVC(volName) - Expect(getPvcErr).To(BeNil()) - Expect(pvc).ToNot(BeNil()) - - // Wait for the PV to be provisioned - Eventually(func() *coreV1.PersistentVolume { - pv, getPvErr := Cmn.GetPV(pvc.Spec.VolumeName) - if getPvErr != nil { - return nil - } - return pv - - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Not(BeNil())) - - // Wait for the PV to be bound. - Eventually(func() coreV1.PersistentVolumePhase { - return Cmn.GetPvStatusPhase(pvc.Spec.VolumeName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(coreV1.VolumeBound)) - - // Wait for the MSV to be provisioned - Eventually(func() *Cmn.MayastorVolStatus { - return Cmn.GetMSV(string(pvc.ObjectMeta.UID)) - }, - defTimeoutSecs, //timeout - "1s", // polling interval - ).Should(Not(BeNil())) - - // Wait for the MSV to be healthy - Eventually(func() string { - return Cmn.GetMsvState(string(pvc.ObjectMeta.UID)) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal("healthy")) - - // Delete the PVC - deleteErr := Cmn.DeletePVC(volName) - Expect(deleteErr).To(BeNil()) - - // Wait for the PVC to be deleted. - Eventually(func() bool { - return Cmn.IsPVCDeleted(volName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - // Wait for the PV to be deleted. - Eventually(func() bool { - return Cmn.IsPVDeleted(pvc.Spec.VolumeName) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) - - // Wait for the MSV to be deleted. 
- Eventually(func() bool { - return Cmn.IsMSVDeleted(string(pvc.ObjectMeta.UID)) - }, - defTimeoutSecs, // timeout - "1s", // polling interval - ).Should(Equal(true)) -} - -func stressTestPVC() { - for ix := 0; ix < 100; ix++ { - testPVC(fmt.Sprintf("stress-pvc-nvmf-%d", ix), "mayastor-nvmf") - testPVC(fmt.Sprintf("stress-pvc-iscsi-%d", ix), "mayastor-iscsi") - } -} - -func TestPVCStress(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "PVC Stress Test Suite") -} - -var _ = Describe("Mayastor PVC Stress test", func() { - It("should stress test use of PVCs provisioned over iSCSI and NVMe-of", func() { - stressTestPVC() - }) -}) - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - Cmn.SetupTestEnv() - close(done) -}, 60) - -var _ = AfterSuite(func() { - // NB This only tears down the local structures for talking to the cluster, - // not the kubernetes cluster itself. - By("tearing down the test environment") - Cmn.TeardownTestEnv() -}) diff --git a/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go index 32da92991..ede54a57c 100644 --- a/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go +++ b/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go @@ -3,6 +3,8 @@ package pvc_stress_fio_test import ( "fmt" + "os" + "strconv" "testing" Cmn "e2e-basic/common" @@ -18,19 +20,35 @@ import ( . "github.com/onsi/gomega" ) -var defTimeoutSecs = "30s" +var defTimeoutSecs = "60s" -// Create a PVC and verify that +// Create Delete iterations +var cdIterations = 100 + +// Create Read Update Delete iterations +var crudIterations = 100 + +// volume name and associated storage class name +// parameters required by RmPVC +type volSc struct { + volName string + scName string +} + +var podNames []string +var volNames []volSc + +// Create a PVC and verify that (also see and keep in sync with README.md#pvc_stress_fio) // 1. The PVC status transitions to bound, // 2. The associated PV is created and its status transitions bound // 3. The associated MV is created and has a State "healthy" -// 4. That a test application (fio) can read and write to the volume +// 4. Optionally that a test application (fio) can read and write to the volume // then Delete the PVC and verify that // 1. The PVC is deleted // 2. The associated PV is deleted // 3. The associated MV is deleted -func testPVC(volName string, scName string) { - fmt.Printf("%s, %s\n", volName, scName) +func testPVC(volName string, scName string, runFio bool) { + fmt.Printf("volume: %s, storageClass:%s, run FIO:%v\n", volName, scName, runFio) // PVC create options createOpts := &coreV1.PersistentVolumeClaim{ ObjectMeta: metaV1.ObjectMeta{ @@ -47,7 +65,6 @@ func testPVC(volName string, scName string) { }, }, } - // Create the PVC. _, createErr := Cmn.CreatePVC(createOpts) Expect(createErr).To(BeNil()) @@ -57,6 +74,10 @@ func testPVC(volName string, scName string) { Expect(getPvcErr).To(BeNil()) Expect(pvc).ToNot(BeNil()) + // For cleanup + tmp := volSc{volName, scName} + volNames = append(volNames, tmp) + // Wait for the PVC to be bound. 
Eventually(func() coreV1.PersistentVolumeClaimPhase { return Cmn.GetPvcStatusPhase(volName) @@ -107,26 +128,34 @@ func testPVC(volName string, scName string) { "1s", // polling interval ).Should(Equal("healthy")) - // Create the fio Pod - fioPodName := "fio-" + volName - pod, err := Cmn.CreateFioPod(fioPodName, volName) - Expect(err).ToNot(HaveOccurred()) - Expect(pod).ToNot(BeNil()) + if runFio { + // Create the fio Pod + fioPodName := "fio-" + volName + pod, err := Cmn.CreateFioPod(fioPodName, volName) + Expect(err).ToNot(HaveOccurred()) + Expect(pod).ToNot(BeNil()) - // Wait for the fio Pod to transition to running - Eventually(func() bool { - return Cmn.IsPodRunning(fioPodName) - }, - defTimeoutSecs, - "1s", - ).Should(Equal(true)) + // For cleanup + podNames = append(podNames, fioPodName) - // Run the fio test - Cmn.RunFio(fioPodName, 5) + // Wait for the fio Pod to transition to running + Eventually(func() bool { + return Cmn.IsPodRunning(fioPodName) + }, + defTimeoutSecs, + "1s", + ).Should(Equal(true)) - // Delete the fio pod - err = Cmn.DeletePod(fioPodName) - Expect(err).ToNot(HaveOccurred()) + // Run the fio test + Cmn.RunFio(fioPodName, 5) + + // Delete the fio pod + err = Cmn.DeletePod(fioPodName) + Expect(err).ToNot(HaveOccurred()) + + // cleanup + podNames = podNames[:len(podNames)-1] + } // Delete the PVC deleteErr := Cmn.DeletePVC(volName) @@ -136,8 +165,8 @@ func testPVC(volName string, scName string) { Eventually(func() bool { return Cmn.IsPVCDeleted(volName) }, - defTimeoutSecs, // timeout - "1s", // polling interval + "120s", // timeout + "1s", // polling interval ).Should(Equal(true)) // Wait for the PV to be deleted. @@ -155,12 +184,19 @@ func testPVC(volName string, scName string) { defTimeoutSecs, // timeout "1s", // polling interval ).Should(Equal(true)) + + // cleanup + volNames = volNames[:len(volNames)-1] } -func stressTestPVC() { - for ix := 0; ix < 10; ix++ { - testPVC(fmt.Sprintf("stress-pvc-nvmf-%d", ix), "mayastor-nvmf") - testPVC(fmt.Sprintf("stress-pvc-iscsi-%d", ix), "mayastor-iscsi") +func stressTestPVC(iters int, runFio bool) { + decoration := "" + if runFio { + decoration = "-io" + } + for ix := 1; ix <= iters; ix++ { + testPVC(fmt.Sprintf("stress-pvc-nvmf%s-%d", decoration, ix), "mayastor-nvmf", runFio) + testPVC(fmt.Sprintf("stress-pvc-iscsi%s-%d", decoration, ix), "mayastor-iscsi", runFio) } } @@ -169,20 +205,50 @@ func TestPVCStress(t *testing.T) { RunSpecs(t, "PVC Stress Test Suite") } -var _ = Describe("Mayastor PVC Stress test with fio", func() { - It("should stress test use of PVCs provisioned over iSCSI and NVMe-of", func() { - stressTestPVC() +var _ = Describe("Mayastor PVC Stress test", func() { + It("should stress test creation and deletion of PVCs provisioned over iSCSI and NVMe-of", func() { + stressTestPVC(cdIterations, false) + }) + + It("should stress test creation and deletion of PVCs provisioned over iSCSI and NVMe-of", func() { + stressTestPVC(crudIterations, true) }) }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) Cmn.SetupTestEnv() + tmp := os.Getenv("e2e_pvc_stress_cd_cycles") + if len(tmp) != 0 { + var err error + cdIterations, err = strconv.Atoi(tmp) + Expect(err).NotTo(HaveOccurred()) + logf.Log.Info("Cycle count changed by environment ", "Create/Delete", cdIterations) + } + + tmp = os.Getenv("e2e_pvc_stress_crud_cycles") + if len(tmp) != 0 { + var err error + crudIterations, err = strconv.Atoi(tmp) + 
Expect(err).NotTo(HaveOccurred()) + logf.Log.Info("Cycle count changed by environment", "Create/Read/Update/Delete", crudIterations) + } + logf.Log.Info("Number of cycles are", "Create/Delete", cdIterations, "Create/Read/Update/Delete", crudIterations) + close(done) }, 60) var _ = AfterSuite(func() { + // Cleanup resources leftover in the event of failure. + for _, pod := range podNames { + err := Cmn.DeletePod(pod) + Expect(err).ToNot(HaveOccurred()) + } + for _, vol := range volNames { + Cmn.RmPVC(vol.volName, vol.scName) + } + // NB This only tears down the local structures for talking to the cluster, // not the kubernetes cluster itself. By("tearing down the test environment") diff --git a/test/e2e/nightly/test.sh b/test/e2e/nightly/test.sh deleted file mode 100755 index d0489eb8e..000000000 --- a/test/e2e/nightly/test.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -# The default go test timeout of 10 minutes may be insufficient. - -# We start with a timeout value of 60 seconds and bump up the value -# adding a number of seconds for each test. -timeout=60 - -#pvc_stress run duration is around 7 minutes for 100 iterations, -# add 8 minutes to handle variations in timing. -timeout=$(( timeout + 480 )) - -#pvc_stress_fio run duration is around 11 minutes for 10 iterations, -# with fio duration set to 5 seconds. -# add 12 minutes to handle variations in timing. -timeout=$(( timeout + 720 )) - -# FIXME: we want to pvc_stress before pvc_stress_fio. -go test ./... --timeout "${timeout}s" From 6dead24a344540f9bf674ed42e0765c9d0c9bfea Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Thu, 17 Dec 2020 09:42:47 +0000 Subject: [PATCH 47/85] test: add basic rebuild E2E test This test checks that a rebuild can be started and completes correctly. Some helper functions have been added to the common directory and could be used to refactor some of the existing code in the future. --- test/e2e/common/util.go | 175 ++++++++++++++++++++++++- test/e2e/rebuild/README.md | 21 +++ test/e2e/rebuild/basic_rebuild_test.go | 93 +++++++++++++ test/e2e/rebuild/fio.yaml | 19 +++ 4 files changed, 306 insertions(+), 2 deletions(-) create mode 100644 test/e2e/rebuild/README.md create mode 100644 test/e2e/rebuild/basic_rebuild_test.go create mode 100644 test/e2e/rebuild/fio.yaml diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index a875ab323..91c0e82ca 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -193,6 +193,11 @@ func IsPVDeleted(volName string) bool { } } +// IsPvcBound returns true if a PVC with the given name is bound otherwise false is returned. 
+func IsPvcBound(pvcName string) bool {
+	return GetPvcStatusPhase(pvcName) == corev1.ClaimBound
+}
+
 // Retrieve status phase of a Persistent Volume Claim
 func GetPvcStatusPhase(volname string) (phase corev1.PersistentVolumeClaimPhase) {
 	pvc, getPvcErr := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").Get(context.TODO(), volname, metav1.GetOptions{})
@@ -294,8 +299,8 @@ func MkPVC(volName string, scName string) string {
 	Eventually(func() *MayastorVolStatus {
 		return GetMSV(string(pvc.ObjectMeta.UID))
 	},
-		defTimeoutSecs,
-		"1s",
+		defTimeoutSecs,
+		"1s",
 	).Should(Not(BeNil()))
 
 	return string(pvc.ObjectMeta.UID)
@@ -660,3 +665,169 @@ func PodPresentOnNode(podNameRegexp string, namespace string, nodeName string) b
 	}
 	return false
 }
+
+// Return a group version resource for a MSV
+func getMsvGvr() schema.GroupVersionResource {
+	return schema.GroupVersionResource{
+		Group:    "openebs.io",
+		Version:  "v1alpha1",
+		Resource: "mayastorvolumes",
+	}
+}
+
+// Get the MSV custom resource with the given uuid
+func getMsv(uuid string) (*unstructured.Unstructured, error) {
+	msvGVR := getMsvGvr()
+	return gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Get(context.TODO(), uuid, metav1.GetOptions{})
+}
+
+// Get a field within the MSV.
+// The "fields" argument specifies the path within the MSV where the field should be found.
+// E.g. for the replicaCount field which is nested under the MSV spec the function should be called like:
+//     getMsvFieldValue(<uuid>, "spec", "replicaCount")
+func getMsvFieldValue(uuid string, fields ...string) (interface{}, error) {
+	msv, err := getMsv(uuid)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get MSV with error %v", err)
+	}
+	if msv == nil {
+		return nil, fmt.Errorf("MSV with uuid %s does not exist", uuid)
+	}
+
+	field, found, err := unstructured.NestedFieldCopy(msv.Object, fields...)
+	if err != nil {
+		// The last field is the one that we were looking for.
+		lastFieldIndex := len(fields) - 1
+		return nil, fmt.Errorf("Failed to get field %s with error %v", fields[lastFieldIndex], err)
+	}
+	if !found {
+		// The last field is the one that we were looking for.
+		lastFieldIndex := len(fields) - 1
+		return nil, fmt.Errorf("Failed to find field %s", fields[lastFieldIndex])
+	}
+	return field, nil
+}
+
+// GetNumReplicas returns the number of replicas in the MSV.
+// An error is returned if the number of replicas cannot be retrieved.
+func GetNumReplicas(uuid string) (int64, error) {
+	// Get the number of replicas from the MSV.
+	repl, err := getMsvFieldValue(uuid, "spec", "replicaCount")
+	if err != nil {
+		return 0, err
+	}
+	if repl == nil {
+		return 0, fmt.Errorf("Failed to get replicaCount")
+	}
+
+	return reflect.ValueOf(repl).Interface().(int64), nil
+}
+
+// UpdateNumReplicas sets the number of replicas in the MSV to the desired number.
+// An error is returned if the number of replicas cannot be updated.
+func UpdateNumReplicas(uuid string, numReplicas int64) error {
+	msv, err := getMsv(uuid)
+	if err != nil {
+		return fmt.Errorf("Failed to get MSV with error %v", err)
+	}
+	if msv == nil {
+		return fmt.Errorf("MSV not found")
+	}
+
+	// Set the number of replicas in the MSV.
+	err = unstructured.SetNestedField(msv.Object, numReplicas, "spec", "replicaCount")
+	if err != nil {
+		return err
+	}
+
+	// Update the k8s MSV object.
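+	// Note: a plain Update is subject to the usual k8s optimistic-concurrency rules,
+	// so it can fail with a conflict error if the MSV was modified between the Get
+	// and the Update; callers may need to retry.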
+	msvGVR := getMsvGvr()
+	_, err = gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Update(context.TODO(), msv, metav1.UpdateOptions{})
+	if err != nil {
+		return fmt.Errorf("Failed to update MSV: %v", err)
+	}
+	return nil
+}
+
+// GetNumChildren returns the number of nexus children listed in the MSV
+func GetNumChildren(uuid string) int {
+	children, err := getMsvFieldValue(uuid, "status", "nexus", "children")
+	if err != nil {
+		return 0
+	}
+	if children == nil {
+		return 0
+	}
+
+	switch reflect.TypeOf(children).Kind() {
+	case reflect.Slice:
+		return reflect.ValueOf(children).Len()
+	}
+	return 0
+}
+
+// NexusChild represents the information stored in the MSV about a nexus child
+type NexusChild struct {
+	State string
+	URI   string
+}
+
+// GetChildren returns a slice containing information about the children.
+// An error is returned if the child information cannot be retrieved.
+func GetChildren(uuid string) ([]NexusChild, error) {
+	children, err := getMsvFieldValue(uuid, "status", "nexus", "children")
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get children with error %v", err)
+	}
+	if children == nil {
+		return nil, fmt.Errorf("Failed to find children")
+	}
+
+	// grow the slice as children are found, so that the returned length
+	// reflects the actual number of children (and cannot index out of range)
+	nexusChildren := make([]NexusChild, 0, 2)
+
+	switch reflect.TypeOf(children).Kind() {
+	case reflect.Slice:
+		s := reflect.ValueOf(children)
+		for i := 0; i < s.Len(); i++ {
+			child := s.Index(i).Elem()
+			nexusChild := NexusChild{}
+			if child.Kind() == reflect.Map {
+				for _, key := range child.MapKeys() {
+					skey := key.Interface().(string)
+					switch skey {
+					case "state":
+						nexusChild.State = child.MapIndex(key).Interface().(string)
+					case "uri":
+						nexusChild.URI = child.MapIndex(key).Interface().(string)
+					}
+				}
+			}
+			nexusChildren = append(nexusChildren, nexusChild)
+		}
+	}
+
+	return nexusChildren, nil
+}
+
+// GetNexusState returns the nexus state from the MSV.
+// An error is returned if the nexus state cannot be retrieved.
+func GetNexusState(uuid string) (string, error) {
+	// Get the state of the nexus from the MSV.
+	state, err := getMsvFieldValue(uuid, "status", "nexus", "state")
+	if err != nil {
+		return "", err
+	}
+	if state == nil {
+		return "", fmt.Errorf("Failed to get nexus state")
+	}
+
+	return reflect.ValueOf(state).Interface().(string), nil
+}
+
+// IsVolumePublished returns true if the volume is published.
+// A volume is published if the "targetNodes" field exists in the MSV.
+func IsVolumePublished(uuid string) bool {
+	_, err := getMsvFieldValue(uuid, "status", "targetNodes")
+	return err == nil
+}
diff --git a/test/e2e/rebuild/README.md b/test/e2e/rebuild/README.md
new file mode 100644
index 000000000..aad936c28
--- /dev/null
+++ b/test/e2e/rebuild/README.md
@@ -0,0 +1,21 @@
+## Pre-requisites for this test
+
+* A Kubernetes cluster with at least 3 nodes, with mayastor installed.
+
+## Overview
+The tests in this folder verify the behaviour of the rebuild feature in a Kubernetes environment.
+
+## Test Descriptions
+
+### Basic rebuild test
+The purpose of this test is to ensure that a rebuild starts and completes successfully when the replica count in the MSV is incremented.
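+
+In outline, the test drives the rebuild through the MSV helper functions
+added to `test/e2e/common/util.go` in this patch. A minimal sketch of the
+core flow, assuming a bound PVC (names and timeouts here are illustrative;
+the full version is in `basic_rebuild_test.go`):
+
+```go
+uuid := string(pvc.ObjectMeta.UID)
+
+// Ask for a second replica; a child should be added to the nexus and a
+// rebuild started for it.
+err := common.UpdateNumReplicas(uuid, 2)
+Expect(err).To(BeNil())
+
+// The new child appears while the rebuild runs...
+Eventually(func() int { return common.GetNumChildren(uuid) },
+	"90s", "1s").Should(Equal(2))
+
+// ...and the nexus returns to the online state once it completes.
+Eventually(func() (string, error) { return common.GetNexusState(uuid) },
+	"90s", "1s").Should(BeEquivalentTo("NEXUS_ONLINE"))
+```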
+ +To run: +```bash +go test basic_rebuild_test.go +``` + +To run with verbose output: +```bash +go test -v basic_rebuild_test.go +``` \ No newline at end of file diff --git a/test/e2e/rebuild/basic_rebuild_test.go b/test/e2e/rebuild/basic_rebuild_test.go new file mode 100644 index 000000000..93272a276 --- /dev/null +++ b/test/e2e/rebuild/basic_rebuild_test.go @@ -0,0 +1,93 @@ +package basic_rebuild_test + +import ( + "e2e-basic/common" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + pvcName = "rebuild-test-pvc" + storageClass = "mayastor-nvmf" +) + +const ApplicationPod = "fio.yaml" + +func basicRebuildTest() { + // Create a PVC + common.MkPVC(pvcName, storageClass) + pvc, err := common.GetPVC(pvcName) + Expect(err).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + timeout := "90s" + pollPeriod := "1s" + + // Create an application pod and wait for the PVC to be bound to it. + common.ApplyDeployYaml(ApplicationPod) + Eventually(func() bool { return common.IsPvcBound(pvcName) }, timeout, pollPeriod).Should(Equal(true)) + + uuid := string(pvc.ObjectMeta.UID) + repl, err := common.GetNumReplicas(uuid) + Expect(err).To(BeNil()) + Expect(repl).Should(Equal(int64(1))) + + // Wait for volume to be published before adding a child. + // This ensures that a nexus exists when the child is added. + Eventually(func() bool { return common.IsVolumePublished(uuid) }, timeout, pollPeriod).Should(Equal(true)) + + // Add another child which should kick off a rebuild. + common.UpdateNumReplicas(uuid, 2) + repl, err = common.GetNumReplicas(uuid) + Expect(err).To(BeNil()) + Expect(repl).Should(Equal(int64(2))) + + // Wait for the added child to show up. + Eventually(func() int { return common.GetNumChildren(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo(2)) + + getChildrenFunc := func(uuid string) []common.NexusChild { + children, err := common.GetChildren(uuid) + if err != nil { + panic("Failed to get children") + } + Expect(len(children)).Should(Equal(2)) + return children + } + + // Check the added child and nexus are both degraded. + Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_DEGRADED")) + Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_DEGRADED")) + + // Check everything eventually goes healthy following a rebuild. 
+ Eventually(func() string { return getChildrenFunc(uuid)[0].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) + Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) + Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_ONLINE")) +} + +func TestRebuild(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Rebuild Test Suite") +} + +var _ = Describe("Mayastor rebuild test", func() { + It("should run a rebuild job to completion", func() { + basicRebuildTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + common.DeleteDeployYaml(ApplicationPod) + common.RmPVC(pvcName, storageClass) + common.TeardownTestEnv() +}) diff --git a/test/e2e/rebuild/fio.yaml b/test/e2e/rebuild/fio.yaml new file mode 100644 index 000000000..2d76360e9 --- /dev/null +++ b/test/e2e/rebuild/fio.yaml @@ -0,0 +1,19 @@ +kind: Pod +apiVersion: v1 +metadata: + name: fio +spec: + volumes: + - name: ms-volume + persistentVolumeClaim: + claimName: rebuild-test-pvc + containers: + - name: fio + image: nixery.dev/shell/fio/tini + command: [ "tini", "--" ] + args: + - sleep + - "1000000" + volumeMounts: + - mountPath: "/volume" + name: ms-volume From 957e11918981fafed78c029575ab8ea3df11e473 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Tue, 5 Jan 2021 18:39:02 +0000 Subject: [PATCH 48/85] fix(nexus): assertion failure when unpublishing a faulted nexus The assertion occurs due to the nvmf subsystem transitioning from a paused to an inactive state, which is forbidden. Always resume the nexus in child_retire so that it is usually in the active state, which avoids the assertion when unpublishing it. When there is only a single faulted child, the nexus itself is in a faulted state, which means no IO is possible. In nexus_bdev, fail the IO if all submissions failed, which also includes the case where no submissions were made. Refactor the post-IO submission to avoid repetition. Also include the case where no usable child was found when doing a round-robin for reads. This also fixes CAS-606. Repurpose the existing cargo test for a replica that is stopped, then continued, to test the cases above with a single remote replica. --- mayastor/src/bdev/nexus/nexus_bdev.rs | 65 +++++++++---------- mayastor/src/bdev/nexus/nexus_io.rs | 5 +- mayastor/src/subsys/nvmf/subsystem.rs | 14 +++-- mayastor/tests/replica_timeout.rs | 91 ++++++++++++++++++--------- 4 files changed, 99 insertions(+), 76 deletions(-) diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 7bcfbf308..e6212165c 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -766,6 +766,12 @@ impl Nexus { // we use RR to read from the children. 
             let child = channels.child_select();
             if child.is_none() {
+                error!(
+                    "{}: No child available to read from {:p}",
+                    io.nexus_as_ref().name,
+                    io.as_ptr(),
+                );
+                io.fail();
                 return;
             }
@@ -820,6 +826,23 @@ impl Nexus {
         }
     }
 
+    /// check results after submitting IO, failing if all failed to submit
+    #[inline(always)]
+    fn check_io_submission(&self, results: &[i32], io: &Bio) {
+        // if any of the children failed to dispatch
+        if results.iter().any(|r| *r != 0) {
+            error!(
+                "{}: Failed to submit dispatched IO {:?}",
+                io.nexus_as_ref().name,
+                io.as_ptr(),
+            );
+        }
+
+        if results.iter().all(|r| *r != 0) {
+            io.fail();
+        }
+    }
+
     /// send reset IO to the underlying children.
     pub(crate) fn reset(&self, io: &Bio, channels: &NexusChannelInner) {
         // in case of resets, we want to reset all underlying children
@@ -838,14 +861,7 @@ impl Nexus {
             })
             .collect::<Vec<_>>();
 
-        // if any of the children failed to dispatch
-        if results.iter().any(|r| *r != 0) {
-            error!(
-                "{}: Failed to submit dispatched IO {:?}",
-                io.nexus_as_ref().name,
-                io.as_ptr(),
-            );
-        }
+        self.check_io_submission(&results, &io);
     }
 
     /// write vectored IO to the underlying children.
@@ -869,14 +885,7 @@ impl Nexus {
             })
             .collect::<Vec<_>>();
 
-        // if any of the children failed to dispatch
-        if results.iter().any(|r| *r != 0) {
-            error!(
-                "{}: Failed to submit dispatched IO {:?}",
-                io.nexus_as_ref().name,
-                io.as_ptr()
-            );
-        }
+        self.check_io_submission(&results, &io);
     }
 
     pub(crate) fn unmap(&self, io: &Bio, channels: &NexusChannelInner) {
@@ -896,13 +905,7 @@
             })
             .collect::<Vec<_>>();
 
-        if results.iter().any(|r| *r != 0) {
-            error!(
-                "{}: Failed to submit dispatched IO {:?}",
-                io.nexus_as_ref().name,
-                io.as_ptr()
-            );
-        }
+        self.check_io_submission(&results, &io);
     }
 
     pub(crate) fn write_zeroes(&self, io: &Bio, channels: &NexusChannelInner) {
@@ -922,13 +925,7 @@
             })
             .collect::<Vec<_>>();
 
-        if results.iter().any(|r| *r != 0) {
-            error!(
-                "{}: Failed to submit dispatched IO {:?}",
-                io.nexus_as_ref().name,
-                io.as_ptr()
-            );
-        }
+        self.check_io_submission(&results, &io);
     }
 
     pub(crate) fn nvme_admin(&self, io: &Bio, channels: &NexusChannelInner) {
@@ -983,13 +980,7 @@
             })
             .collect::<Vec<_>>();
 
-        if results.iter().any(|r| *r != 0) {
-            error!(
-                "{}: Failed to submit dispatched IO {:?}",
-                io.nexus_as_ref().name,
-                io.as_ptr()
-            );
-        }
+        self.check_io_submission(&results, &io);
     }
 
     /// Status of the nexus
diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs
index fdccbd172..2b110ea7f 100644
--- a/mayastor/src/bdev/nexus/nexus_io.rs
+++ b/mayastor/src/bdev/nexus/nexus_io.rs
@@ -338,9 +338,8 @@ impl Bio {
             nexus.reconfigure(DREvent::ChildFault).await;
             //nexus.remove_child(&uri).await.unwrap();
             bdev_destroy(&uri).await.unwrap();
-            if nexus.status() != NexusStatus::Faulted {
-                nexus.resume().await.unwrap();
-            } else {
+            nexus.resume().await.unwrap();
+            if nexus.status() == NexusStatus::Faulted {
                 error!(":{} has no children left...
", nexus); } } diff --git a/mayastor/src/subsys/nvmf/subsystem.rs b/mayastor/src/subsys/nvmf/subsystem.rs index aaf33877b..64ffed88e 100644 --- a/mayastor/src/subsys/nvmf/subsystem.rs +++ b/mayastor/src/subsys/nvmf/subsystem.rs @@ -372,8 +372,8 @@ impl NvmfSubsystem { Ok(()) } - /// we are not making use of pause and resume yet but this will be needed - /// when we start to move things around + /// transition the subsystem to paused state + /// intended to be a temporary state while changes are made pub async fn pause(&self) -> Result<(), Error> { extern "C" fn pause_cb( ss: *mut spdk_nvmf_subsystem, @@ -403,9 +403,9 @@ impl NvmfSubsystem { ) } .to_result(|e| Error::Subsystem { - source: Errno::from_i32(e), + source: Errno::from_i32(-e), nqn: self.get_nqn(), - msg: "out of memory".to_string(), + msg: format!("subsystem_pause returned: {}", e), })?; r.await.unwrap().to_result(|e| Error::Subsystem { @@ -414,6 +414,8 @@ impl NvmfSubsystem { msg: "failed to pause the subsystem".to_string(), }) } + + /// transition the subsystem to active state pub async fn resume(&self) -> Result<(), Error> { extern "C" fn resume_cb( ss: *mut spdk_nvmf_subsystem, @@ -445,9 +447,9 @@ impl NvmfSubsystem { if rc != 0 { return Err(Error::Subsystem { - source: Errno::UnknownErrno, + source: Errno::from_i32(-rc), nqn: self.get_nqn(), - msg: "out of memory".to_string(), + msg: format!("subsystem_resume returned: {}", rc), }); } diff --git a/mayastor/tests/replica_timeout.rs b/mayastor/tests/replica_timeout.rs index 50ddc2110..380fcd126 100644 --- a/mayastor/tests/replica_timeout.rs +++ b/mayastor/tests/replica_timeout.rs @@ -1,23 +1,32 @@ -#![allow(unused_assignments)] - -use common::{bdev_io, compose::Builder, MayastorTest}; +use common::{compose::Builder, MayastorTest}; use mayastor::{ - bdev::{nexus_create, nexus_lookup}, + bdev::{nexus_create, nexus_lookup, NexusStatus}, core::MayastorCliArgs, + subsys::{Config, NvmeBdevOpts}, }; -use rpc::mayastor::{BdevShareRequest, BdevUri, Null}; +use rpc::mayastor::{BdevShareRequest, BdevUri, Null, ShareProtocolNexus}; +use std::process::{Command, Stdio}; use tokio::time::Duration; pub mod common; static NXNAME: &str = "nexus"; -#[ignore] #[tokio::test] async fn replica_stop_cont() { + // Use shorter timeouts than the defaults to reduce test runtime + Config::get_or_init(|| Config { + nvme_bdev_opts: NvmeBdevOpts { + timeout_us: 5_000_000, + keep_alive_timeout_ms: 5_000, + retry_count: 2, + ..Default::default() + }, + ..Default::default() + }) + .apply(); let test = Builder::new() .name("cargo-test") .network("10.1.0.0/16") - .add_container("ms2") .add_container("ms1") .with_clean(true) .build() @@ -47,27 +56,25 @@ async fn replica_stop_cont() { let mayastor = MayastorTest::new(MayastorCliArgs::default()); + // create a nexus with the remote replica as its child mayastor .spawn(async move { nexus_create( NXNAME, 1024 * 1024 * 50, None, - &[ - format!( - "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", - hdls[0].endpoint.ip() - ), - format!( - "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", - hdls[1].endpoint.ip() - ), - ], + &[format!( + "nvmf://{}:8420/nqn.2019-05.io.openebs:disk0", + hdls[0].endpoint.ip() + )], ) .await .unwrap(); - bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap(); - bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap(); + nexus_lookup(&NXNAME) + .unwrap() + .share(ShareProtocolNexus::NexusNvmf, None) + .await + .expect("should publish nexus over nvmf"); }) .await; @@ -78,25 +85,49 @@ async fn replica_stop_cont() { println!("waiting for the container to be 
fully suspended... {}/5", i); } - mayastor.send(async { - // we do not determine if the IO completed with an error or not just - // that it completes. - let _ = dbg!(bdev_io::read_some(NXNAME, 0, 0xff).await); - let _ = dbg!(bdev_io::read_some(NXNAME, 0, 0xff).await); - }); + // initiate the read and leave it in the background to time out + let nxuri = + format!("nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:{}", NXNAME); + Command::new("../target/debug/initiator") + .args(&[&nxuri, "read", "/tmp/tmpread"]) + .stdout(Stdio::piped()) + .spawn() + .expect("should send read from initiator"); println!("IO submitted unfreezing container..."); - for i in 1 .. 6 { + // KATO is 5s, wait at least that long + let n = 10; + for i in 1 ..= n { ticker.tick().await; - println!("unfreeze delay... {}/5", i); + println!("unfreeze delay... {}/{}", i, n); } test.thaw("ms1").await.unwrap(); println!("container thawed"); + + // Wait for faulting to complete first + ticker.tick().await; + + // with no child to send read to, io should still complete as failed + let status = Command::new("../target/debug/initiator") + .args(&[&nxuri, "read", "/tmp/tmpread"]) + .stdout(Stdio::piped()) + .status() + .expect("should send read from initiator"); + assert!(!status.success()); + + // unshare the nexus while its status is faulted mayastor - .spawn(async { - let nexus = nexus_lookup(NXNAME).unwrap(); - nexus.destroy().await.unwrap(); + .spawn(async move { + assert_eq!( + nexus_lookup(&NXNAME).unwrap().status(), + NexusStatus::Faulted, + ); + nexus_lookup(&NXNAME) + .unwrap() + .unshare_nexus() + .await + .expect("should unpublish nexus"); }) .await; } From b5737ef662a603f5999aeef3699bb159aefb7d3e Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Fri, 18 Dec 2020 09:25:46 +0000 Subject: [PATCH 49/85] Unschedule Mayastor pod to test replica loss In order to avoid low-level network manipulation to fault a child, the new test unschedules the corresponding mayastor replica pod, so removing it from the IO path, to achieve the same effect. This allows greater flexibility in where the test can be run (e.g. CI testing) as the kubernetes API is all that is needed. Removed use of thread in the tests for better cleanup on failure. 
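
A sketch of the mechanism, using the helpers this patch adds to the
common package (the mayastor daemonset only schedules onto nodes that
carry the openebs.io/engine=mayastor label):

    // Unschedule the mayastor pod: removing the engine label makes the
    // daemonset controller remove the pod from the node, taking the
    // replica out of the IO path.
    common.UnlabelNode(nodeName, "openebs.io/engine")
    err := common.WaitForPodNotRunningOnNode("^mayastor-.....$", "mayastor", nodeName, 100)
    Expect(err).ToNot(HaveOccurred())

    // Restore the label to reschedule the pod; the volume is then repaired.
    common.LabelNode(nodeName, "openebs.io/engine", "mayastor")
    err = common.WaitForPodRunningOnNode("^mayastor-.....$", "mayastor", nodeName, 180)
    Expect(err).ToNot(HaveOccurred())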
---
 scripts/e2e-test.sh                           |   2 +-
 test/e2e/common/util.go                       |  83 ++++++++-----
 test/e2e/go.sum                               |   1 +
 test/e2e/node_disconnect/README.md            |  25 +++-
 .../lib/node_disconnect_lib.go                | 117 +++++++++++++++---
 .../lib/node_disconnect_setup.go              |  14 ++-
 .../replica_disconnection_test.go             |  12 +-
 .../replica_pod_remove_test.go                |  53 ++++++++
 .../replica_reassign/replica_reassign_test.go |   2 +-
 test/e2e/node_disconnect/test.sh              |  12 --
 10 files changed, 245 insertions(+), 76 deletions(-)
 create mode 100644 test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go
 delete mode 100755 test/e2e/node_disconnect/test.sh

diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index 9a225b73f..97443eeeb 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -3,7 +3,7 @@ set -eux
 
 SCRIPTDIR=$(dirname "$(realpath "$0")")
 
-TESTS="install basic_volume_io"
+TESTS="install basic_volume_io node_disconnect/replica_pod_remove"
 DEVICE=
 REGISTRY=
 TAG=
diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go
index a875ab323..25f1b0902 100644
--- a/test/e2e/common/util.go
+++ b/test/e2e/common/util.go
@@ -294,8 +294,8 @@ func MkPVC(volName string, scName string) string {
 	Eventually(func() *MayastorVolStatus {
 		return GetMSV(string(pvc.ObjectMeta.UID))
 	},
-		defTimeoutSecs,
-		"1s",
+		defTimeoutSecs,
+		"1s",
 	).Should(Not(BeNil()))
 
 	return string(pvc.ObjectMeta.UID)
@@ -559,19 +559,31 @@ func RemoveAllNodeSelectorsFromDeployment(deploymentName string, namespace strin
 
 // Adjust the number of replicas in the deployment
 func SetDeploymentReplication(deploymentName string, namespace string, replicas *int32) {
-	depApi := gTestEnv.KubeInt.AppsV1().Deployments
-	deployment, err := depApi(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
-	Expect(err).ToNot(HaveOccurred())
-	deployment.Spec.Replicas = replicas
-	_, err = depApi("mayastor").Update(context.TODO(), deployment, metav1.UpdateOptions{})
+	depAPI := gTestEnv.KubeInt.AppsV1().Deployments
+	var err error
+
+	// this is to cater for a race condition, occasionally seen,
+	// when the deployment is changed between Get and Update
+	for attempts := 0; attempts < 10; attempts++ {
+		deployment, getErr := depAPI(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+		Expect(getErr).ToNot(HaveOccurred())
+		deployment.Spec.Replicas = replicas
+		_, err = depAPI(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
+		if err == nil {
+			break
+		}
+		fmt.Printf("Re-trying update attempt due to error: %v\n", err)
+		time.Sleep(1 * time.Second)
+	}
 	Expect(err).ToNot(HaveOccurred())
 }
 
 // TODO remove dependency on kubectl
-// label is a string in the form "key=value"
+// label and value give the key and the value of the label to apply
 // function still succeeds if label already present
-func LabelNode(nodename string, label string) {
-	cmd := exec.Command("kubectl", "label", "node", nodename, label, "--overwrite=true")
+func LabelNode(nodename string, label string, value string) {
+	labelAssign := fmt.Sprintf("%s=%s", label, value)
+	cmd := exec.Command("kubectl", "label", "node", nodename, labelAssign, "--overwrite=true")
 	cmd.Dir = ""
 	_, err := cmd.CombinedOutput()
 	Expect(err).ToNot(HaveOccurred())
@@ -615,33 +627,46 @@ func WaitForPodAbsentFromNode(podNameRegexp string, namespace string, nodeName s
 	return nil
 }
 
+// Get the execution status of the given pod, or nil if it does not exist
+func getPodStatus(podNameRegexp string, namespace string, nodeName string) *v1.PodPhase {
+	var validID = regexp.MustCompile(podNameRegexp)
+	podAPI := gTestEnv.KubeInt.CoreV1().Pods
+	podList, err :=
podAPI(namespace).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + for _, pod := range podList.Items { + if pod.Spec.NodeName == nodeName && validID.MatchString(pod.Name) { + return &pod.Status.Phase + } + } + return nil // pod not found +} + // Wait until the instance of the specified pod is present and in the running // state on the given node func WaitForPodRunningOnNode(podNameRegexp string, namespace string, nodeName string, timeoutSeconds int) error { - var validID = regexp.MustCompile(podNameRegexp) - podReady := false + for i := 0; i < timeoutSeconds; i++ { + stat := getPodStatus(podNameRegexp, namespace, nodeName) - podApi := gTestEnv.KubeInt.CoreV1().Pods - - for i := 0; i < timeoutSeconds && podReady == false; i++ { - time.Sleep(time.Second) - podList, err := podApi(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return errors.New("failed to list pods") - } - for _, pod := range podList.Items { - if pod.Spec.NodeName == nodeName && pod.Status.Phase == v1.PodRunning { - if validID.MatchString(pod.Name) { - podReady = true - break - } - } + if stat != nil && *stat == v1.PodRunning { + return nil } + time.Sleep(1 * time.Second) } - if podReady == false { - return errors.New("timed out waiting for pod") + return errors.New("timed out waiting for pod to be running") +} + +// Wait until the instance of the specified pod is absent or not in the running +// state on the given node +func WaitForPodNotRunningOnNode(podNameRegexp string, namespace string, nodeName string, timeoutSeconds int) error { + for i := 0; i < timeoutSeconds; i++ { + stat := getPodStatus(podNameRegexp, namespace, nodeName) + + if stat == nil || *stat != v1.PodRunning { + return nil + } + time.Sleep(1 * time.Second) } - return nil + return errors.New("timed out waiting for pod to stop running") } // returns true if the pod is present on the given node diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 72ca0926a..f019956cb 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -482,6 +482,7 @@ k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKi k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ= k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw= k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= diff --git a/test/e2e/node_disconnect/README.md b/test/e2e/node_disconnect/README.md index ae3a0ce39..7a8738dfe 100644 --- a/test/e2e/node_disconnect/README.md +++ b/test/e2e/node_disconnect/README.md @@ -1,10 +1,25 @@ ## Note -The tests in this folder are not currently deployable by the CI system -as the test assumes a vagrant installation +The tests in directories replica_disconnect and replica_reassign +are not currently deployable by the CI system +as those tests assume a vagrant installation. -## Pre-requisites for these tests +## Pre-requisites for replica_pod_remove + +* A Kubernetes cluster with 3 nodes, with mayastor installed. + +## Pre-requisites for the other directories * A Kubernetes cluster with at least 3 nodes, with mayastor installed. 
-* The re-assignment test requires at least 4 nodes
+* The replica_reassign test requires at least 4 nodes.
 * The cluster is deployed using vagrant and KUBESPRAY_REPO is correctly
-  defined in ./lib/io_connect_node.sh
+  defined in `./lib/io_connect_node.sh`
+
+## Overview
+The tests verify the behaviour of the cluster under fault conditions
+that affect the availability of resources, for example a missing
+mayastor pod or a disconnected node.
+
+To run, cd to the test directory then:
+```bash
+go test
+```
diff --git a/test/e2e/node_disconnect/lib/node_disconnect_lib.go b/test/e2e/node_disconnect/lib/node_disconnect_lib.go
index 9359f7767..9006503c8 100644
--- a/test/e2e/node_disconnect/lib/node_disconnect_lib.go
+++ b/test/e2e/node_disconnect/lib/node_disconnect_lib.go
@@ -4,15 +4,16 @@ import (
 	"e2e-basic/common"
 	"fmt"
 	"os/exec"
-	"time"
 
 	. "github.com/onsi/gomega"
 )
 
 const (
 	defTimeoutSecs           = "90s"
-	disconnectionTimeoutSecs = "90s"
-	repairTimeoutSecs        = "90s"
+	disconnectionTimeoutSecs = "180s"
+	podUnscheduleTimeoutSecs = 100
+	podRescheduleTimeoutSecs = 180
+	repairTimeoutSecs        = "180s"
 )
 
 type DisconnectEnv struct {
@@ -53,6 +54,25 @@ func (env *DisconnectEnv) ReconnectNode(checkError bool) {
 		Expect(err).ToNot(HaveOccurred())
 	}
+	env.nodeToIsolate = ""
+	env.disconnectMethod = ""
+}
+
+// prevent the mayastor pod from running on the given node by removing the engine label
+func SuppressMayastorPodOn(nodeName string) {
+	common.UnlabelNode(nodeName, engineLabel)
+	err := common.WaitForPodNotRunningOnNode(mayastorRegexp, namespace, nodeName, podUnscheduleTimeoutSecs)
+	Expect(err).ToNot(HaveOccurred())
+}
+
+// re-enable the mayastor pod on the isolated node by restoring the engine label
+func (env *DisconnectEnv) UnsuppressMayastorPod() {
+	if env.nodeToIsolate != "" {
+		// add the mayastor label to the node
+		common.LabelNode(env.nodeToIsolate, engineLabel, mayastorLabel)
+		err := common.WaitForPodRunningOnNode(mayastorRegexp, namespace, env.nodeToIsolate, podRescheduleTimeoutSecs)
+		Expect(err).ToNot(HaveOccurred())
+		env.nodeToIsolate = ""
+	}
 }
 
 // return the node name to isolate and a vector of IP addresses to isolate
@@ -90,14 +110,12 @@ func getNodes(uuid string) (string, []string) {
 
 // Run fio against the cluster while a replica is being removed and reconnected to the network
 func (env *DisconnectEnv) LossTest() {
-	fmt.Printf("running spawned fio\n")
-	go common.RunFio(env.fioPodName, 20)
-
-	time.Sleep(5 * time.Second)
 	fmt.Printf("disconnecting \"%s\"\n", env.nodeToIsolate)
-
 	DisconnectNode(env.nodeToIsolate, env.otherNodes, env.disconnectMethod)
 
+	fmt.Printf("running fio\n")
+	common.RunFio(env.fioPodName, 20)
+
 	fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs)
 	Eventually(func() string {
 		return common.GetMsvState(env.uuid)
@@ -144,6 +162,45 @@ func (env *DisconnectEnv) LossWhenIdleTest() {
 	common.RunFio(env.fioPodName, 20)
}
 
+// Run fio against the cluster while a replica mayastor pod is unscheduled and then rescheduled
+func (env *DisconnectEnv) PodLossTest() {
+	fmt.Printf("removing mayastor pod from node \"%s\"\n", env.nodeToIsolate)
+	SuppressMayastorPodOn(env.nodeToIsolate)
+
+	fmt.Printf("waiting up to %s for pod removal to affect the nexus\n", disconnectionTimeoutSecs)
+	Eventually(func() string {
+		fmt.Printf("running fio against volume\n")
+		common.RunFio(env.fioPodName, 5)
+		return common.GetMsvState(env.uuid)
+	},
+		disconnectionTimeoutSecs, // timeout
+		"1s", // polling interval
+	).Should(Equal("degraded"))
+
+	fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid))
+
+	fmt.Printf("running fio
against the degraded volume\n") + common.RunFio(env.fioPodName, 20) + + fmt.Printf("enabling mayastor pod on node \"%s\"\n", env.nodeToIsolate) + env.UnsuppressMayastorPod() + + fmt.Printf("waiting up to %s for the volume to be repaired\n", repairTimeoutSecs) + Eventually(func() string { + fmt.Printf("running fio while volume is being repaired\n") + common.RunFio(env.fioPodName, 5) + return common.GetMsvState(env.uuid) + }, + repairTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal("healthy")) + + fmt.Printf("volume is in state \"%s\"\n", common.GetMsvState(env.uuid)) + + fmt.Printf("running fio against the repaired volume\n") + common.RunFio(env.fioPodName, 20) +} + // Run fio against the cluster while a replica node is being removed, // wait for the volume to become degraded, then wait for it to be repaired. // Run fio against repaired volume, and again after node is reconnected. @@ -151,14 +208,12 @@ func (env *DisconnectEnv) ReplicaReassignTest() { // This test needs at least 4 nodes, a refuge node, a mayastor node to isolate, and 2 other mayastor nodes Expect(len(env.otherNodes)).To(BeNumerically(">=", 3)) - fmt.Printf("running spawned fio\n") - go common.RunFio(env.fioPodName, 20) - - time.Sleep(5 * time.Second) fmt.Printf("disconnecting \"%s\"\n", env.nodeToIsolate) - DisconnectNode(env.nodeToIsolate, env.otherNodes, env.disconnectMethod) + fmt.Printf("running fio against the volume\n") + common.RunFio(env.fioPodName, 20) + fmt.Printf("waiting up to %s for disconnection to affect the nexus\n", disconnectionTimeoutSecs) Eventually(func() string { return common.GetMsvState(env.uuid) @@ -189,10 +244,10 @@ func (env *DisconnectEnv) ReplicaReassignTest() { common.RunFio(env.fioPodName, 20) } -// Common steps required when setting up the test. -// Creates the PVC, deploys fio, determines the nodes used by the volume -// and selects a non-nexus replica to isolate -func Setup(pvcName string, storageClassName string, fioPodName string, disconnectMethod string) DisconnectEnv { +// Common steps required when setting up the test when using a refuge node. +// Creates the PVC, deploys fio on the refuge node, determines the nodes +// used by the volume and selects a non-nexus replica node to isolate. +func SetupWithRefuge(pvcName string, storageClassName string, fioPodName string, disconnectMethod string) DisconnectEnv { env := DisconnectEnv{} env.uuid = common.MkPVC(pvcName, storageClassName) @@ -215,6 +270,34 @@ func Setup(pvcName string, storageClassName string, fioPodName string, disconnec return env } +// Common steps required when setting up the test. 
+// Creates the PVC, deploys fio, determines the nodes used by the volume +// and selects a non-nexus replica node to isolate +func Setup(pvcName string, storageClassName string, fioPodName string) DisconnectEnv { + env := DisconnectEnv{} + + env.uuid = common.MkPVC(pvcName, storageClassName) + env.volToDelete = pvcName + env.storageClass = storageClassName + env.disconnectMethod = "" + + podObj := common.CreateFioPodDef(fioPodName, pvcName) + _, err := common.CreatePod(podObj) + Expect(err).ToNot(HaveOccurred()) + + fmt.Printf("waiting for fio\n") + Eventually(func() bool { + return common.FioReadyPod() + }, + defTimeoutSecs, // timeout + "1s", // polling interval + ).Should(Equal(true)) + env.fioPodName = fioPodName + + env.nodeToIsolate, env.otherNodes = getNodes(env.uuid) + return env +} + // Common steps required when tearing down the test func (env *DisconnectEnv) Teardown() { if env.fioPodName != "" { diff --git a/test/e2e/node_disconnect/lib/node_disconnect_setup.go b/test/e2e/node_disconnect/lib/node_disconnect_setup.go index 03aa23e6e..7fa3fbbc7 100644 --- a/test/e2e/node_disconnect/lib/node_disconnect_setup.go +++ b/test/e2e/node_disconnect/lib/node_disconnect_setup.go @@ -11,6 +11,10 @@ import ( const mayastorRegexp = "^mayastor-.....$" const moacRegexp = "^moac-..........-.....$" const namespace = "mayastor" +const engineLabel = "openebs.io/engine" +const mayastorLabel = "mayastor" +const refugeLabel = "openebs.io/podrefuge" +const refugeLabelValue = "true" const timeoutSeconds = 100 // DisconnectSetup @@ -30,8 +34,8 @@ func DisconnectSetup() { for i, node := range nodeList { if i == refugeIndex { refugeNode = node.NodeName - common.UnlabelNode(refugeNode, "openebs.io/engine") - common.LabelNode(refugeNode, "openebs.io/podrefuge=true") + common.UnlabelNode(refugeNode, engineLabel) + common.LabelNode(refugeNode, refugeLabel, refugeLabelValue) } } Expect(refugeNode).NotTo(Equal("")) @@ -40,7 +44,7 @@ func DisconnectSetup() { // Update moac to ensure it stays on the refuge node (even if it currently is) fmt.Printf("apply moac node selector for node \"%s\"\n", refugeNode) - common.ApplyNodeSelectorToDeployment("moac", namespace, "openebs.io/podrefuge", "true") + common.ApplyNodeSelectorToDeployment("moac", namespace, refugeLabel, refugeLabelValue) // if not already on the refuge node if moacOnRefugeNode == false { @@ -91,8 +95,8 @@ func DisconnectTeardown() { // apply/remove the labels whether present or not // An error will not occur if the label is already present/absent for _, node := range nodeList { - common.LabelNode(node.NodeName, "openebs.io/engine=mayastor") - common.UnlabelNode(node.NodeName, "openebs.io/podrefuge") + common.LabelNode(node.NodeName, engineLabel, mayastorLabel) + common.UnlabelNode(node.NodeName, refugeLabel) } fmt.Printf("remove moac node affinity\n") diff --git a/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go index 0b42aaa63..185d3d106 100644 --- a/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go +++ b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go @@ -40,39 +40,39 @@ var _ = Describe("Mayastor replica disconnection test", func() { }) It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { - env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-nvmf", "mayastor-nvmf-2", 
"fio", reject) env.LossTest() env.Teardown() }) It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables REJECT)", func() { - env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) env.LossTest() env.Teardown() }) if run_drop { It("should verify nvmf nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { - env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", drop) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", drop) env.LossTest() env.Teardown() }) It("should verify iscsi nexus behaviour when a node becomes inaccessible (iptables DROP)", func() { - env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", drop) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", drop) env.LossTest() env.Teardown() }) } It("should verify nvmf nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { - env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) env.LossWhenIdleTest() env.Teardown() }) It("should verify iscsi nexus behaviour when a node becomes inaccessible when no IO is received (iptables REJECT)", func() { - env = disconnect_lib.Setup("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-iscsi", "mayastor-iscsi-2", "fio", reject) env.LossWhenIdleTest() env.Teardown() }) diff --git a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go new file mode 100644 index 000000000..669cdf474 --- /dev/null +++ b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go @@ -0,0 +1,53 @@ +package replica_pod_remove_test + +import ( + "e2e-basic/common" + disconnect_lib "e2e-basic/node_disconnect/lib" + + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var env disconnect_lib.DisconnectEnv +var gStorageClass string = "" + +func TestMayastorPodLoss(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Replica pod removal tests") +} + +var _ = Describe("Mayastor replica pod removal test", func() { + + It("should define the storage class to use", func() { + common.MkStorageClass("mayastor-nvmf-3", 3, "nvmf", "io.openebs.csi-mayastor") + gStorageClass = "mayastor-nvmf-3" + }) + + It("should verify nvmf nexus behaviour when a mayastor pod is removed", func() { + env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-3", "fio") + env.PodLossTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + + env.UnsuppressMayastorPod() + env.Teardown() // removes fio pod and volume + + if gStorageClass != "" { + common.RmStorageClass(gStorageClass) + } + common.TeardownTestEnv() +}) diff --git a/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go index e21e306e8..680bdb289 100644 --- a/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go +++ b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go @@ -36,7 +36,7 @@ var _ = Describe("Mayastor replica reassignment test", func() { }) It("should verify nvmf nexus repair of volume when a node becomes inaccessible", func() { - env = disconnect_lib.Setup("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) + env = disconnect_lib.SetupWithRefuge("loss-test-pvc-nvmf", "mayastor-nvmf-2", "fio", reject) env.ReplicaReassignTest() env.Teardown() }) diff --git a/test/e2e/node_disconnect/test.sh b/test/e2e/node_disconnect/test.sh deleted file mode 100755 index 3f64cf1bb..000000000 --- a/test/e2e/node_disconnect/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e - -cd "$(dirname ${BASH_SOURCE[0]})" - -timeout=1000 - -(cd replica_disconnect && go test -timeout "${timeout}s") - -# the following test requires a cluster with at least 4 nodes -(cd replica_reassign && go test -timeout "${timeout}s") From 556ccec576b46a98d687f5bce6ccc47cc759fcb9 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Wed, 6 Jan 2021 14:21:20 +0000 Subject: [PATCH 50/85] ci: check for mayastor pod restarts when running e2e-tests CAS-612 Add script which checks for mayastor pod restarts, invoke it after every test run --- scripts/e2e-test.sh | 4 ++++ scripts/e2e_check_pod_restarts.sh | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100755 scripts/e2e_check_pod_restarts.sh diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 9a225b73f..45b487053 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -73,6 +73,10 @@ for dir in $TESTS; do test_failed=1 break fi + if ! 
("$SCRIPTDIR"/e2e_check_pod_restarts.sh) ; then + test_failed=1 + break + fi done # must always run uninstall test in order to clean up the cluster diff --git a/scripts/e2e_check_pod_restarts.sh b/scripts/e2e_check_pod_restarts.sh new file mode 100755 index 000000000..6befff3cf --- /dev/null +++ b/scripts/e2e_check_pod_restarts.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e + +# typical output for kubectl get pods -n mayastor is, +# collect the restart values +#NAME READY STATUS RESTARTS AGE +#mayastor-4xg7x 1/1 Running 0 124m +#mayastor-csi-6746c 2/2 Running 0 124m +#mayastor-csi-pdwjp 2/2 Running 0 124m +#mayastor-lzr5n 1/1 Running 0 124m +restarts=$(kubectl get pods -n mayastor | grep -e mayastor -e moac | awk '{print $4}') +for num in $restarts +do + if [ "$num" -ne "0" ]; then + exit 255 + fi +done +exit 0 From 46a0d02316472615f73b8014277995bbc1d8a2c6 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Wed, 6 Jan 2021 13:40:06 +0000 Subject: [PATCH 51/85] ci: collect cluster state and pod logs on e2e test failure CAS-611 Add scripts/e2e-cluster-dump.sh, invoke from scripts/e2e-test.sh on test failures --- scripts/e2e-cluster-dump.sh | 182 ++++++++++++++++++++++++++++++++++++ scripts/e2e-test.sh | 10 +- 2 files changed, 191 insertions(+), 1 deletion(-) create mode 100755 scripts/e2e-cluster-dump.sh diff --git a/scripts/e2e-cluster-dump.sh b/scripts/e2e-cluster-dump.sh new file mode 100755 index 000000000..6286b96a3 --- /dev/null +++ b/scripts/e2e-cluster-dump.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash + +help() { + cat < Location to store log files + --clusteronly Only generate cluster information + +If --path is not specified the data is dumped to stdout +EOF +} + +function cluster-get { + echo "#-- PODS mayastor* --------------------" + # csi tests creates relevant namespaces containing mayastor + mns=$(kubectl get ns | grep mayastor | sed -e "s/ .*//") + for ns in $mns + do + kubectl -n "$ns" -o wide get pods --sort-by=.metadata.creationTimestamp + done + echo "#-- PODS ------------------------------" + kubectl get -o wide pods --sort-by=.metadata.creationTimestamp + echo "#-- PVCS ------------------------------" + kubectl get pvc --sort-by=.metadata.creationTimestamp + echo "#-- PV --------------------------------" + kubectl get pv --sort-by=.metadata.creationTimestamp + echo "#-- Storage Classes -------------------" + kubectl get sc --sort-by=.metadata.creationTimestamp + echo "#-- Mayastor Pools --------------------" + kubectl -n mayastor get msp --sort-by=.metadata.creationTimestamp + echo "#-- Mayastor Volumes ------------------" + kubectl -n mayastor get msv --sort-by=.metadata.creationTimestamp + echo "#-- Mayastor Nodes --------------------" + kubectl -n mayastor get msn --sort-by=.metadata.creationTimestamp + echo "#-- K8s Nodes -----------------------------" + kubectl get nodes -o wide --show-labels +} + +function cluster-describe { + echo "#-- PODS mayastor* --------------------" + # csi tests creates relevant namespaces containing mayastor + mns=$(kubectl get ns | grep mayastor | sed -e "s/ .*//") + for ns in $mns + do + kubectl -n "$ns" describe pods + done + echo "#-- PODS ------------------------------" + kubectl describe pods + echo "#-- PVCS ------------------------------" + kubectl describe pvc + echo "#-- PV --------------------------------" + kubectl describe pv + echo "#-- Storage Classes -------------------" + kubectl describe sc + echo "#-- Mayastor Pools --------------------" + kubectl -n mayastor describe msp + echo "#-- Mayastor Volumes ------------------" 
+ kubectl -n mayastor describe msv + echo "#-- Mayastor Nodes --------------------" + kubectl -n mayastor describe msn + echo "#-- K8s Nodes -----------------------------" + kubectl describe nodes +} + +function logs-csi-containers { + mayastor_csipods=$(kubectl -n mayastor get pods | grep mayastor-csi | sed -e 's/ .*//') + for pod in $mayastor_csipods + do + echo "# $pod csi-driver-registrar $* ---------------------------------" + kubectl -n mayastor logs "$@" "$pod" csi-driver-registrar + done + + moacpod=$(kubectl -n mayastor get pods | grep moac | sed -e 's/ .*//') + echo "# $moacpod csi-provisioner $* ---------------------------------" + kubectl -n mayastor logs "$@" "$moacpod" csi-provisioner + echo "# $moacpod csi-attacher $* ---------------------------------" + kubectl -n mayastor logs "$@" "$moacpod" csi-attacher +} + +function logs-csi-mayastor { + mayastor_csipods=$(kubectl -n mayastor get pods | grep mayastor-csi | sed -e 's/ .*//') + for pod in $mayastor_csipods + do + echo "# $pod mayastor-csi $* ---------------------------------" + kubectl -n mayastor logs "$@" "$pod" mayastor-csi + done +} + +function logs-mayastor { + mayastor_pods=$(kubectl -n mayastor get pods | grep mayastor | grep -v mayastor-csi | sed -e 's/ .*//') + for pod in $mayastor_pods + do + echo "# $pod mayastor $* ---------------------------------" + kubectl -n mayastor logs "$@" "$pod" mayastor + done +} + +function logs-moac { + moacpod=$(kubectl -n mayastor get pods | grep moac | sed -e 's/ .*//') + echo "# $moacpod moac $* ---------------------------------" + kubectl -n mayastor logs "$@" "$moacpod" moac +} + +# $1 = podlogs, 0 => do not generate pod logs +function dump-to-stdout { + if [ "$1" -ne 0 ]; then + logs-moac + logs-mayastor + logs-csi-mayastor + logs-csi-containers + + logs-moac -p + logs-mayastor -p + logs-csi-mayastor -p + logs-csi-containers -p + fi + + cluster-get + cluster-describe +} + +# $1 = podlogs, 0 => do not generate pod logs +# $2 = dest mkdir $dest and generate logs there. 
+function dump-to-dir {
+  dest="$2"
+  echo "Generating logs in $dest"
+  mkdir -p "$dest"
+  if [ "$1" -ne 0 ]; then
+    logs-moac >& "$dest/moac.log"
+    logs-mayastor >& "$dest/mayastor.log"
+    logs-csi-mayastor >& "$dest/csi-mayastor.log"
+    logs-csi-containers >& "$dest/csi-containers.log"
+
+    logs-moac -p >& "$dest/moac.previous.log"
+    logs-mayastor -p >& "$dest/mayastor.previous.log"
+    logs-csi-mayastor -p >& "$dest/csi-mayastor.previous.log"
+    logs-csi-containers -p >& "$dest/csi-containers.previous.log"
+  fi
+
+  cluster-get >& "$dest/cluster.get.txt"
+  cluster-describe >& "$dest/cluster.describe.txt"
+}
+
+# $1 = podlogs, 0 => do not generate pod logs
+# $2 = [destdir] undefined => dump to stdout,
+#      otherwise generate log files in $destdir
+function dump {
+  if [ -z "$2" ]; then
+    dump-to-stdout "$1"
+  else
+    dump-to-dir "$1" "$2"
+  fi
+}
+
+podlogs=1
+destdir=
+
+# Parse arguments
+while [ "$#" -gt 0 ]; do
+  case "$1" in
+    -p|--path)
+      shift
+      destdir="$1"
+      ;;
+    -c|--clusteronly)
+      podlogs=0
+      ;;
+    *)
+      echo "Unknown option: $1"
+      help
+      exit 1
+      ;;
+  esac
+  shift
+done
+
+# dump to stdout, or to the directory given by --path
+dump "$podlogs" "$destdir"
diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index d7fe02b51..6ce0eb26f 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -79,9 +79,17 @@ for dir in $TESTS; do
   fi
 done
 
+if [ -n "$test_failed" ]; then
+  # "$SCRIPTDIR"/e2e-cluster-dump.sh --path "/tmp/e2e-fail-logs"
+  "$SCRIPTDIR"/e2e-cluster-dump.sh
+fi
+
 # must always run uninstall test in order to clean up the cluster
 cd "$SCRIPTDIR/../test/e2e/uninstall"
-go test
+if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0 ; then
+  "$SCRIPTDIR"/e2e-cluster-dump.sh --clusteronly
+  # "$SCRIPTDIR"/e2e-cluster-dump.sh --path /tmp/uninstall-fail-logs
+fi
 
 if [ -n "$test_failed" ]; then
   exit 1
From 4fc1d1daa3f032eebaf79728646b398445b9357a Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Thu, 7 Jan 2021 09:18:14 +0000
Subject: [PATCH 52/85] Review fix

---
 scripts/e2e-cluster-dump.sh | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/scripts/e2e-cluster-dump.sh b/scripts/e2e-cluster-dump.sh
index 6286b96a3..575775254 100755
--- a/scripts/e2e-cluster-dump.sh
+++ b/scripts/e2e-cluster-dump.sh
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 
+set -e
+
 help() {
   cat <<EOF
-  --path <path>     Location to store log files
+  --destdir <path>  Location to store log files
   --clusteronly     Only generate cluster information
 
-If --path is not specified the data is dumped to stdout
+If --destdir is not specified the data is dumped to stdout
 EOF
 }
@@ -71,7 +73,7 @@ function logs-csi-containers {
   for pod in $mayastor_csipods
   do
     echo "# $pod csi-driver-registrar $* ---------------------------------"
-    kubectl -n mayastor logs "$@" "$pod" csi-driver-registrar
+    kubectl -n mayastor logs "$@" "$pod" csi-driver-registrar
   done
 
   moacpod=$(kubectl -n mayastor get pods | grep moac | sed -e 's/ .*//')
@@ -162,7 +164,7 @@ destdir=
 # Parse arguments
 while [ "$#" -gt 0 ]; do
   case "$1" in
-    -p|--path)
+    -d|--destdir)
       shift
       destdir="$1"
      ;;
From f9bffecd1feef6bc8d65947c422f69b745f597c0 Mon Sep 17 00:00:00 2001
From: Blaise Dias
Date: Thu, 7 Jan 2021 09:47:15 +0000
Subject: [PATCH 53/85] Another review fix

---
 scripts/e2e-test.sh | 2 --
 1 file changed, 2 deletions(-)

diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index 6ce0eb26f..7d7173419 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -80,7 +80,6 @@ for dir in $TESTS; do
 done
 
 if [ -n "$test_failed" ]; then
-	# "$SCRIPTDIR"/e2e-cluster-dump.sh --path "/tmp/e2e-fail-logs"
"$SCRIPTDIR"/e2e-cluster-dump.sh fi @@ -88,7 +87,6 @@ fi cd "$SCRIPTDIR/../test/e2e/uninstall" if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0 ; then "$SCRIPTDIR"/e2e-cluster-dump.sh --clusteronly - # "$SCRIPTDIR"/e2e-cluster-dump.sh --path /tmp/uninstall-fail-logs fi if [ -n "$test_failed" ]; then From 1d3e114e9505ae306336fc8ebf219f8f9331a25e Mon Sep 17 00:00:00 2001 From: Arne Rusek Date: Thu, 7 Jan 2021 11:57:29 +0100 Subject: [PATCH 54/85] perf(tests): set npm jobs to cpu count --- scripts/grpc-test.sh | 9 +++++---- scripts/moac-test.sh | 5 +++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/scripts/grpc-test.sh b/scripts/grpc-test.sh index 097a7ed92..080356030 100755 --- a/scripts/grpc-test.sh +++ b/scripts/grpc-test.sh @@ -1,15 +1,16 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash set -euxo pipefail -export PATH=$PATH:${HOME}/.cargo/bin +export PATH="$PATH:${HOME}/.cargo/bin" +export npm_config_jobs=$(nproc) cargo build --all -cd test/grpc +cd "$(dirname "$0")/../test/grpc" npm install for ts in cli replica nexus csi rebuild snapshot nats; do ./node_modules/mocha/bin/mocha test_${ts}.js \ --reporter ./multi_reporter.js \ --reporter-options reporters="xunit spec",output=../../${ts}-xunit-report.xml -done \ No newline at end of file +done diff --git a/scripts/moac-test.sh b/scripts/moac-test.sh index 0889476f9..08c326708 100755 --- a/scripts/moac-test.sh +++ b/scripts/moac-test.sh @@ -1,8 +1,9 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash set -euxo pipefail -cd csi/moac +cd "$(dirname "$0")/../csi/moac" +export npm_config_jobs=$(nproc) npm install npm run prepare npm run compile From c57363290af4c22b703ecc2c5cea8450557170a9 Mon Sep 17 00:00:00 2001 From: Jeffry Molanus Date: Wed, 6 Jan 2021 16:15:22 +0100 Subject: [PATCH 55/85] feat(poller): want poller abstraction This change adds a safe abstraction around the poller interface. We did not use pollers much (if not at all) but now that we need to construct, among others our own qpairs we need to create them often and frequently. A small trampoline function (which is always inlined) is used to avoid passing around raw pointers. The poller will execute a closure and allows us to capture variables so there is no need for boxing arguments. 
Implements CAS-610
---
 mayastor/src/core/mod.rs    |   2 +
 mayastor/src/core/poller.rs | 148 ++++++++++++++++++++++++++++++++++++
 mayastor/tests/poller.rs    |  86 +++++++++++++++++++++
 3 files changed, 236 insertions(+)
 create mode 100644 mayastor/src/core/poller.rs
 create mode 100644 mayastor/tests/poller.rs

diff --git a/mayastor/src/core/mod.rs b/mayastor/src/core/mod.rs
index fecf51bbc..c382dbe37 100644
--- a/mayastor/src/core/mod.rs
+++ b/mayastor/src/core/mod.rs
@@ -33,10 +33,12 @@ mod env;
 mod handle;
 pub mod io_driver;
 mod nvme;
+pub mod poller;
 mod reactor;
 mod share;
 pub(crate) mod thread;
 mod uuid;
+
 #[derive(Debug, Snafu, Clone)]
 #[snafu(visibility = "pub")]
 pub enum CoreError {
diff --git a/mayastor/src/core/poller.rs b/mayastor/src/core/poller.rs
new file mode 100644
index 000000000..54c6ef80e
--- /dev/null
+++ b/mayastor/src/core/poller.rs
@@ -0,0 +1,148 @@
+use std::{
+    ffi::{c_void, CString},
+    ptr::NonNull,
+    time::Duration,
+};
+
+use spdk_sys::{
+    spdk_poller,
+    spdk_poller_pause,
+    spdk_poller_register,
+    spdk_poller_register_named,
+    spdk_poller_resume,
+    spdk_poller_unregister,
+};
+
+/// structure holding our function and context
+struct PollCtx<'a>(Box<dyn FnMut() -> i32 + 'a>);
+
+/// indirection to avoid raw pointers at upper layers
+#[inline(always)]
+extern "C" fn _cb(ctx: *mut c_void) -> i32 {
+    let poll = unsafe { &mut *(ctx as *mut PollCtx) };
+    (poll.0)()
+}
+
+/// Poller structure that allows us to pause, stop, resume periodic tasks
+pub struct Poller<'a> {
+    inner: NonNull<spdk_poller>,
+    ctx: NonNull<PollCtx<'a>>,
+    stopped: bool,
+}
+
+impl<'a> Poller<'a> {
+    /// stop the given poller and consumes self
+    pub fn stop(mut self) {
+        unsafe {
+            spdk_poller_unregister(&mut self.inner.as_ptr());
+            Box::from_raw(self.ctx.as_ptr());
+            self.stopped = true;
+        }
+    }
+
+    /// pause the given poller
+    pub fn pause(&mut self) {
+        unsafe {
+            spdk_poller_pause(self.inner.as_ptr());
+        }
+    }
+
+    /// resume the given poller
+    pub fn resume(&mut self) {
+        unsafe {
+            spdk_poller_resume(self.inner.as_ptr());
+        }
+    }
+}
+
+impl<'a> Drop for Poller<'a> {
+    fn drop(&mut self) {
+        if !self.stopped {
+            unsafe {
+                spdk_poller_unregister(&mut self.inner.as_ptr());
+                Box::from_raw(self.ctx.as_ptr());
+            }
+        }
+    }
+}
+
+/// builder type to create a new poller
+pub struct Builder<'a> {
+    name: Option<CString>,
+    interval: std::time::Duration,
+    poll_fn: Option<Box<dyn FnMut() -> i32 + 'a>>,
+}
+
+impl<'a> Default for Builder<'a> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<'a> Builder<'a> {
+    /// create a new nameless poller that runs every time the thread the poller
+    /// is created on is polled
+    pub fn new() -> Self {
+        Self {
+            name: None,
+            interval: Duration::from_micros(0),
+            poll_fn: None,
+        }
+    }
+
+    /// create the poller with a given name
+    pub fn with_name<S: Into<Vec<u8>>>(mut self, name: S) -> Self {
+        self.name = Some(
+            CString::new(name)
+                .expect("poller name is invalid or out of memory"),
+        );
+        self
+    }
+
+    /// set the interval for the poller in usec
+    pub fn with_interval(mut self, usec: u64) -> Self {
+        self.interval = Duration::from_micros(usec);
+        self
+    }
+
+    /// set the function for this poller
+    pub fn with_poll_fn(mut self, poll_fn: impl FnMut() -> i32 + 'a) -> Self {
+        self.poll_fn = Some(Box::new(poll_fn));
+        self
+    }
+
+    /// build a new poller object
+    pub fn build(mut self) -> Poller<'a> {
+        let poll_fn = self
+            .poll_fn
+            .take()
+            .expect("can not start poller without poll function");
+
+        let ctx = NonNull::new(Box::into_raw(Box::new(PollCtx(poll_fn))))
+            .expect("failed to allocate new poller context");
+
+        let inner =
NonNull::new(unsafe { + if self.name.is_none() { + spdk_poller_register( + Some(_cb), + ctx.as_ptr().cast(), + self.interval.as_micros() as u64, + ) + } else { + spdk_poller_register_named( + Some(_cb), + ctx.as_ptr().cast(), + self.interval.as_micros() as u64, + self.name.as_ref().unwrap().as_ptr(), + ) + } + }) + .expect("failed to register poller"); + + Poller { + inner, + ctx, + stopped: false, + } + } +}
diff --git a/mayastor/tests/poller.rs b/mayastor/tests/poller.rs
new file mode 100644
index 000000000..84240ba7e
--- /dev/null
+++ b/mayastor/tests/poller.rs
@@ -0,0 +1,86 @@ +use crossbeam::atomic::AtomicCell; +use once_cell::sync::Lazy; + +use mayastor::core::{ + mayastor_env_stop, + poller, + MayastorCliArgs, + MayastorEnvironment, + Reactors, +}; + +pub mod common; + +static COUNT: Lazy<AtomicCell<u32>> = Lazy::new(|| AtomicCell::new(0)); + +fn test_fn(a: u32, b: u32) -> u32 { + a + b +} + +#[test] +fn poller() { + common::mayastor_test_init(); + MayastorEnvironment::new(MayastorCliArgs::default()).init(); + + let args = (1, 2); + let poller = poller::Builder::new() + .with_interval(0) + .with_poll_fn(move || { + println!("and a {} and {}", args.0, args.1); + let mut count = COUNT.load(); + count += 1; + COUNT.store(count); + 0 + }) + .build(); + + drop(poller); + Reactors::master().poll_once(); + + // we dropped the poller before we polled, the value should still be 0 + assert_eq!(COUNT.load(), 0); + + let args = (1, 2); + let mut poller = poller::Builder::new() + .with_interval(0) + .with_poll_fn(move || { + let count = COUNT.load(); + println!("and a {} and {} (count: {}) ", args.0, args.1, count); + COUNT.store(count + 1); + 0 + }) + .build(); + + Reactors::master().poll_times(64); + assert_eq!(COUNT.load(), 64); + + poller.pause(); + Reactors::master().poll_times(64); + assert_eq!(COUNT.load(), 64); + + poller.resume(); + Reactors::master().poll_times(64); + assert_eq!(COUNT.load(), 128); + + // poller stop consumes self + poller.stop(); + + Reactors::master().poll_times(64); + assert_eq!(COUNT.load(), 128); + + // demonstrate we keep state during callbacks + let mut ctx_state = 0; + let poller = poller::Builder::new() + .with_interval(0) + .with_poll_fn(move || { + ctx_state += test_fn(1, 2); + dbg!(ctx_state); + 0 + }) + .build(); + + Reactors::master().poll_times(64); + drop(poller); + + mayastor_env_stop(0); +}
From f11a4365e546d86d741de43d73335dd713f3d7a9 Mon Sep 17 00:00:00 2001
From: chriswldenyer
Date: Fri, 8 Jan 2021 10:56:54 +0000
Subject: [PATCH 56/85] test: disable e2e tests pending investigation

---
 scripts/e2e-test.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index 7d7173419..a73a16695 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -1,5 +1,8 @@ #!/usr/bin/env bash +# e2e tests disabled until we can make them more reliable +exit 0 + set -eux SCRIPTDIR=$(dirname "$(realpath "$0")")
From ab0a0caebef3a5a6bed33f3d6a2ef88a57edd0fa Mon Sep 17 00:00:00 2001
From: Ana Hobden
Date: Sun, 10 Jan 2021 05:17:38 -0800
Subject: [PATCH 57/85] feat: configure for direnv via envrc

Signed-off-by: Ana Hobden
---
 .envrc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .envrc

diff --git a/.envrc b/.envrc
new file mode 100644
index 000000000..1d953f4bd
--- /dev/null
+++ b/.envrc
@@ -0,0 +1 @@ +use nix
From dcfae9d4faaae1fbd13f5b2c0cfd81321d1f9921 Mon Sep 17 00:00:00 2001
From: Paul Yoong
Date: Thu, 7 Jan 2021 10:59:22 +0000
Subject: [PATCH 58/85] fix(replica): delay adding replica

Adding a replica to an unpublished nexus was
causing the child to be added as online rather than degraded when the
volume was subsequently published. Because of this, a rebuild was not
run and data corruption could occur when reading from the newly added
child.

This change delays the addition of a replica until a volume is
published, ensuring the child is added as degraded and rebuilt
accordingly.

Also added a new "replica_test" E2E test.
---
 csi/moac/volume.ts | 24 ++--------
 scripts/e2e-test.sh | 3 +-
 test/e2e/replica/README.md | 21 ++++
 test/e2e/replica/replica_test.go | 82 ++++++++++++++++++++++++++++++++
 4 files changed, 110 insertions(+), 20 deletions(-)
 create mode 100644 test/e2e/replica/README.md
 create mode 100644 test/e2e/replica/replica_test.go

diff --git a/csi/moac/volume.ts b/csi/moac/volume.ts
index af6f25846..4dedc7d4b 100644
--- a/csi/moac/volume.ts
+++ b/csi/moac/volume.ts
@@ -244,20 +244,6 @@ export class Volume { return; } - // check number of replicas for the volume - const newReplicaCount = this.replicaCount - Object.values(this.replicas).length; - if (newReplicaCount > 0) { - this._setState(VolumeState.Degraded); - try { - await this._createReplicas(newReplicaCount); - } catch (err) { - logError(err); - } - // New replicas will be added to the volume through events. On next fsa - // enter they will be there and we may continue beyound this point then. - return; - } - if (!this.publishedOn) { // If the volume hasn't been published we can't do anything more than what // we have done (that is maintain required # of replicas). When we create
@@ -329,9 +315,9 @@ } // pair nexus children with replica objects to get the full picture - const childReplicaPairs: {ch: Child, r: Replica | undefined}[] = this.nexus.children.map((ch) => { + const childReplicaPairs: { ch: Child, r: Replica | undefined }[] = this.nexus.children.map((ch) => { const r = Object.values(this.replicas).find((r) => r.uri === ch.uri); - return {ch, r}; + return { ch, r }; }); // add newly found replicas to the nexus (one by one) const newReplicas = Object.values(this.replicas).filter((r) => {
@@ -421,7 +407,7 @@ if (!rmPair && onlineCount > this.replicaCount) { // The replica with the lowest score must go away const rmReplica = this._prioritizeReplicas( - childReplicaPairs + childReplicaPairs .map((pair) => pair.r) .filter((r) => r !== undefined) ).pop();
@@ -541,7 +527,7 @@ if (pools.length < count) { log.error( `No suitable pool(s) for volume "${this}" with capacity ` + - `${this.requiredBytes} and replica count ${this.replicaCount}` + `${this.requiredBytes} and replica count ${this.replicaCount}` ); throw new GrpcError( GrpcCode.RESOURCE_EXHAUSTED,
@@ -604,7 +590,7 @@ // @param {object} replica Replica object. // @returns {number} Score from 0 to 18.
// - _scoreReplica (replica: Replica) { + _scoreReplica(replica: Replica) { let score = 0; const node = replica.pool!.node; diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index a73a16695..a0df8bff3 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -6,7 +6,8 @@ exit 0 set -eux SCRIPTDIR=$(dirname "$(realpath "$0")") -TESTS="install basic_volume_io node_disconnect/replica_pod_remove" +# new tests should be added before the replica_pod_remove test +TESTS="install basic_volume_io replica node_disconnect/replica_pod_remove" DEVICE= REGISTRY= TAG= diff --git a/test/e2e/replica/README.md b/test/e2e/replica/README.md new file mode 100644 index 000000000..1defde3a2 --- /dev/null +++ b/test/e2e/replica/README.md @@ -0,0 +1,21 @@ +## Pre-requisites for this test + +* A Kubernetes cluster with at least 3 nodes, with mayastor installed. + +## Overview +The tests in this folder are for testing the behaviour of the replicas in a Kubernetes environment. + +## Test Descriptions + +### replica test +The purpose of this test is to ensure that a replica can be correctly added to an unpublished nexus. + +To run: +```bash +go test replica_test.go +``` + +To run with verbose output: +```bash +go test -v replica_test.go +``` \ No newline at end of file diff --git a/test/e2e/replica/replica_test.go b/test/e2e/replica/replica_test.go new file mode 100644 index 000000000..43d41a6a3 --- /dev/null +++ b/test/e2e/replica/replica_test.go @@ -0,0 +1,82 @@ +package replica_test + +import ( + "e2e-basic/common" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + pvcName = "replica-test-pvc" + storageClass = "mayastor-nvmf" +) + +const fioPodName = "fio" + +func addUnpublishedReplicaTest() { + // Create a PVC + common.MkPVC(pvcName, storageClass) + pvc, err := common.GetPVC(pvcName) + Expect(err).To(BeNil()) + Expect(pvc).ToNot(BeNil()) + + timeout := "90s" + pollPeriod := "1s" + + // Add another child before publishing the volume. + uuid := string(pvc.ObjectMeta.UID) + common.UpdateNumReplicas(uuid, 2) + repl, err := common.GetNumReplicas(uuid) + Expect(err).To(BeNil()) + Expect(repl).Should(Equal(int64(2))) + + // Use the PVC and wait for the volume to be published + common.CreateFioPod(fioPodName, pvcName) + Eventually(func() bool { return common.IsVolumePublished(uuid) }, timeout, pollPeriod).Should(Equal(true)) + + getChildrenFunc := func(uuid string) []common.NexusChild { + children, err := common.GetChildren(uuid) + if err != nil { + panic("Failed to get children") + } + Expect(len(children)).Should(Equal(2)) + return children + } + + // Check the added child and nexus are both degraded. + Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_DEGRADED")) + Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_DEGRADED")) + + // Check everything eventually goes healthy following a rebuild. 
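+ // The rebuild runs in the background, so poll until both children and the nexus report online again.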
+ Eventually(func() string { return getChildrenFunc(uuid)[0].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) + Eventually(func() string { return getChildrenFunc(uuid)[1].State }, timeout, pollPeriod).Should(BeEquivalentTo("CHILD_ONLINE")) + Eventually(func() (string, error) { return common.GetNexusState(uuid) }, timeout, pollPeriod).Should(BeEquivalentTo("NEXUS_ONLINE")) +} + +func TestReplica(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Replica Test Suite") +} + +var _ = Describe("Mayastor replica tests", func() { + It("should test the addition of a replica to an unpublished volume", func() { + addUnpublishedReplicaTest() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + common.SetupTestEnv() + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + common.DeletePod(fioPodName) + common.RmPVC(pvcName, storageClass) + common.TeardownTestEnv() +})
From 81489092eb41e03bc77a96281512baff0a24d986 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Wed, 6 Jan 2021 13:36:47 +0000
Subject: [PATCH 59/85] feat(js): use new version of grpc-uds with cached binary

0.1.6 has a cached prebuilt binary on github, so it is downloaded if
available instead of having to build the grpc library each time the
module is installed. Unfortunately, with nix that does not work.
Moreover, mayastor tests still depend on the official node-grpc
package, so the benefit of this change is not as great as anticipated.
That said, the following cases were improved:

1. when running moac unit tests, npm install will be much faster

2. when running grpc api tests, we have to build grpc just once (the
official one) instead of twice as before.
---
 csi/moac/node-packages.nix | 30 +--
 csi/moac/package-lock.json | 424 +++++++++++++++++++++++++++++++++++-
 csi/moac/package.json | 2 +-
 nix/pkgs/images/default.nix | 5 +
 test/grpc/package-lock.json | 412 ++++++++++++++++++++++++++++++++++-
 test/grpc/package.json | 2 +-
 6 files changed, 846 insertions(+), 29 deletions(-)

diff --git a/csi/moac/node-packages.nix b/csi/moac/node-packages.nix
index 1b87e6c30..cafe14fa2 100644
--- a/csi/moac/node-packages.nix
+++ b/csi/moac/node-packages.nix
@@ -2020,13 +2020,13 @@ let sha512 = "4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA=="; }; }; - "grpc-uds-0.1.4" = { + "grpc-uds-0.1.6" = { name = "grpc-uds"; packageName = "grpc-uds"; - version = "0.1.4"; + version = "0.1.6"; src = fetchurl { - url = "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.4.tgz"; - sha512 = "AzSJ8SscZuCmqZLyS7i/UbutJDuAkPnfN7wWZzkW7TA+xi7T1g2G7duYc/bgwhB4aTi/RwUs7KemJpKA4W5ZOw=="; + url = "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.6.tgz"; + sha512 = "l7sxZFjrdm6C7e0OHcyclrQGgaFQUxHD4jA93h9jnIzAw0NmJk+2xvnEe5chC8BOVv9cAVWKGPQyyOAzJoafwA=="; }; }; "har-schema-2.0.0" = {
@@ -3019,13 +3019,13 @@ let sha512 = "sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="; }; }; - "nan-2.14.1" = { + "nan-2.14.2" = { name = "nan"; packageName = "nan"; - version = "2.14.1"; + version = "2.14.2"; src = fetchurl { - url = "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz"; - sha512 = "isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw=="; + url = "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz"; + sha512 = "M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ=="; }; }; "nats-2.0.0-209" = {
@@ -4819,13 +4819,13 @@ let sha512 =
"PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q=="; }; }; - "y18n-3.2.1" = { + "y18n-3.2.2" = { name = "y18n"; packageName = "y18n"; - version = "3.2.1"; + version = "3.2.2"; src = fetchurl { - url = "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz"; - sha1 = "6d15fba884c08679c0d77e88e7759e811e07fa41"; + url = "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz"; + sha512 = "uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ=="; }; }; "y18n-4.0.0" = { @@ -5242,7 +5242,7 @@ let sources."graceful-fs-4.2.4" sources."growl-1.10.5" sources."grpc-promise-1.4.0" - (sources."grpc-uds-0.1.4" // { + (sources."grpc-uds-0.1.6" // { dependencies = [ sources."protobufjs-5.0.3" sources."yargs-3.32.0" @@ -5373,7 +5373,7 @@ let ]; }) sources."ms-2.0.0" - sources."nan-2.14.1" + sources."nan-2.14.2" sources."nats-2.0.0-209" sources."natural-compare-1.4.0" sources."negotiator-0.6.2" @@ -5622,7 +5622,7 @@ let sources."ws-7.4.0" sources."wtfnode-0.8.3" sources."xdg-basedir-4.0.0" - sources."y18n-3.2.1" + sources."y18n-3.2.2" sources."yallist-4.0.0" (sources."yargs-16.0.3" // { dependencies = [ diff --git a/csi/moac/package-lock.json b/csi/moac/package-lock.json index f6e7caf33..2d73a7a2e 100644 --- a/csi/moac/package-lock.json +++ b/csi/moac/package-lock.json @@ -1983,16 +1983,309 @@ "integrity": "sha512-4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA==" }, "grpc-uds": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.4.tgz", - "integrity": "sha512-AzSJ8SscZuCmqZLyS7i/UbutJDuAkPnfN7wWZzkW7TA+xi7T1g2G7duYc/bgwhB4aTi/RwUs7KemJpKA4W5ZOw==", + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.6.tgz", + "integrity": "sha512-l7sxZFjrdm6C7e0OHcyclrQGgaFQUxHD4jA93h9jnIzAw0NmJk+2xvnEe5chC8BOVv9cAVWKGPQyyOAzJoafwA==", "requires": { "lodash.camelcase": "^4.3.0", "lodash.clone": "^4.5.0", "nan": "^2.13.2", + "node-pre-gyp": "^0.13.0", "protobufjs": "^5.0.3" }, "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true + }, + "are-we-there-yet": { + "version": "1.1.5", + "bundled": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.1.4", + "bundled": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true + }, + "debug": { + "version": "3.2.7", + "bundled": true, + "requires": { + "ms": "^2.1.1" + } + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true + }, + "fs-minipass": { + "version": "1.2.7", + "bundled": true, + "requires": { + "minipass": "^2.6.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + 
"object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.6", + "bundled": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true + }, + "iconv-lite": { + "version": "0.4.24", + "bundled": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true + }, + "ini": { + "version": "1.3.8", + "bundled": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "bundled": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "bundled": true + }, + "minipass": { + "version": "2.9.0", + "bundled": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "requires": { + "minipass": "^2.9.0" + } + }, + "mkdirp": { + "version": "0.5.5", + "bundled": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "ms": { + "version": "2.1.3", + "bundled": true + }, + "needle": { + "version": "2.5.2", + "bundled": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.13.0", + "bundled": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.1", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.2.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4" + } + }, + "nopt": { + "version": "4.0.3", + "bundled": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.1.1", + "bundled": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-normalize-package-bin": { + "version": "1.0.1", + "bundled": true + }, + "npm-packlist": { + "version": "1.4.8", + "bundled": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true + }, + "process-nextick-args": { + "version": "2.0.1", + "bundled": true + }, "protobufjs": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz", @@ -2004,6 +2297,119 @@ 
"yargs": "^3.10.0" } }, + "rc": { + "version": "1.2.8", + "bundled": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "readable-stream": { + "version": "2.3.7", + "bundled": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true + }, + "sax": { + "version": "1.2.4", + "bundled": true + }, + "semver": { + "version": "5.7.1", + "bundled": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true + }, + "signal-exit": { + "version": "3.0.3", + "bundled": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true + }, + "wide-align": { + "version": "1.1.3", + "bundled": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true + }, + "yallist": { + "version": "3.1.1", + "bundled": true + }, "yargs": { "version": "3.32.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", @@ -2864,9 +3270,9 @@ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "nan": { - "version": "2.14.1", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", - "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==" + "version": "2.14.2", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", + "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==" }, "nats": { "version": "2.0.0-209", @@ -4544,9 +4950,9 @@ "dev": true }, "y18n": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", - "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=" + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", + "integrity": "sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==" }, "yallist": { "version": "4.0.0", diff --git a/csi/moac/package.json b/csi/moac/package.json index b05bf62f6..f478137aa 100644 --- a/csi/moac/package.json +++ b/csi/moac/package.json @@ -29,7 +29,7 @@ "client-node-fixed-watcher": "^0.13.2", "express": "^4.17.1", "grpc-promise": "^1.4.0", - "grpc-uds": "^0.1.4", + "grpc-uds": "^0.1.6", "js-yaml": "^3.14.0", "lodash": "^4.17.20", "nats": "^2.0.0-27", diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index 60c2e0230..9068bb3cf 100644 --- a/nix/pkgs/images/default.nix +++ 
b/nix/pkgs/images/default.nix @@ -113,6 +113,11 @@ rec { chmod u+w bin ln -s ${moac.out}/bin/moac bin/moac chmod u-w bin + # workaround for detect-libc npm module unable to detect glibc system + chmod u+w . + mkdir -p usr/sbin + touch usr/sbin/detect-glibc-in-nix-container + chmod u-w . ''; maxLayers = 42; }; diff --git a/test/grpc/package-lock.json b/test/grpc/package-lock.json index b804c188b..037e3c612 100644 --- a/test/grpc/package-lock.json +++ b/test/grpc/package-lock.json @@ -1477,16 +1477,309 @@ "integrity": "sha512-4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA==" }, "grpc-uds": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.4.tgz", - "integrity": "sha512-AzSJ8SscZuCmqZLyS7i/UbutJDuAkPnfN7wWZzkW7TA+xi7T1g2G7duYc/bgwhB4aTi/RwUs7KemJpKA4W5ZOw==", + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/grpc-uds/-/grpc-uds-0.1.6.tgz", + "integrity": "sha512-l7sxZFjrdm6C7e0OHcyclrQGgaFQUxHD4jA93h9jnIzAw0NmJk+2xvnEe5chC8BOVv9cAVWKGPQyyOAzJoafwA==", "requires": { "lodash.camelcase": "^4.3.0", "lodash.clone": "^4.5.0", "nan": "^2.13.2", + "node-pre-gyp": "^0.13.0", "protobufjs": "^5.0.3" }, "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true + }, + "are-we-there-yet": { + "version": "1.1.5", + "bundled": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.1.4", + "bundled": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true + }, + "debug": { + "version": "3.2.7", + "bundled": true, + "requires": { + "ms": "^2.1.1" + } + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true + }, + "fs-minipass": { + "version": "1.2.7", + "bundled": true, + "requires": { + "minipass": "^2.6.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.6", + "bundled": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true + }, + "iconv-lite": { + "version": "0.4.24", + "bundled": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true + }, + "ini": { + 
"version": "1.3.8", + "bundled": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "bundled": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "bundled": true + }, + "minipass": { + "version": "2.9.0", + "bundled": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "requires": { + "minipass": "^2.9.0" + } + }, + "mkdirp": { + "version": "0.5.5", + "bundled": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "ms": { + "version": "2.1.3", + "bundled": true + }, + "needle": { + "version": "2.5.2", + "bundled": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.13.0", + "bundled": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.1", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.2.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4" + } + }, + "nopt": { + "version": "4.0.3", + "bundled": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.1.1", + "bundled": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-normalize-package-bin": { + "version": "1.0.1", + "bundled": true + }, + "npm-packlist": { + "version": "1.4.8", + "bundled": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true + }, + "process-nextick-args": { + "version": "2.0.1", + "bundled": true + }, "protobufjs": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz", @@ -1497,6 +1790,119 @@ "glob": "^7.0.5", "yargs": "^3.10.0" } + }, + "rc": { + "version": "1.2.8", + "bundled": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "readable-stream": { + "version": "2.3.7", + "bundled": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true + }, + "sax": { + "version": "1.2.4", + "bundled": true + }, + "semver": { + "version": "5.7.1", + "bundled": true + }, + 
"set-blocking": { + "version": "2.0.0", + "bundled": true + }, + "signal-exit": { + "version": "3.0.3", + "bundled": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true + }, + "wide-align": { + "version": "1.1.3", + "bundled": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true + }, + "yallist": { + "version": "3.1.1", + "bundled": true } } }, diff --git a/test/grpc/package.json b/test/grpc/package.json index 624b7067e..57b31aef6 100644 --- a/test/grpc/package.json +++ b/test/grpc/package.json @@ -15,7 +15,7 @@ "grpc-kit": "^0.2.0", "grpc-mock": "^0.7.0", "grpc-promise": "^1.4.0", - "grpc-uds": "^0.1.4", + "grpc-uds": "^0.1.6", "inpath": "^1.0.2", "lodash": "^4.17.19", "mocha": "^7.1.1", From 0b8d18969278f37a58b1191e9d33e269e9cff6b1 Mon Sep 17 00:00:00 2001 From: Arne Rusek Date: Mon, 11 Jan 2021 11:30:13 +0100 Subject: [PATCH 60/85] docs(jenkins): disable password auth We use only keys for ssh auth. Disable password authentication altogether to avoid future mishaps. --- doc/jenkins.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/jenkins.md b/doc/jenkins.md index 77ab0f574..a04b2cfa7 100644 --- a/doc/jenkins.md +++ b/doc/jenkins.md @@ -19,7 +19,11 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). { imports = [ ./hardware.nix ]; - services.openssh.enable = true; + services.openssh = { + enable = true; + passwordAuthentication = false; + challengeResponseAuthentication = false; + }; services.jenkins.enable = true; networking.firewall.enable = false; @@ -141,7 +145,11 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). 
boot.kernelPackages = pkgs.linuxPackages_5_7; - services.openssh.enable = true; + services.openssh = { + enable = true; + passwordAuthentication = false; + challengeResponseAuthentication = false; + }; services.jenkinsSlave.enable = true; services.iscsid.enable = true; From eeffdf4214e54286c134cefc85f84e1fa91a1615 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 8 Jan 2021 17:28:46 +0000 Subject: [PATCH 61/85] ci(lint): lint commits on pull requests Add github action to lint PR commits and the PR itself --- .github/workflows/pr-commitlint.yml | 22 ++++++++++++++++++++++ commitlint.config.js | 8 ++++++++ 2 files changed, 30 insertions(+) create mode 100644 .github/workflows/pr-commitlint.yml create mode 100644 commitlint.config.js diff --git a/.github/workflows/pr-commitlint.yml b/.github/workflows/pr-commitlint.yml new file mode 100644 index 000000000..8ef3e229b --- /dev/null +++ b/.github/workflows/pr-commitlint.yml @@ -0,0 +1,22 @@ +name: Lint Commit Messages +on: + pull_request: + types: ['opened', 'edited', 'reopened', 'synchronize'] + +jobs: + commitlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install CommitLint and Dependencies + run: npm install @commitlint/config-conventional @commitlint/cli + - name: Lint Commits + run: | + first_commit=$(curl ${{ github.event.pull_request.commits_url }} 2>/dev/null | jq '.[0].sha' | sed 's/"//g') + last_commit=HEAD^2 # don't lint the merge commit + npx commitlint --from $first_commit~1 --to $last_commit -V + - name: Lint Pull Request + run: echo $'${{ github.event.pull_request.title }}\n\n${{ github.event.pull_request.body }}' | npx commitlint -V + diff --git a/commitlint.config.js b/commitlint.config.js new file mode 100644 index 000000000..c12291346 --- /dev/null +++ b/commitlint.config.js @@ -0,0 +1,8 @@ +module.exports = { + extends: ['@commitlint/config-conventional'], + rules: { + "header-max-length": async () => [2, "always", 50], + "body-max-line-length": async () => [2, "always", 72], + }, + defaultIgnores: false, +} From 98f8a06b84eb61a7fcb44e43371a1eab991d06fe Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 8 Jan 2021 17:29:47 +0000 Subject: [PATCH 62/85] style(lint): lint commit messages Add pre-commit hook to lint commit messages locally --- .pre-commit-config.yaml | 8 ++++++++ shell.nix | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b2c3d171..22f6c0ea9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,3 +30,11 @@ repos: pass_filenames: true types: [file, javascript] language: system + - id: commit-lint + name: Commit Lint + description: Runs commitlint against the commit message. + language: system + entry: bash -c "cat $1 | npx commitlint" + args: [$1] + stages: [commit-msg] + diff --git a/shell.nix b/shell.nix index 0f59be465..6c574895a 100644 --- a/shell.nix +++ b/shell.nix @@ -69,6 +69,6 @@ mkShell { ${pkgs.lib.optionalString (norust) "cowsay ${norust_moth}"} ${pkgs.lib.optionalString (norust) "echo 'Hint: use rustup tool.'"} ${pkgs.lib.optionalString (norust) "echo"} - pre-commit install + pre-commit install --hook commit-msg ''; } From cd64417cd54613778581e8bbcd397046fa5dcd33 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Tue, 8 Dec 2020 12:52:32 +0000 Subject: [PATCH 63/85] fix: fix composer container restart and ... also add logging from containers on panic, tracing for the cargo code and common message bus connect fn to facilitate testing. 
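As a rough usage sketch (illustrative only; it strings together the new helpers introduced in the diffs below — `with_nats`, `autorun`, `start` and `connect_to_bus` — with error handling elided):

    // Build a compose cluster whose docker network is named after the test.
    let test = Builder::new()
        .name("bus-test")
        .add_container_bin(
            "nats",
            Binary::from_nix("nats-server").with_arg("-DV"),
        )
        // with_nats("-n") appends "-n nats.<network>:4222" to the args.
        .add_container_bin("node", Binary::from_dbg("node").with_nats("-n"))
        .autorun(false)
        .build()
        .await?;

    // Start the broker first, then attach the cargo test itself to the
    // message bus with short timeouts so failures surface quickly.
    test.start("nats").await?;
    test.connect_to_bus("nats").await;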
Composer containers were not restarting because the containers were
being created with two networks, the global test network and a "fake"
one. Address this by using a network with the compose name, which
defaults to the global one, but allow using different networks by
making sure that we always clean up networks carrying our test labels.
---
 Cargo.lock | 3 +
 composer/Cargo.toml | 3 +
 composer/src/lib.rs | 194 +++++++++++++++++++++++++----
 mbus-api/src/lib.rs | 28 ++++-
 mbus-api/src/mbus_nats.rs | 12 ++
 rest/service/src/message_bus/v0.rs | 13 +-
 rest/tests/v0_test.rs | 20 ++-
 services/node/src/server.rs | 10 +-
 8 files changed, 228 insertions(+), 55 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5457418af..3709e951d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -955,9 +955,12 @@ dependencies = [ "crossbeam", "futures", "ipnetwork", + "mbus_api", "rpc", "tokio", "tonic", + "tracing", + "tracing-subscriber", ] [[package]]
diff --git a/composer/Cargo.toml b/composer/Cargo.toml
index 315795f46..9783d7d74 100644
--- a/composer/Cargo.toml
+++ b/composer/Cargo.toml
@@ -14,6 +14,9 @@ crossbeam = "0.7.3" rpc = { path = "../rpc" } ipnetwork = "0.17.0" bollard = "0.8.0" +tracing = "0.1.22" +tracing-subscriber = "0.2.15" +mbus_api = { path = "../mbus-api" } [dev-dependencies] tokio = { version = "0.2", features = ["full"] }
diff --git a/composer/src/lib.rs b/composer/src/lib.rs
index b61db5dd4..b94e0fe1a 100644
--- a/composer/src/lib.rs
+++ b/composer/src/lib.rs
@@ -13,6 +13,7 @@ use bollard::{ LogsOptions, NetworkingConfig, RemoveContainerOptions, + RestartContainerOptions, StopContainerOptions, }, errors::Error,
@@ -35,10 +36,12 @@ use ipnetwork::Ipv4Network; use tonic::transport::Channel; use bollard::models::ContainerInspectResponse; +use mbus_api::TimeoutOptions; use rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, mayastor_client::MayastorClient, }; + pub const TEST_NET_NAME: &str = "mayastor-testing-network"; pub const TEST_NET_NETWORK: &str = "10.1.0.0/16"; #[derive(Clone)]
@@ -55,7 +58,7 @@ impl RpcHandle { name: String, endpoint: SocketAddr, ) -> Result<Self, String> { - let mut attempts = 60; + let mut attempts = 40; loop { if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) .is_ok()
@@ -96,6 +99,8 @@ impl RpcHandle { pub struct Binary { path: String, arguments: Vec<String>, + nats_arg: String, + env: HashMap<String, String>, } impl Binary {
@@ -132,6 +137,27 @@ impl Binary { self.arguments.extend(args.drain(..).map(|s| s.into())); self } + /// Set the nats endpoint via the provided argument + pub fn with_nats(mut self, arg: &str) -> Self { + self.nats_arg = arg.to_string(); + self + } + /// Add environment variables for the container + pub fn with_env(mut self, key: &str, val: &str) -> Self { + if let Some(old) = self.env.insert(key.into(), val.into()) { + println!("Replaced key {} val {} with val {}", key, old, val); + } + self + } + /// pick up the nats argument name for a particular binary from nats_arg + /// and fill up the nats server endpoint using the network name + fn setup_nats(&mut self, network: &str) { + if !self.nats_arg.is_empty() { + self.arguments.push(self.nats_arg.clone()); + self.arguments.push(format!("nats.{}:4222", network)); + self.nats_arg = String::new(); + } + } fn which(name: &str) -> std::io::Result<String> { let output = std::process::Command::new("which").arg(name).output()?;
@@ -141,6 +167,7 @@ impl Binary { Self { path, arguments: args, + ..Default::default() } } }
@@ -153,6 +180,9 @@ impl Into<Vec<String>> for Binary { } } +const RUST_LOG_DEFAULT: &str =
"debug,actix_web=debug,actix=debug,h2=info,hyper=info,tower_buffer=info,bollard=info,rustls=info"; + /// Specs of the allowed containers include only the binary path /// (relative to src) and the required arguments #[derive(Default, Clone)] @@ -180,7 +210,7 @@ impl ContainerSpec { /// Create new ContainerSpec from name and binary pub fn new(name: &str, binary: Binary) -> Self { let mut env = HashMap::new(); - env.insert("RUST_LOG".to_string(), "debug,h2=info".to_string()); + env.insert("RUST_LOG".to_string(), RUST_LOG_DEFAULT.to_string()); Self { name: name.into(), binary, @@ -192,13 +222,17 @@ impl ContainerSpec { /// Add port mapping from container to host pub fn with_portmap(mut self, from: &str, to: &str) -> Self { let from = format!("{}/tcp", from); - let mut port_map = bollard::service::PortMap::new(); let binding = bollard::service::PortBinding { host_ip: None, host_port: Some(to.into()), }; - port_map.insert(from, Some(vec![binding])); - self.port_map = Some(port_map); + if let Some(pm) = &mut self.port_map { + pm.insert(from, Some(vec![binding])); + } else { + let mut port_map = bollard::service::PortMap::new(); + port_map.insert(from, Some(vec![binding])); + self.port_map = Some(port_map); + } self } /// Add environment key-val, eg for setting the RUST_LOG @@ -209,7 +243,10 @@ impl ContainerSpec { } self } - fn env_to_vec(&self) -> Vec { + + /// Environment variables as a vector with each element as: + /// "{key}={value}" + fn environment(&self) -> Vec { let mut vec = vec![]; self.env.iter().for_each(|(k, v)| { vec.push(format!("{}={}", k, v)); @@ -233,6 +270,10 @@ pub struct Builder { clean: bool, /// destroy existing containers if any prune: bool, + /// run all containers on build + autorun: bool, + /// output container logs on panic + logs_on_panic: bool, } impl Default for Builder { @@ -245,14 +286,22 @@ impl Builder { /// construct a new builder for `[ComposeTest'] pub fn new() -> Self { Self { - name: "".to_string(), + name: TEST_NET_NAME.to_string(), containers: Default::default(), - network: "10.1.0.0".to_string(), + network: "10.1.0.0/16".to_string(), clean: true, prune: true, + autorun: true, + logs_on_panic: true, } } + /// run all containers on build + pub fn autorun(mut self, run: bool) -> Builder { + self.autorun = run; + self + } + /// set the network for this test pub fn network(mut self, network: &str) -> Builder { self.network = network.to_owned(); @@ -279,7 +328,8 @@ impl Builder { } /// add a generic container which runs a local binary - pub fn add_container_bin(self, name: &str, bin: Binary) -> Builder { + pub fn add_container_bin(self, name: &str, mut bin: Binary) -> Builder { + bin.setup_nats(&self.name); self.add_container_spec(ContainerSpec::new(name, bin)) } @@ -289,6 +339,7 @@ impl Builder { self } + /// prune containers and networks on start pub fn with_prune(mut self, enable: bool) -> Builder { self.prune = enable; self @@ -297,13 +348,16 @@ impl Builder { pub async fn build( self, ) -> Result> { + let autorun = self.autorun; let mut compose = self.build_only().await?; - compose.start_all().await?; + if autorun { + compose.start_all().await?; + } Ok(compose) } /// build the config but don't start the containers - pub async fn build_only( + async fn build_only( self, ) -> Result> { let net: Ipv4Network = self.network.parse()?; @@ -335,6 +389,7 @@ impl Builder { label_prefix: "io.mayastor.test".to_string(), clean: self.clean, prune: self.prune, + logs_on_panic: self.logs_on_panic, }; compose.network_id = @@ -386,11 +441,22 @@ pub struct ComposeTest { 
/// automatically clean up the things we have created for this test clean: bool, pub prune: bool, + /// output container logs on panic + logs_on_panic: bool, } impl Drop for ComposeTest { /// destroy the containers and network. Notice that we use sync code here fn drop(&mut self) { + if thread::panicking() && self.logs_on_panic { + self.containers.keys().for_each(|name| { + tracing::error!("Logs from container '{}':", name); + let _ = std::process::Command::new("docker") + .args(&["logs", name]) + .status(); + }); + } + if self.clean { self.containers.keys().for_each(|c| { std::process::Command::new("docker")
@@ -416,18 +482,23 @@ impl ComposeTest { /// name already exists it will be reused. Note that we do not check the /// networking IP and/or subnets async fn network_create(&mut self) -> Result<String, Error> { - let mut net = self.network_list().await?; + let mut net = self.network_list_labeled().await?; if !net.is_empty() { let first = net.pop().unwrap(); - self.network_id = first.id.unwrap(); - return Ok(self.network_id.clone()); + if Some(self.name.clone()) == first.name { + // reuse the same network + self.network_id = first.id.unwrap(); + return Ok(self.network_id.clone()); + } else { + self.network_remove_labeled().await?; + } } let name_label = format!("{}.name", self.label_prefix); // we use the same network everywhere let create_opts = CreateNetworkOptions { - name: TEST_NET_NAME, + name: self.name.as_str(), check_duplicate: true, driver: "bridge", internal: false,
@@ -449,12 +520,36 @@ }) } - async fn network_remove(&self) -> Result<(), Error> { + async fn network_remove_labeled(&self) -> Result<(), Error> { + let our_networks = self.network_list_labeled().await?; + for network in our_networks { + let name = &network.name.unwrap(); + self.remove_network_containers(name).await?; + self.network_remove(name).await?; + } + Ok(()) + } + + /// remove all containers from the network + async fn remove_network_containers(&self, name: &str) -> Result<(), Error> { + let containers = self.list_network_containers(name).await?; + for k in &containers { + let name = k.id.clone().unwrap(); + self.remove_container(&name).await?; + while let Ok(_c) = self.docker.inspect_container(&name, None).await + { + tokio::time::delay_for(Duration::from_millis(500)).await; + } + } + Ok(()) + } + + async fn network_remove(&self, name: &str) -> Result<(), Error> { // if the network is not found, its not an error, any other error is // reported as such. Networks can only be destroyed when all containers // attached to it are removed.
To get a list of attached // containers, use network_list() - if let Err(e) = self.docker.remove_network(&self.name).await { + if let Err(e) = self.docker.remove_network(name).await { if !matches!(e, Error::DockerResponseNotFoundError{..}) { return Err(e); }
@@ -463,17 +558,30 @@ Ok(()) } - /// list all the docker networks - pub async fn network_list(&self) -> Result<Vec<Network>, Error> { + /// list all the docker networks with our filter + pub async fn network_list_labeled(&self) -> Result<Vec<Network>, Error> { self.docker .list_networks(Some(ListNetworksOptions { - filters: vec![("name", vec![TEST_NET_NAME])] + filters: vec![("label", vec!["io.mayastor.test.name"])] .into_iter() .collect(), })) .await } + async fn list_network_containers( + &self, + name: &str, + ) -> Result<Vec<ContainerSummaryInner>, Error> { + self.docker + .list_containers(Some(ListContainersOptions { + all: true, + filters: vec![("network", vec![name])].into_iter().collect(), + ..Default::default() + })) + .await + } + /// list containers pub async fn list_containers( &self,
@@ -518,7 +626,7 @@ tokio::time::delay_for(Duration::from_millis(500)).await; } } - self.network_remove().await?; + self.network_remove(&self.name).await?; Ok(()) }
@@ -602,7 +710,7 @@ }, ); - let mut env = spec.env_to_vec(); + let mut env = spec.environment(); env.push(format!("MY_POD_IP={}", ipv4)); let cmd: Vec<String> = spec.into();
@@ -694,6 +802,28 @@ Ok(()) } + /// restart the container + pub async fn restart(&self, name: &str) -> Result<(), Error> { + let id = self.containers.get(name).unwrap(); + if let Err(e) = self + .docker + .restart_container( + id.0.as_str(), + Some(RestartContainerOptions { + t: 3, + }), + ) + .await + { + // where already stopped + if !matches!(e, Error::DockerResponseNotModifiedError{..}) { + return Err(e); + } + } + + Ok(()) + } + /// get the logs from the container.
It would be nice to make it implicit /// that is, when you make a rpc call, whatever logs where created due to /// that are returned
@@ -808,6 +938,24 @@ pub async fn down(&self) { self.remove_all().await.unwrap(); } + + /// connect to message bus helper for the cargo test code + pub async fn connect_to_bus(&self, name: &str) { + let (_, ip) = self.containers.get(name).unwrap(); + let url = format!("{}", ip); + tokio::time::timeout(std::time::Duration::from_secs(2), async { + mbus_api::message_bus_init_options( + url, + TimeoutOptions::new() + .with_timeout(Duration::from_millis(500)) + .with_timeout_backoff(Duration::from_millis(500)) + .with_max_retries(10), + ) + .await + }) + .await + .unwrap(); + } } #[cfg(test)] mod tests {
@@ -821,7 +969,7 @@ .name("composer") .network("10.1.0.0/16") .add_container_spec( - ContainerSpec::new( + ContainerSpec::from_binary( "nats", Binary::from_nix("nats-server").with_arg("-DV"), )
diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs
index 81eaed7b6..ef9fb2aa2 100644
--- a/mbus-api/src/lib.rs
+++ b/mbus-api/src/lib.rs
@@ -13,7 +13,12 @@ pub mod v0; use async_trait::async_trait; use dyn_clonable::clonable; -pub use mbus_nats::{bus, message_bus_init, message_bus_init_tokio}; +pub use mbus_nats::{ + bus, + message_bus_init, + message_bus_init_options, + message_bus_init_tokio, +}; pub use receive::*; pub use send::*; use serde::{Deserialize, Serialize};
@@ -188,10 +193,12 @@ pub type DynBus = Box<dyn Bus>; /// Timeout for receiving a reply to a request message /// Max number of retries until it gives up -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TimeoutOptions { /// initial request message timeout pub(crate) timeout: std::time::Duration, + /// request message incremental timeout step + pub(crate) timeout_step: std::time::Duration, /// max number of retries following the initial attempt's timeout pub(crate) max_retries: Option<u32>, }
@@ -200,6 +207,9 @@ impl TimeoutOptions { pub(crate) fn default_timeout() -> Duration { Duration::from_secs(6) } + pub(crate) fn default_timeout_step() -> Duration { + Duration::from_secs(1) + } pub(crate) fn default_max_retries() -> u32 { 6 }
@@ -209,6 +219,7 @@ impl Default for TimeoutOptions { fn default() -> Self { Self { timeout: Self::default_timeout(), + timeout_step: Self::default_timeout_step(), max_retries: Some(Self::default_max_retries()), } }
@@ -227,10 +238,19 @@ impl TimeoutOptions { self } + /// Timeout multiplied at each iteration + pub fn with_timeout_backoff(mut self, timeout: Duration) -> Self { + self.timeout_step = timeout; + self + } + /// Specify a max number of retries before giving up /// None for unlimited retries - pub fn with_max_retries(mut self, max_retries: Option<u32>) -> Self { - self.max_retries = max_retries; + pub fn with_max_retries<M: Into<Option<u32>>>( + mut self, + max_retries: M, + ) -> Self { + self.max_retries = max_retries.into(); self } }
diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs
index 43a382b97..c2683de16 100644
--- a/mbus-api/src/mbus_nats.rs
+++ b/mbus-api/src/mbus_nats.rs
@@ -31,6 +31,18 @@ pub async fn message_bus_init(server: String) { .expect("Expect to be initialised only once"); } +/// Initialise the Nats Message Bus with Options +pub async fn message_bus_init_options( + server: String, + timeouts: TimeoutOptions, +) { + let nc = NatsMessageBus::new(&server, BusOptions::new(), timeouts).await; + NATS_MSG_BUS + .set(nc) + .ok() + .expect("Expect to be initialised only once"); +} + /// Get the static `NatsMessageBus` as a boxed `MessageBus` pub fn bus()
-> DynBus { Box::new(
diff --git a/rest/service/src/message_bus/v0.rs b/rest/service/src/message_bus/v0.rs
index 97e511fc3..d2d844d39 100644
--- a/rest/service/src/message_bus/v0.rs
+++ b/rest/service/src/message_bus/v0.rs
@@ -68,28 +68,23 @@ mod tests { #[tokio::test] async fn bus() -> Result<(), Box<dyn std::error::Error>> { init_tracing(); - let natsep = format!("nats.{}", TEST_NET_NAME); - let nats_arg = vec!["-n", &natsep]; let mayastor = "node-test-name"; let test = Builder::new() .name("rest_backend") - .network(TEST_NET_NETWORK) .add_container_bin( "nats", Binary::from_nix("nats-server").with_arg("-DV"), ) - .add_container_bin( - "node", - Binary::from_dbg("node").with_args(nats_arg.clone()), - ) + .add_container_bin("node", Binary::from_dbg("node").with_nats("-n")) .add_container_bin( "mayastor", Binary::from_dbg("mayastor") - .with_args(nats_arg.clone()) + .with_nats("-n") .with_args(vec!["-N", mayastor]), ) .with_clean(true) - .build_only() + .autorun(false) + .build() .await?; orderly_start(&test).await?;
diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs
index d248dc1f5..f37662f60 100644
--- a/rest/tests/v0_test.rs
+++ b/rest/tests/v0_test.rs
@@ -1,5 +1,4 @@ mod test; -use composer::{TEST_NET_NAME, TEST_NET_NETWORK}; use mbus_api::{ v0::{GetNodes, NodeState}, Message,
@@ -33,38 +32,33 @@ async fn orderly_start( async fn client() -> Result<(), Box<dyn std::error::Error>> { test::init(); - let natsep = format!("nats.{}", TEST_NET_NAME); - let nats_arg = vec!["-n", &natsep]; let mayastor = "node-test-name"; let test = Builder::new() .name("rest") - .network(TEST_NET_NETWORK) .add_container_spec( - ContainerSpec::new( + ContainerSpec::from_binary( "nats", Binary::from_nix("nats-server").with_arg("-DV"), ) .with_portmap("4222", "4222"), ) - .add_container_bin( - "node", - Binary::from_dbg("node").with_args(nats_arg.clone()), - ) + .add_container_bin("node", Binary::from_dbg("node").with_nats("-n")) .add_container_spec( - ContainerSpec::new( + ContainerSpec::from_binary( "rest", - Binary::from_dbg("rest").with_args(nats_arg.clone()), + Binary::from_dbg("rest").with_nats("-n"), ) .with_portmap("8080", "8080"), ) .add_container_bin( "mayastor", Binary::from_dbg("mayastor") - .with_args(nats_arg.clone()) + .with_nats("-n") .with_args(vec!["-N", mayastor]), ) .with_clean(true) - .build_only() + .autorun(false) + .build() .await?; let result = client_test(mayastor, &test).await;
diff --git a/services/node/src/server.rs b/services/node/src/server.rs
index 318196110..97a7a5d3b 100644
--- a/services/node/src/server.rs
+++ b/services/node/src/server.rs
@@ -251,12 +251,9 @@ mod tests { #[tokio::test] async fn node() -> Result<(), Box<dyn std::error::Error>> { init_tracing(); - let natsep = format!("nats.{}", TEST_NET_NAME); - let nats_arg = vec!["-n", &natsep]; let maya_name = "node-test-name"; let test = Builder::new() .name("node") - .network(TEST_NET_NETWORK) .add_container_bin( "nats", Binary::from_nix("nats-server").with_arg("-DV"), ) .add_container_bin( "node", Binary::from_dbg("node") - .with_args(nats_arg.clone()) + .with_nats("-n") .with_args(vec!["-d", "2sec"]), ) .add_container_bin( "mayastor", Binary::from_dbg("mayastor") - .with_args(nats_arg.clone()) + .with_nats("-n") .with_args(vec!["-N", maya_name]), ) .with_clean(true) - .build_only() + .autorun(false) + .build() .await?; orderly_start(&test).await?;
From 20879673aab9cc6c8b5c9f73c254a22872e27fdf Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Thu, 7 Jan 2021 10:22:40 +0000
Subject: [PATCH 64/85] feat(composer): add docker image capabilities

Add base
image which can optionally be used by any of our containers.
Also add a new builder for using remote images. Images are pulled if
not present locally.
---
 composer/src/lib.rs | 176 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 147 insertions(+), 29 deletions(-)

diff --git a/composer/src/lib.rs b/composer/src/lib.rs
index b94e0fe1a..c4e39b21f 100644
--- a/composer/src/lib.rs
+++ b/composer/src/lib.rs
@@ -31,11 +31,11 @@ use bollard::{ }, Docker, }; -use futures::TryStreamExt; +use futures::{StreamExt, TryStreamExt}; use ipnetwork::Ipv4Network; use tonic::transport::Channel; -use bollard::models::ContainerInspectResponse; +use bollard::{image::CreateImageOptions, models::ContainerInspectResponse}; use mbus_api::TimeoutOptions; use rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, mayastor_client::MayastorClient,
@@ -159,6 +159,11 @@ impl Binary { } } + fn commands(&self) -> Vec<String> { + let mut v = vec![self.path.clone()]; + v.extend(self.arguments.clone()); + v + } fn which(name: &str) -> std::io::Result<String> { let output = std::process::Command::new("which").arg(name).output()?; Ok(String::from_utf8_lossy(&output.stdout).trim().into())
@@ -172,14 +177,6 @@ impl Binary { } } -impl Into<Vec<String>> for Binary { - fn into(self) -> Vec<String> { - let mut v = vec![self.path.clone()]; - v.extend(self.arguments); - v - } -} - const RUST_LOG_DEFAULT: &str = "debug,actix_web=debug,actix=debug,h2=info,hyper=info,tower_buffer=info,bollard=info,rustls=info"; /// Specs of the allowed containers include only the binary path /// (relative to src) and the required arguments #[derive(Default, Clone)] pub struct ContainerSpec { /// Name of the container name: ContainerName, - /// Binary configuration - binary: Binary, + /// Base image of the container + image: Option<String>, + /// Command to run + command: Option<ContainerName>, + /// command arguments to run + arguments: Option<Vec<String>>, + /// local binary + binary: Option<Binary>, /// Port mapping to host ports port_map: Option<bollard::service::PortMap>, /// Use Init container init: Option<bool>,
@@ -200,21 +203,30 @@ pub struct ContainerSpec { env: HashMap<String, String>, } -impl Into<Vec<String>> for &ContainerSpec { - fn into(self) -> Vec<String> { - self.binary.clone().into() - } -} - impl ContainerSpec { /// Create new ContainerSpec from name and binary - pub fn new(name: &str, binary: Binary) -> Self { + pub fn from_binary(name: &str, binary: Binary) -> Self { + let mut env = binary.env.clone(); + if !env.contains_key("RUST_LOG") { + env.insert("RUST_LOG".to_string(), RUST_LOG_DEFAULT.to_string()); + } + Self { + name: name.into(), + image: None, + binary: Some(binary), + init: Some(true), + env, + ..Default::default() + } + } + /// Create new ContainerSpec from name and image + pub fn from_image(name: &str, image: &str) -> Self { let mut env = HashMap::new(); env.insert("RUST_LOG".to_string(), RUST_LOG_DEFAULT.to_string()); Self { name: name.into(), init: Some(true), + image: Some(image.into()), env, ..Default::default() }
@@ -253,6 +265,18 @@ impl ContainerSpec { }); vec } + /// Command/entrypoint followed by/and arguments + fn commands(&self) -> Vec<String> { + let mut commands = vec![]; + if let Some(mut binary) = self.binary.clone() { + binary.setup_nats(&self.name); + commands.extend(binary.commands()); + } else if let Some(command) = self.command.clone() { + commands.push(command); + } + commands.extend(self.arguments.clone().unwrap_or_default()); + commands + } } pub struct Builder {
@@ -272,6 +296,8 @@ pub struct Builder { prune: bool, /// run all containers on build autorun: bool, + /// base image for image-less containers + image: Option<String>, /// output container logs on panic logs_on_panic: bool, }
@@ -292,6 +318,7 @@ impl Builder { clean: true, prune: true, autorun: true, + image: None,
logs_on_panic: true, } } @@ -316,8 +343,10 @@ impl Builder { /// add a mayastor container with a name pub fn add_container(mut self, name: &str) -> Builder { - self.containers - .push(ContainerSpec::new(name, Binary::from_dbg("mayastor"))); + self.containers.push(ContainerSpec::from_binary( + name, + Binary::from_dbg("mayastor"), + )); self } @@ -330,7 +359,13 @@ impl Builder { /// add a generic container which runs a local binary pub fn add_container_bin(self, name: &str, mut bin: Binary) -> Builder { bin.setup_nats(&self.name); - self.add_container_spec(ContainerSpec::new(name, bin)) + self.add_container_spec(ContainerSpec::from_binary(name, bin)) + } + + /// add a docker container + /// todo: still need to pull the image manually + pub fn add_container_image(self, name: &str, image: Binary) -> Builder { + self.add_container_spec(ContainerSpec::from_binary(name, image)) } /// clean on drop? @@ -344,6 +379,41 @@ impl Builder { self.prune = enable; self } + + /// output logs on panic + pub fn with_logs(mut self, enable: bool) -> Builder { + self.logs_on_panic = enable; + self + } + + /// use base image for all binary containers + /// note, the image must be present locally + /// todo: pull image, if not present + pub fn with_base_image>>( + mut self, + image: S, + ) -> Builder { + self.image = image.into(); + self + } + + /// setup tracing for the cargo test code with `RUST_LOG` const + pub fn with_default_tracing(self) -> Self { + self.with_tracing(RUST_LOG_DEFAULT) + } + + /// setup tracing for the cargo test code with `filter` + pub fn with_tracing(self, filter: &str) -> Self { + if let Ok(filter) = + tracing_subscriber::EnvFilter::try_from_default_env() + { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } + self + } + /// build the config and start the containers pub async fn build( self, @@ -389,6 +459,7 @@ impl Builder { label_prefix: "io.mayastor.test".to_string(), clean: self.clean, prune: self.prune, + image: self.image, logs_on_panic: self.logs_on_panic, }; @@ -441,6 +512,8 @@ pub struct ComposeTest { /// automatically clean up the things we have created for this test clean: bool, pub prune: bool, + /// base image for image-less containers + image: Option, /// output container logs on panic logs_on_panic: bool, } @@ -713,10 +786,7 @@ impl ComposeTest { let mut env = spec.environment(); env.push(format!("MY_POD_IP={}", ipv4)); - let cmd: Vec = spec.into(); - let name = spec.name.as_str(); - - // figure out why ports to expose based on the port mapping + // figure out which ports to expose based on the port mapping let mut exposed_ports = HashMap::new(); if let Some(map) = spec.port_map.as_ref() { map.iter().for_each(|binding| { @@ -724,11 +794,18 @@ impl ComposeTest { }) } + let name = spec.name.as_str(); + let cmd = spec.commands(); + let cmd = cmd.iter().map(|s| s.as_str()).collect(); + let image = spec + .image + .as_ref() + .map_or_else(|| self.image.as_deref(), |s| Some(s.as_str())); let name_label = format!("{}.name", self.label_prefix); let config = Config { - cmd: Some(cmd.iter().map(|s| s.as_str()).collect()), + cmd: Some(cmd), env: Some(env.iter().map(|s| s.as_str()).collect()), - image: None, // notice we do not have a base image here + image, hostname: Some(name), host_config: Some(host_config), networking_config: Some(NetworkingConfig { @@ -753,6 +830,8 @@ impl ComposeTest { ..Default::default() }; + self.pull_missing_image(&spec.image).await; + let container = self .docker 
.create_container( @@ -770,6 +849,45 @@ impl ComposeTest { Ok(()) } + /// Pulls the docker image, if one is specified and is not present locally + async fn pull_missing_image(&self, image: &Option) { + if let Some(image) = image { + if !self.image_exists(image).await { + self.pull_image(image).await; + } + } + } + + /// Check if image exists locally + async fn image_exists(&self, image: &str) -> bool { + let images = self.docker.list_images::(None).await.unwrap(); + images + .iter() + .any(|i| i.repo_tags.iter().any(|t| t == image)) + } + + /// Pulls the docker image + async fn pull_image(&self, image: &str) { + let mut stream = self + .docker + .create_image( + Some(CreateImageOptions { + from_image: image, + ..Default::default() + }), + None, + None, + ) + .into_future() + .await; + + while let Some(result) = stream.0.as_ref() { + let info = result.as_ref().unwrap(); + tracing::trace!("{:?}", &info); + stream = stream.1.into_future().await; + } + } + /// start the container pub async fn start(&self, name: &str) -> Result<(), Error> { let id = self.containers.get(name).unwrap(); From cdf421be3d552ba3d49486b3728f175ff9d56a3f Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Thu, 26 Nov 2020 15:32:44 +0000 Subject: [PATCH 65/85] refactor: move the message bus client trait ... into the common mbus library where it is more accessible. Make the message bus traits Send. --- Cargo.lock | 4 +++ mbus-api/Cargo.toml | 6 ++++ mbus-api/src/lib.rs | 4 ++- mbus-api/src/message_bus/mod.rs | 5 ++++ .../src/message_bus/v0.rs | 14 +++++---- mbus-api/src/send.rs | 30 ++++++++++++++----- mbus-api/src/v0.rs | 4 +-- rest/service/src/main.rs | 3 +- rest/service/src/message_bus/mod.rs | 1 - 9 files changed, 51 insertions(+), 20 deletions(-) create mode 100644 mbus-api/src/message_bus/mod.rs rename {rest/service => mbus-api}/src/message_bus/v0.rs (91%) delete mode 100644 rest/service/src/message_bus/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 3709e951d..d3691e7bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2561,11 +2561,13 @@ name = "mbus_api" version = "0.1.0" dependencies = [ "async-trait", + "composer", "dyn-clonable", "env_logger", "log", "nats", "once_cell", + "rpc", "serde", "serde_json", "smol", @@ -2575,6 +2577,8 @@ dependencies = [ "strum_macros", "tokio", "tracing", + "tracing-futures", + "tracing-subscriber", ] [[package]] diff --git a/mbus-api/Cargo.toml b/mbus-api/Cargo.toml index 106804bb6..979c635d0 100644 --- a/mbus-api/Cargo.toml +++ b/mbus-api/Cargo.toml @@ -19,6 +19,12 @@ snafu = "0.6" strum = "0.19" strum_macros = "0.19" tracing = "0.1" +tracing-futures = "0.2.4" +tracing-subscriber = "0.2.0" + +[dev-dependencies] +composer = { path = "../composer" } +rpc = { path = "../rpc" } [dependencies.serde] features = ["derive"] diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs index ef9fb2aa2..2f56afc45 100644 --- a/mbus-api/src/lib.rs +++ b/mbus-api/src/lib.rs @@ -4,6 +4,8 @@ //! 
We could split these out further into categories when they start to grow mod mbus_nats; +/// Message bus client interface +pub mod message_bus; /// received message traits pub mod receive; /// send messages traits @@ -126,7 +128,7 @@ pub type SenderId = String; /// This trait defines all Bus Messages which must: /// 1 - be uniquely identifiable via MessageId /// 2 - have a default Channel on which they are sent/received -#[async_trait(?Send)] +#[async_trait] pub trait Message { /// type which is sent back in response to a request type Reply; diff --git a/mbus-api/src/message_bus/mod.rs b/mbus-api/src/message_bus/mod.rs new file mode 100644 index 000000000..1c186d697 --- /dev/null +++ b/mbus-api/src/message_bus/mod.rs @@ -0,0 +1,5 @@ +//! Message Bus client interface which can optionally be used +//! to interact with the control plane services + +/// Version 0 of the client interface +pub mod v0; diff --git a/rest/service/src/message_bus/v0.rs b/mbus-api/src/message_bus/v0.rs similarity index 91% rename from rest/service/src/message_bus/v0.rs rename to mbus-api/src/message_bus/v0.rs index d2d844d39..b91686bd1 100644 --- a/rest/service/src/message_bus/v0.rs +++ b/mbus-api/src/message_bus/v0.rs @@ -1,17 +1,19 @@ +use crate::{v0::*, *}; use async_trait::async_trait; -use mbus_api::{v0::*, *}; /// Mayastor Node -pub type Node = mbus_api::v0::Node; +pub type Node = crate::v0::Node; -/// Interface used by the rest service to interact with the mayastor +/// Interface used by the rest service to interact with the control plane /// services via the message bus -#[async_trait(?Send)] +#[async_trait] pub trait MessageBusTrait: Sized { + /// Get all known nodes from the registry #[tracing::instrument(level = "info")] async fn get_nodes() -> std::io::Result> { - GetNodes {}.request().await.map(|v| v.0) + GetNodes {}.request().await.map(|v| v.into_inner()) } + /// Get a node through its id #[tracing::instrument(level = "info")] async fn get_node(id: String) -> std::io::Result> { let nodes = Self::get_nodes().await?; @@ -31,7 +33,7 @@ mod tests { async fn bus_init() -> Result<(), Box> { tokio::time::timeout(std::time::Duration::from_secs(2), async { - mbus_api::message_bus_init("10.1.0.2".into()).await + crate::message_bus_init("10.1.0.2".into()).await }) .await?; Ok(()) diff --git a/mbus-api/src/send.rs b/mbus-api/src/send.rs index bbb8c3bbb..811967435 100644 --- a/mbus-api/src/send.rs +++ b/mbus-api/src/send.rs @@ -135,7 +135,7 @@ macro_rules! bus_impl_message { bus_impl_message!($S, $I, $R, $C, $S); }; ($S:ident, $I:ident, $R:tt, $C:ident, $T:ident) => { - #[async_trait::async_trait(?Send)] + #[async_trait::async_trait] impl Message for $S { type Reply = $R; @@ -157,15 +157,31 @@ macro_rules! bus_impl_message { }; } +/// Implement request for all objects of `Type` +#[macro_export] +macro_rules! bus_impl_vector_request { + ($Request:ident, $Inner:ident) => { + /// Request all the `Inner` elements + #[derive(Serialize, Deserialize, Default, Debug, Clone)] + pub struct $Request(pub Vec<$Inner>); + impl $Request { + /// returns the first element of the tuple and consumes self + pub fn into_inner(self) -> Vec<$Inner> { + self.0 + } + } + }; +} + /// Trait to send a message `bus` request with the `payload` type `S` via a /// a `channel` and requesting a response back with the payload type `R` /// via a specific reply channel. /// Trait can be implemented using the macro helper `bus_impl_request`. 
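A note on the `bus_impl_vector_request` helper defined above: `bus_impl_vector_request!(Nodes, Node)` produces a `Nodes` newtype around `Vec<Node>` with an `into_inner` accessor, so a caller can unwrap a reply in one step. A minimal sketch, assuming the bus has already been initialised via `message_bus_init`:

    // sketch: `Nodes` is the newtype wrapper generated by the macro
    let nodes: Vec<Node> = GetNodes {}.request().await?.into_inner();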
-#[async_trait(?Send)] +#[async_trait] pub trait MessageRequest<'a, S, R> where - S: 'a + Sync + Message + Serialize, - for<'de> R: Deserialize<'de> + Default + 'a + Sync, + S: 'a + Sync + Send + Message + Serialize, + for<'de> R: Deserialize<'de> + Default + 'a + Sync + Send, { /// Sends the message and requests a reply /// May fail if the bus fails to publish the message. @@ -197,11 +213,11 @@ where /// Trait to send a message `bus` publish with the `payload` type `S` via a /// a `channel`. No reply is requested. /// Trait can be implemented using the macro helper `bus_impl_publish`. -#[async_trait(?Send)] +#[async_trait] pub trait MessagePublish<'a, S, R> where - S: 'a + Sync + Message + Serialize, - for<'de> R: Deserialize<'de> + Default + 'a + Sync, + S: 'a + Sync + Send + Message + Serialize, + for<'de> R: Deserialize<'de> + Default + 'a + Sync + Send, { /// Publishes the Message - not guaranteed to be sent or received (fire and /// forget) diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs index 893db4e16..74fa2539b 100644 --- a/mbus-api/src/v0.rs +++ b/mbus-api/src/v0.rs @@ -179,7 +179,5 @@ pub struct Node { pub state: NodeState, } -/// All the nodes -#[derive(Serialize, Deserialize, Default, Debug, Clone)] -pub struct Nodes(pub Vec<Node>); +bus_impl_vector_request!(Nodes, Node); bus_impl_message_all!(GetNodes, GetNodes, Nodes, Node); diff --git a/rest/service/src/main.rs b/rest/service/src/main.rs index c27116021..2d0933e6a 100644 --- a/rest/service/src/main.rs +++ b/rest/service/src/main.rs @@ -1,5 +1,4 @@ -mod message_bus; -use message_bus::v0::{MessageBus, *}; +use mbus_api::message_bus::v0::{MessageBus, *}; use actix_web::{ get, middleware, web, App, HttpResponse, HttpServer, Responder, }; diff --git a/rest/service/src/message_bus/mod.rs b/rest/service/src/message_bus/mod.rs deleted file mode 100644 index 2d24cd45f..000000000 --- a/rest/service/src/message_bus/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod v0; From 32eb31a4fd75772bde0c6b72d3f10ba59ef8f834 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Fri, 27 Nov 2020 10:29:00 +0000 Subject: [PATCH 66/85] refactor: add detailed errors to the message bus This way we're able to know where send/receive went wrong, e.g. failing to send or an error received from the remote service. Reply back with an error when we fail to deserialize a request, etc. Example: "reply message came back with an error: failed to deserialize the request: 'failed to deserialize the publish payload: '....' into type bus_api::v0::GetNodes': missing field `node` at line 1 column x'" Update the ReceivedMessage type to auto-include the reply type using the base message trait ::Reply associated type. Added a liveness probe which can be used to probe services for liveness.
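With the new `request_on` variant, for example, a client can probe any service for liveness on a channel of its choosing. A minimal sketch; targeting the node service channel is illustrative, not something the patch prescribes:

    // a liveness round-trip: an Ok(()) reply means the service is up
    let alive = Liveness {}.request_on(ChannelVs::Node).await.is_ok();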
--- mayastor/src/core/env.rs | 2 +- mayastor/src/subsys/mbus/mod.rs | 9 +- mayastor/src/subsys/mbus/registration.rs | 4 +- mbus-api/examples/server/main.rs | 6 +- mbus-api/src/lib.rs | 155 +++++++++++++++++++--- mbus-api/src/mbus_nats.rs | 57 +++++--- mbus-api/src/message_bus/v0.rs | 11 +- mbus-api/src/receive.rs | 97 ++++++++++---- mbus-api/src/send.rs | 55 +++++--- mbus-api/src/v0.rs | 33 +++-- services/common/src/lib.rs | 161 +++++++++++++++++------ services/examples/service/main.rs | 3 +- services/kiiss/src/server.rs | 13 +- services/node/src/server.rs | 3 +- 14 files changed, 442 insertions(+), 167 deletions(-) diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 0c7ebae66..72703a4c5 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -607,7 +607,7 @@ impl MayastorEnvironment { } #[allow(dead_code)] - async fn get_service_config(&self) -> Result { + async fn get_service_config(&self) -> Result { if self.mbus_endpoint.is_some() { Ok(ConfigGetCurrent { kind: MayastorConfig, diff --git a/mayastor/src/subsys/mbus/mod.rs b/mayastor/src/subsys/mbus/mod.rs index 09ecc8fc5..b39441dbf 100644 --- a/mayastor/src/subsys/mbus/mod.rs +++ b/mayastor/src/subsys/mbus/mod.rs @@ -31,10 +31,15 @@ pub fn mbus_endpoint(endpoint: Option) -> Option { (endpoint.as_str(), 4222) }; + debug!("Looking up nats endpoint {}...", address_or_ip); if let Ok(ipv4) = address_or_ip.parse::() { - lookup_addr(&IpAddr::V4(ipv4)).expect("Invalid Ipv4 Address"); + let nats = lookup_addr(&IpAddr::V4(ipv4)) + .expect("Invalid Ipv4 Address"); + debug!("Nats endpoint found at {}", nats); } else { - lookup_host(&address_or_ip).expect("Invalid Host Name"); + let nats = + lookup_host(&address_or_ip).expect("Invalid Host Name"); + debug!("Nats endpoint found at {:?}", nats); } Some(format!("{}:{}", address_or_ip, port)) diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs index 2f9dc6f47..f7a6f8e40 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -38,9 +38,9 @@ pub enum Error { ))] NotStarted {}, #[snafu(display("Failed to queue register request: {:?}", cause))] - QueueRegister { cause: std::io::Error }, + QueueRegister { cause: mbus_api::Error }, #[snafu(display("Failed to queue deregister request: {:?}", cause))] - QueueDeregister { cause: std::io::Error }, + QueueDeregister { cause: mbus_api::Error }, } #[derive(Clone)] diff --git a/mbus-api/examples/server/main.rs b/mbus-api/examples/server/main.rs index 27d14d47e..ab77758b9 100644 --- a/mbus-api/examples/server/main.rs +++ b/mbus-api/examples/server/main.rs @@ -104,7 +104,7 @@ async fn receive_v2(sub: &mut nats::asynk::Subscription, count: u64) { let message = &sub.next().await.unwrap(); // notice that try_into can fail if the received type does not // match the received message - let message: ReceivedMessage = + let message: ReceivedMessageExt = message.try_into().unwrap(); message .reply(DummyReply { @@ -116,11 +116,11 @@ async fn receive_v2(sub: &mut nats::asynk::Subscription, count: u64) { async fn receive_v3(sub: &mut nats::asynk::Subscription, count: u64) { let message = &sub.next().await.unwrap(); - let message: ReceivedMessage = + let message: ReceivedMessageExt = message.try_into().unwrap(); message // same function can receive an error - .reply(Err(BusError::WithMessage { + .reply(Err(ReplyError::WithMessage { message: format!("Fake Error {}", count), })) .await diff --git a/mbus-api/src/lib.rs b/mbus-api/src/lib.rs index 
2f56afc45..7c7ed6302 100644 --- a/mbus-api/src/lib.rs +++ b/mbus-api/src/lib.rs @@ -25,11 +25,114 @@ pub use receive::*; pub use send::*; use serde::{Deserialize, Serialize}; use smol::io; -use snafu::Snafu; +use snafu::{ResultExt, Snafu}; use std::{fmt::Debug, marker::PhantomData, str::FromStr, time::Duration}; +/// Result wrapper for send/receive +pub type BusResult = Result; /// Common error type for send/receive -pub type Error = io::Error; +#[derive(Debug, Snafu, strum_macros::AsRefStr)] +#[allow(missing_docs)] +pub enum Error { + #[snafu(display("Message with wrong message id received. Received '{}' but Expected '{}'", received.to_string(), expected.to_string()))] + WrongMessageId { + received: MessageId, + expected: MessageId, + }, + #[snafu(display("Failed to serialize the publish payload on channel '{}'", channel.to_string()))] + SerializeSend { + source: serde_json::Error, + channel: Channel, + }, + #[snafu(display( + "Failed to deserialize the publish payload: '{:?}' into type '{}'", + payload, + receiver + ))] + DeserializeSend { + payload: Result, + receiver: String, + source: serde_json::Error, + }, + #[snafu(display("Failed to serialize the reply payload for request message id '{}'", request.to_string()))] + SerializeReply { + request: MessageId, + source: serde_json::Error, + }, + #[snafu(display( + "Failed to deserialize the reply payload '{:?}' for message: '{:?}'", + reply, + request + ))] + DeserializeReceive { + request: Result, + reply: Result, + source: serde_json::Error, + }, + #[snafu(display( + "Failed to send message '{:?}' through the message bus on channel '{}'", + payload, + channel + ))] + Publish { + channel: String, + payload: Result, + source: io::Error, + }, + #[snafu(display( + "Timed out waiting for a reply to message '{:?}' on channel '{:?}' with options '{:?}'.", + payload, + channel, + options + ))] + RequestTimeout { + channel: String, + payload: Result, + options: TimeoutOptions, + }, + #[snafu(display( + "Failed to reply back to message id '{}' through the message bus", + request.to_string() + ))] + Reply { + request: MessageId, + source: io::Error, + }, + #[snafu(display("Failed to flush the message bus"))] + Flush { source: io::Error }, + #[snafu(display( + "Failed to subscribe to channel '{}' on the message bus", + channel + ))] + Subscribe { channel: String, source: io::Error }, + #[snafu(display("Reply message came back with an error"))] + ReplyWithError { source: ReplyError }, + #[snafu(display("Service error whilst handling request: {}", message))] + ServiceError { message: String }, +} + +/// Report error chain +pub trait ErrorChain { + /// full error chain as a string separated by ':' + fn full_string(&self) -> String; +} + +impl ErrorChain for T +where + T: std::error::Error, +{ + /// loops through the error chain and formats into a single string + /// containing all the lower level errors + fn full_string(&self) -> String { + let mut msg = format!("{}", self); + let mut opt_source = self.source(); + while let Some(source) = opt_source { + msg = format!("{}: {}", msg, source); + opt_source = source.source(); + } + msg + } +} /// Available Message Bus channels #[derive(Clone, Debug)] @@ -139,15 +242,28 @@ pub trait Message { fn channel(&self) -> Channel; /// publish a message with no delivery guarantees - async fn publish(&self) -> io::Result<()>; + async fn publish(&self) -> BusResult<()>; /// publish a message with a request for a `Self::Reply` reply - async fn request(&self) -> io::Result; + async fn request(&self) -> BusResult; + /// 
publish a message on the given channel with a request for a + /// `Self::Reply` reply + async fn request_on + Send>( + &self, + channel: C, + ) -> BusResult; /// publish a message with a request for a `Self::Reply` reply /// and non default timeout options async fn request_ext( &self, options: TimeoutOptions, - ) -> io::Result; + ) -> BusResult; + /// publish a message with a request for a `Self::Reply` reply + /// and non default timeout options on the given channel + async fn request_on_ext + Send>( + &self, + channel: C, + options: TimeoutOptions, + ) -> BusResult; } /// The preamble is used to peek into messages so allowing for them to be routed @@ -167,21 +283,22 @@ struct SendPayload { } /// Error type which is returned over the bus -/// todo: Use this Error not just for the "transport" but also /// for any other operation -#[derive(Serialize, Deserialize, Debug, Snafu)] +#[derive(Serialize, Deserialize, Debug, Snafu, strum_macros::AsRefStr)] #[allow(missing_docs)] -pub enum BusError { +pub enum ReplyError { #[snafu(display("Generic Failure, message={}", message))] WithMessage { message: String }, - #[snafu(display("Ill formed request when deserializing the request"))] - InvalidFormat, + #[snafu(display("Failed to deserialize the request: '{}'", message))] + DeserializeReq { message: String }, + #[snafu(display("Failed to process the request: '{}'", message))] + Process { message: String }, } /// Payload returned to the sender /// Includes an error as the operations may be fallible -#[derive(Serialize, Deserialize)] -pub struct ReplyPayload(pub Result); +#[derive(Serialize, Deserialize, Debug)] +pub struct ReplyPayload(pub Result); // todo: implement thin wrappers on these /// MessageBus raw Message @@ -263,24 +380,20 @@ impl TimeoutOptions { pub trait Bus: Clone + Send + Sync { /// publish a message - not guaranteed to be sent or received (fire and /// forget) - async fn publish( - &self, - channel: Channel, - message: &[u8], - ) -> std::io::Result<()>; + async fn publish(&self, channel: Channel, message: &[u8]) -> BusResult<()>; /// Send a message and wait for it to be received by the target component - async fn send(&self, channel: Channel, message: &[u8]) -> io::Result<()>; + async fn send(&self, channel: Channel, message: &[u8]) -> BusResult<()>; /// Send a message and request a reply from the target component async fn request( &self, channel: Channel, message: &[u8], options: Option, - ) -> io::Result; + ) -> BusResult; /// Flush queued messages to the server - async fn flush(&self) -> io::Result<()>; + async fn flush(&self) -> BusResult<()>; /// Create a subscription on the given channel which can be /// polled for messages until it is either explicitly closed or /// when the bus is closed - async fn subscribe(&self, channel: Channel) -> io::Result; + async fn subscribe(&self, channel: Channel) -> BusResult; } diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs index c2683de16..af4eacc9b 100644 --- a/mbus-api/src/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -1,7 +1,6 @@ use super::*; use nats::asynk::Connection; use once_cell::sync::OnceCell; -use smol::io; use tracing::{info, warn}; static NATS_MSG_BUS: OnceCell = OnceCell::new(); @@ -108,15 +107,17 @@ impl NatsMessageBus { #[async_trait] impl Bus for NatsMessageBus { - async fn publish( - &self, - channel: Channel, - message: &[u8], - ) -> std::io::Result<()> { - self.connection.publish(&channel.to_string(), message).await + async fn publish(&self, channel: Channel, message: &[u8]) -> BusResult<()> { + 
self.connection + .publish(&channel.to_string(), message) + .await + .context(Publish { + channel: channel.to_string(), + payload: String::from_utf8(Vec::from(message)), + }) } - async fn send(&self, _channel: Channel, _message: &[u8]) -> io::Result<()> { + async fn send(&self, _channel: Channel, _message: &[u8]) -> BusResult<()> { unimplemented!() } @@ -124,11 +125,13 @@ impl Bus for NatsMessageBus { &self, channel: Channel, message: &[u8], - options: Option, - ) -> io::Result { + req_options: Option, + ) -> BusResult { let channel = &channel.to_string(); - let options = options.unwrap_or_else(|| self.timeout_options.clone()); + let options = req_options + .clone() + .unwrap_or_else(|| self.timeout_options.clone()); let mut timeout = options.timeout; let mut retries = 0; @@ -137,14 +140,23 @@ impl Bus for NatsMessageBus { let result = tokio::time::timeout(timeout, request).await; if let Ok(r) = result { - return r; + return r.context(Publish { + channel: channel.to_string(), + payload: String::from_utf8(Vec::from(message)), + }); } if Some(retries) == options.max_retries { - log::error!("Timed out on {}", channel); - return Err(io::ErrorKind::TimedOut.into()); + let error = Error::RequestTimeout { + channel: channel.to_string(), + payload: String::from_utf8(Vec::from(message)), + options: req_options + .unwrap_or_else(|| self.timeout_options.clone()), + }; + tracing::error!("{}", error); + return Err(error); } - log::debug!( + tracing::debug!( "Timeout after {:?} on {} - {} retries left", timeout, channel, @@ -157,17 +169,22 @@ impl Bus for NatsMessageBus { retries += 1; timeout = std::cmp::min( - Duration::from_secs(1) * retries, + options.timeout_step * retries, Duration::from_secs(10), ); } } - async fn flush(&self) -> io::Result<()> { - self.connection.flush().await + async fn flush(&self) -> BusResult<()> { + self.connection.flush().await.context(Flush {}) } - async fn subscribe(&self, channel: Channel) -> io::Result { - self.connection.subscribe(&channel.to_string()).await + async fn subscribe(&self, channel: Channel) -> BusResult { + self.connection + .subscribe(&channel.to_string()) + .await + .context(Subscribe { + channel: channel.to_string(), + }) } } diff --git a/mbus-api/src/message_bus/v0.rs b/mbus-api/src/message_bus/v0.rs index b91686bd1..ba2a7240f 100644 --- a/mbus-api/src/message_bus/v0.rs +++ b/mbus-api/src/message_bus/v0.rs @@ -1,6 +1,11 @@ use crate::{v0::*, *}; use async_trait::async_trait; +/// Error sending/receiving +pub type Error = crate::Error; +/// Result for sending/receiving +pub type BusResult = crate::BusResult; + /// Mayastor Node pub type Node = crate::v0::Node; @@ -10,12 +15,12 @@ pub type Node = crate::v0::Node; pub trait MessageBusTrait: Sized { /// Get all known nodes from the registry #[tracing::instrument(level = "info")] - async fn get_nodes() -> std::io::Result> { - GetNodes {}.request().await.map(|v| v.into_inner()) + async fn get_nodes() -> BusResult> { + GetNodes {}.request().await.map(|v| v.0) } /// Get a node through its id #[tracing::instrument(level = "info")] - async fn get_node(id: String) -> std::io::Result> { + async fn get_node(id: String) -> BusResult> { let nodes = Self::get_nodes().await?; Ok(nodes.into_iter().find(|n| n.id == id)) } diff --git a/mbus-api/src/receive.rs b/mbus-api/src/receive.rs index e01cbaf8c..ac2ddf8a9 100644 --- a/mbus-api/src/receive.rs +++ b/mbus-api/src/receive.rs @@ -6,20 +6,38 @@ use super::*; /// # Example: /// ``` /// let raw_msg = &subscriber.next().await?; -/// let msg: ReceivedMessage = +/// let msg: 
ReceivedMessageExt = /// raw_msg.try_into()?; /// /// msg.respond(ReplyConfig {}).await.unwrap(); /// // or we can also use the same fn to return an error /// msg.respond(Err(Error::Message("failure".into()))).await.unwrap(); /// ``` -pub struct ReceivedMessage<'a, S, R> { +pub struct ReceivedMessageExt<'a, S, R> { request: SendPayload, bus_message: &'a BusMessage, reply_type: PhantomData, } -impl<'a, S, R> ReceivedMessage<'a, S, R> +/// Specialization of type safe wrapper over a message bus message which decodes +/// the raw message into the actual payload `S` and allows only for a response +/// type `R` which is determined based on `S: Message` as a `Message::Reply` +/// type. +/// +/// # Example: +/// ``` +/// let raw_msg = &subscriber.next().await?; +/// let msg: ReceivedMessage = +/// raw_msg.try_into()?; +/// +/// msg.respond(ReplyConfig {}).await.unwrap(); +/// // or we can also use the same fn to return an error +/// msg.respond(Err(Error::Message("failure".into()))).await.unwrap(); +/// ``` +pub type ReceivedMessage<'a, S> = + ReceivedMessageExt<'a, S, ::Reply>; + +impl<'a, S, R> ReceivedMessageExt<'a, S, R> where for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, R: Serialize, @@ -41,17 +59,24 @@ where pub async fn reply>>( &self, reply: T, - ) -> io::Result<()> { + ) -> BusResult<()> { let reply: ReplyPayload = reply.into(); - let payload = serde_json::to_vec(&reply)?; - self.bus_message.respond(&payload).await + let payload = serde_json::to_vec(&reply).context(SerializeReply { + request: self.request.id.clone(), + })?; + self.bus_message.respond(&payload).await.context(Reply { + request: self.request.id.clone(), + }) } /// Create a new received message object which wraps the send and /// receive types around a raw bus message. - fn new(bus_message: &'a BusMessage) -> Result { - let request: SendPayload = - serde_json::from_slice(&bus_message.data)?; + fn new(bus_message: &'a BusMessage) -> Result { + let request: SendPayload = serde_json::from_slice(&bus_message.data) + .context(DeserializeSend { + receiver: std::any::type_name::(), + payload: String::from_utf8(bus_message.data.clone()), + })?; if request.id == request.data.id() { log::trace!( "Received message from '{}': {:?}", @@ -64,16 +89,17 @@ where reply_type: Default::default(), }) } else { - Err(io::Error::new( - io::ErrorKind::InvalidInput, - "invalid message id!", - )) + Err(Error::WrongMessageId { + received: request.id, + expected: request.data.id(), + }) } } } /// Message received over the message bus with a reply serialization wrapper /// For type safety refer to `ReceivedMessage<'a,S,R>`. +#[derive(Clone)] pub struct ReceivedRawMessage<'a> { bus_msg: &'a BusMessage, } @@ -94,16 +120,25 @@ impl std::fmt::Display for ReceivedRawMessage<'_> { impl<'a> ReceivedRawMessage<'a> { /// Get a copy of the actual payload data which was sent /// May fail if the raw data cannot be deserialized into `S` - pub fn inner + Message>(&self) -> io::Result { - let request: SendPayload = - serde_json::from_slice(&self.bus_msg.data)?; + pub fn inner + Message>(&self) -> BusResult { + let request: SendPayload = serde_json::from_slice( + &self.bus_msg.data, + ) + .context(DeserializeSend { + receiver: std::any::type_name::(), + payload: String::from_utf8(self.bus_msg.data.clone()), + })?; Ok(request.data) } /// Get the identifier of this message. /// May fail if the raw data cannot be deserialized into the preamble. 
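With the `ReceivedMessage` alias above, handlers only name the request type and the reply type follows from `Message::Reply`. A minimal sketch of a `GetNodes` handler body; `args` stands for the service `Arguments` passed to the handler, and the empty reply is a placeholder:

    // the alias pins the reply type to <GetNodes as Message>::Reply
    let msg: ReceivedMessage<GetNodes> = args.request.try_into()?;
    // reply with an empty node list (placeholder payload)
    msg.reply(Nodes(vec![])).await?;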
- pub fn id(&self) -> io::Result { - let preamble: Preamble = serde_json::from_slice(&self.bus_msg.data)?; + pub fn id(&self) -> BusResult { + let preamble: Preamble = serde_json::from_slice(&self.bus_msg.data) + .context(DeserializeSend { + receiver: std::any::type_name::(), + payload: String::from_utf8(self.bus_msg.data.clone()), + })?; Ok(preamble.id) } @@ -120,10 +155,14 @@ impl<'a> ReceivedRawMessage<'a> { pub async fn respond>>( &self, reply: R, - ) -> io::Result<()> { + ) -> BusResult<()> { let reply: ReplyPayload = reply.into(); - let payload = serde_json::to_vec(&reply)?; - self.bus_msg.respond(&payload).await + let payload = serde_json::to_vec(&reply).context(SerializeReply { + request: self.id()?, + })?; + self.bus_msg.respond(&payload).await.context(Reply { + request: self.id()?, + }) } } @@ -136,28 +175,28 @@ impl<'a> std::convert::From<&'a BusMessage> for ReceivedRawMessage<'a> { } impl<'a, S, R> std::convert::TryFrom<&'a BusMessage> - for ReceivedMessage<'a, S, R> + for ReceivedMessageExt<'a, S, R> where for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, R: Serialize, { - type Error = io::Error; + type Error = Error; fn try_from(value: &'a BusMessage) -> Result { - ReceivedMessage::::new(value) + ReceivedMessageExt::::new(value) } } impl<'a, S, R> std::convert::TryFrom> - for ReceivedMessage<'a, S, R> + for ReceivedMessageExt<'a, S, R> where for<'de> S: Deserialize<'de> + 'a + Debug + Clone + Message, R: Serialize, { - type Error = io::Error; + type Error = Error; fn try_from(value: ReceivedRawMessage<'a>) -> Result { - ReceivedMessage::::new(value.bus_msg) + ReceivedMessageExt::::new(value.bus_msg) } } @@ -167,8 +206,8 @@ impl From for ReplyPayload { } } -impl From> for ReplyPayload { - fn from(val: Result) -> Self { +impl From> for ReplyPayload { + fn from(val: Result) -> Self { ReplyPayload(val) } } diff --git a/mbus-api/src/send.rs b/mbus-api/src/send.rs index 811967435..90038db26 100644 --- a/mbus-api/src/send.rs +++ b/mbus-api/src/send.rs @@ -141,18 +141,31 @@ macro_rules! bus_impl_message { impl_channel_id!($I, $C); - async fn publish(&self) -> smol::io::Result<()> { + async fn publish(&self) -> BusResult<()> { $T::Publish(self, self.channel(), bus()).await } - async fn request(&self) -> smol::io::Result<$R> { + async fn request(&self) -> BusResult<$R> { $T::Request(self, self.channel(), bus()).await } + async fn request_on + Send>( + &self, + channel: C, + ) -> BusResult<$R> { + $T::Request(self, channel.into(), bus()).await + } async fn request_ext( &self, options: TimeoutOptions, - ) -> smol::io::Result<$R> { + ) -> BusResult<$R> { $T::Request_Ext(self, self.channel(), bus(), options).await } + async fn request_on_ext + Send>( + &self, + channel: C, + options: TimeoutOptions, + ) -> BusResult<$R> { + $T::Request_Ext(self, channel.into(), bus(), options).await + } } }; } @@ -186,12 +199,12 @@ where /// Sends the message and requests a reply /// May fail if the bus fails to publish the message. 
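Since every bus failure now carries its source, the `ErrorChain` helper can render the whole chain at the call site. A small sketch:

    match GetNodes {}.request().await {
        Ok(nodes) => println!("nodes: {:?}", nodes),
        // full_string() walks source() and joins each level with ':'
        Err(error) => eprintln!("request failed: {}", error.full_string()),
    }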
#[allow(non_snake_case)] - async fn Request( + async fn Request + Send>( payload: &'a S, - channel: Channel, + channel: C, bus: DynBus, - ) -> io::Result { - let msg = SendMessage::::new(payload, channel, bus); + ) -> BusResult { + let msg = SendMessage::::new(payload, channel.into(), bus); msg.request(None).await } @@ -204,7 +217,7 @@ where channel: Channel, bus: DynBus, options: TimeoutOptions, - ) -> io::Result { + ) -> BusResult { let msg = SendMessage::::new(payload, channel, bus); msg.request(Some(options)).await } @@ -227,7 +240,7 @@ where payload: &'a S, channel: Channel, bus: DynBus, - ) -> io::Result<()> { + ) -> BusResult<()> { let msg = SendMessage::::new(payload, channel, bus); msg.publish().await } @@ -279,8 +292,11 @@ where /// Publishes the Message - not guaranteed to be sent or received (fire and /// forget). - pub(crate) async fn publish(&self) -> io::Result<()> { - let payload = serde_json::to_vec(&self.payload)?; + pub(crate) async fn publish(&self) -> BusResult<()> { + let payload = + serde_json::to_vec(&self.payload).context(SerializeSend { + channel: self.channel.clone(), + })?; self.bus.publish(self.channel.clone(), &payload).await } @@ -288,16 +304,21 @@ where pub(crate) async fn request( &self, options: Option, - ) -> io::Result { - let payload = serde_json::to_vec(&self.payload)?; + ) -> BusResult { + let payload = + serde_json::to_vec(&self.payload).context(SerializeSend { + channel: self.channel.clone(), + })?; let reply = self .bus .request(self.channel.clone(), &payload, options) .await? .data; - let reply: ReplyPayload = serde_json::from_slice(&reply)?; - reply.0.map_err(|error| { - io::Error::new(io::ErrorKind::Other, format!("{:?}", error)) - }) + let reply: ReplyPayload = + serde_json::from_slice(&reply).context(DeserializeReceive { + request: serde_json::to_string(&self.payload), + reply: String::from_utf8(reply), + })?; + reply.0.context(ReplyWithError {}) } } diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs index 74fa2539b..fb850e3b2 100644 --- a/mbus-api/src/v0.rs +++ b/mbus-api/src/v0.rs @@ -1,11 +1,11 @@ use super::*; use serde::{Deserialize, Serialize}; use std::fmt::Debug; -use strum_macros::EnumString; +use strum_macros::{EnumString, ToString}; /// Versioned Channels -#[derive(Clone, Debug, EnumString, strum_macros::ToString)] -#[strum(serialize_all = "snake_case")] +#[derive(Clone, Debug, EnumString, ToString)] +#[strum(serialize_all = "camelCase")] pub enum ChannelVs { /// Default Default, @@ -29,13 +29,13 @@ impl From for Channel { } /// Versioned Message Id's -#[derive( - Debug, PartialEq, Clone, strum_macros::ToString, strum_macros::EnumString, -)] +#[derive(Debug, PartialEq, Clone, ToString, EnumString)] #[strum(serialize_all = "camelCase")] pub enum MessageIdVs { /// Default Default, + /// Liveness Probe + Liveness, /// Update Config ConfigUpdate, /// Request current Config @@ -65,6 +65,11 @@ macro_rules! 
impl_channel_id { }; } +/// Liveness Probe +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +pub struct Liveness {} +bus_impl_message_all!(Liveness, Liveness, (), Default); + /// Mayastor configurations /// Currently, we have the global mayastor config and the child states config #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] @@ -135,24 +140,18 @@ pub struct Deregister { bus_impl_message_all!(Deregister, Deregister, (), Registry); /// Node Service - +/// /// Get all the nodes #[derive(Serialize, Deserialize, Default, Debug, Clone)] pub struct GetNodes {} /// State of the Node #[derive( - Serialize, - Deserialize, - Debug, - Clone, - EnumString, - strum_macros::ToString, - Eq, - PartialEq, + Serialize, Deserialize, Debug, Clone, EnumString, ToString, Eq, PartialEq, )] -#[strum(serialize_all = "camelCase")] pub enum NodeState { + /// Node has unexpectedly disappeared + Unknown, /// Node is deemed online if it has not missed the /// registration keep alive deadline Online, @@ -163,7 +162,7 @@ pub enum NodeState { impl Default for NodeState { fn default() -> Self { - Self::Offline + Self::Unknown } } diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index b15f26302..f5291c0fd 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -7,32 +7,28 @@ use async_trait::async_trait; use dyn_clonable::clonable; use futures::{future::join_all, stream::StreamExt}; -use mbus_api::*; +use mbus_api::{v0::Liveness, *}; use snafu::{OptionExt, ResultExt, Snafu}; use state::Container; -use std::{collections::HashMap, convert::Into, ops::Deref}; +use std::{ + collections::HashMap, + convert::{Into, TryInto}, + ops::Deref, +}; use tracing::{debug, error}; #[derive(Debug, Snafu)] #[allow(missing_docs)] pub enum ServiceError { - #[snafu(display("Channel {} has been closed.", channel.to_string()))] - GetMessage { - channel: Channel, - }, - #[snafu(display("Failed to subscribe on Channel {}", channel.to_string()))] - Subscribe { - channel: Channel, - source: Error, - }, - GetMessageId { - channel: Channel, - source: Error, - }, - FindSubscription { - channel: Channel, - id: MessageId, - }, + #[snafu(display("Channel '{}' has been closed.", channel.to_string()))] + GetMessage { channel: Channel }, + #[snafu(display("Failed to subscribe on Channel '{}'", channel.to_string()))] + Subscribe { channel: Channel, source: Error }, + #[snafu(display("Failed to get message Id on Channel '{}'", channel.to_string()))] + GetMessageId { channel: Channel, source: Error }, + #[snafu(display("Failed to find subscription '{}' on Channel '{}'", id.to_string(), channel.to_string()))] + FindSubscription { channel: Channel, id: MessageId }, + #[snafu(display("Failed to handle message id '{}' on Channel '{}'", id.to_string(), channel.to_string()))] HandleMessage { channel: Channel, id: MessageId, @@ -44,6 +40,7 @@ pub enum ServiceError { /// message bus channel on a specific ID pub struct Service { server: String, + server_connected: bool, channel: Channel, subscriptions: HashMap>>, shared_state: std::sync::Arc, @@ -53,6 +50,7 @@ impl Default for Service { fn default() -> Self { Self { server: "".to_string(), + server_connected: false, channel: Default::default(), subscriptions: Default::default(), shared_state: std::sync::Arc::new(Container::new()), @@ -60,6 +58,7 @@ impl Default for Service { } } +#[derive(Clone)] /// Service Arguments for the service handler callback pub struct Arguments<'a> { /// Service context, like access to the message bus @@ -100,9 +99,17 @@ 
impl<'a> Context<'a> { } /// get the shared state of type `T` from the context pub fn get_state(&self) -> &T { - self.state - .try_get() - .expect("Requested data type not shared via with_shared_data!") + match self.state.try_get() { + Some(state) => state, + None => { + let type_name = std::any::type_name::(); + let error = format!( + "Requested data type '{}' not shared via with_shared_data", + type_name + ); + panic!(error); + } + } } } @@ -125,11 +132,28 @@ impl Service { pub fn builder(server: String, channel: impl Into) -> Self { Self { server, + server_connected: false, channel: channel.into(), ..Default::default() } } + /// Connect to the provided message bus server immediately + /// Useful for when dealing with async shared data which might required the + /// message bus before the builder is complete + pub async fn connect(mut self) -> Self { + self.message_bus_init().await; + self + } + + async fn message_bus_init(&mut self) { + if !self.server_connected { + // todo: parse connection options when nats has better support + mbus_api::message_bus_init(self.server.clone()).await; + self.server_connected = true; + } + } + /// Setup default `channel` where `with_subscription` will listen on pub fn with_channel(mut self, channel: impl Into) -> Self { self.channel = channel.into(); @@ -155,6 +179,7 @@ impl Service { /// # } pub fn with_shared_state(self, state: T) -> Self { let type_name = std::any::type_name::(); + tracing::debug!("Adding shared type: {}", type_name); if !self.shared_state.set(state) { panic!(format!( "Shared state for type '{}' has already been set!", @@ -164,6 +189,40 @@ impl Service { self } + /// Add a default liveness endpoint which can be used to probe + /// the service for liveness on the current selected channel. + /// + /// Example: + /// # async fn main() { + /// Service::builder(cli_args.url, ChannelVs::Node) + /// .with_default_liveness() + /// .with_subscription(ServiceHandler::::default()) + /// .run().await; + /// + /// # async fn alive() -> bool { + /// Liveness{}.request().await.is_ok() + /// # } + pub fn with_default_liveness(self) -> Self { + #[derive(Clone, Default)] + struct ServiceHandler { + data: std::marker::PhantomData, + } + + #[async_trait] + impl ServiceSubscriber for ServiceHandler { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + let request: ReceivedMessage = + args.request.try_into()?; + request.reply(()).await + } + fn filter(&self) -> Vec { + vec![Liveness::default().id()] + } + } + + self.with_subscription(ServiceHandler::::default()) + } + /// Add a new subscriber on the default channel pub fn with_subscription( self, @@ -216,7 +275,7 @@ impl Service { if let Err(error) = Self::process_message(args, &subscriptions).await { - error!("Error processing message: {}", error); + error!("Error processing message: {}", error.full_string()); } } } @@ -240,25 +299,43 @@ impl Service { id: id.clone(), })?; - let result = - subscription - .handler(arguments) - .await - .context(HandleMessage { - channel: channel.clone(), - id: id.clone(), - }); + let result = subscription.handler(arguments.clone()).await; + + Self::assess_handler_error(&result, &arguments).await; + + result.context(HandleMessage { + channel: channel.clone(), + id: id.clone(), + }) + } + async fn assess_handler_error( + result: &Result<(), Error>, + arguments: &Arguments<'_>, + ) { if let Err(error) = result.as_ref() { - // todo: should an error be returned to the sender? 
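Putting the builder additions together, a node-style service with the default liveness endpoint might be assembled as below. A sketch only: `NodeStore` and the `GetNodes` handler are borrowed from the node service and assumed here to implement `Default`:

    Service::builder(cli_args.url, ChannelVs::Node)
        // answers Liveness {} probes on the selected channel
        .with_default_liveness()
        // reachable from handlers through context.get_state()
        .with_shared_state(NodeStore::default())
        .with_subscription(ServiceHandler::<GetNodes>::default())
        .run()
        .await;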
- error!( - "Error handling message id {:?}: {:?}", - subscription.filter(), - error - ); + match error { + Error::DeserializeSend { + .. + } => { + arguments + .request + .respond::<(), _>(Err(ReplyError::DeserializeReq { + message: error.full_string(), + })) + .await + } + _ => { + arguments + .request + .respond::<(), _>(Err(ReplyError::Process { + message: error.full_string(), + })) + .await + } + } + .ok(); } - - result } /// Runs the server which services all subscribers asynchronously until all @@ -268,10 +345,10 @@ impl Service { /// each channel benefits from a tokio thread which routes messages /// accordingly todo: only one subscriber per message id supported at /// the moment - pub async fn run(&self) { + pub async fn run(&mut self) { let mut threads = vec![]; - // todo: parse connection options when nats has better support for it - mbus_api::message_bus_init(self.server.clone()).await; + + self.message_bus_init().await; let bus = mbus_api::bus(); for subscriptions in self.subscriptions.iter() { diff --git a/services/examples/service/main.rs b/services/examples/service/main.rs index ca25a60eb..4417743a5 100644 --- a/services/examples/service/main.rs +++ b/services/examples/service/main.rs @@ -36,8 +36,7 @@ bus_impl_message_all!(GetSvcName, Default, SvcName, Default); #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let msg: ReceivedMessage = - args.request.try_into()?; + let msg: ReceivedMessage = args.request.try_into()?; let reply = SvcName("example".into()); diff --git a/services/kiiss/src/server.rs b/services/kiiss/src/server.rs index 91ef1a9b1..395788361 100644 --- a/services/kiiss/src/server.rs +++ b/services/kiiss/src/server.rs @@ -40,7 +40,8 @@ impl ServiceSubscriber for ServiceHandler { let data: ConfigUpdate = args.request.inner()?; info!("Received: {:?}", data); - let msg: ReceivedMessage = args.request.try_into()?; + let msg: ReceivedMessageExt = + args.request.try_into()?; let config = msg.inner(); let mut state = CONFIGS.state.lock().await; @@ -69,7 +70,7 @@ impl ServiceSubscriber for ServiceHandler { let data: ConfigGetCurrent = args.request.inner()?; info!("Received: {:?}", data); - let msg: ReceivedMessage = + let msg: ReceivedMessageExt = args.request.try_into()?; let request = msg.inner(); @@ -84,14 +85,14 @@ impl ServiceSubscriber for ServiceHandler { .await } None => { - msg.reply(Err(BusError::WithMessage { + msg.reply(Err(ReplyError::WithMessage { message: "Config is missing".into(), })) .await } }, None => { - msg.reply(Err(BusError::WithMessage { + msg.reply(Err(ReplyError::WithMessage { message: "Config is missing".into(), })) .await @@ -106,7 +107,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let _: ReceivedMessage = args.request.try_into()?; + let _: ReceivedMessageExt = args.request.try_into()?; Ok(()) } fn filter(&self) -> Vec { @@ -117,7 +118,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let _: ReceivedMessage = args.request.try_into()?; + let _: ReceivedMessageExt = args.request.try_into()?; Ok(()) } fn filter(&self) -> Vec { diff --git a/services/node/src/server.rs b/services/node/src/server.rs index 97a7a5d3b..9c6cf58e0 100644 --- a/services/node/src/server.rs +++ b/services/node/src/server.rs @@ -165,8 
+165,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let request: ReceivedMessage = - args.request.try_into()?; + let request: ReceivedMessage = args.request.try_into()?; let store: &NodeStore = args.context.get_state(); let nodes = store.get_nodes().await; From a3644b58bc62f96a7660d0353dbb733bab6abaa2 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Tue, 1 Dec 2020 18:47:13 +0000 Subject: [PATCH 67/85] feat: add initial pool service, extended the ... rest server and the client library to include get/put/delete verbs for the pool and replicas. Added simple compose test which makes use of the rest client library to create/delete pools/replicas. The pool service makes use of the node service to query existing nodes and then issues gRPC commands to mayastor instances. --- Cargo.lock | 31 +- mbus-api/src/message_bus/v0.rs | 164 ++++++- mbus-api/src/v0.rs | 293 +++++++++++++ rest/Cargo.toml | 1 + rest/service/src/main.rs | 40 +- rest/service/src/v0/mod.rs | 16 + rest/service/src/v0/nodes.rs | 21 + rest/service/src/v0/pools.rs | 85 ++++ rest/service/src/v0/replicas.rs | 257 +++++++++++ rest/src/lib.rs | 56 ++- rest/src/versions/v0.rs | 331 ++++++++++++++- rest/tests/test.rs | 15 +- rest/tests/v0_test.rs | 88 +++- services/Cargo.toml | 8 +- services/common/src/lib.rs | 3 + services/common/src/wrapper/mod.rs | 4 + services/common/src/wrapper/v0/mod.rs | 81 ++++ services/common/src/wrapper/v0/node_traits.rs | 225 ++++++++++ services/common/src/wrapper/v0/pool.rs | 364 ++++++++++++++++ services/common/src/wrapper/v0/registry.rs | 400 ++++++++++++++++++ services/node/src/server.rs | 1 + services/pool/src/server.rs | 241 +++++++++++ services/pool/src/service.rs | 204 +++++++++ 23 files changed, 2850 insertions(+), 79 deletions(-) create mode 100644 rest/service/src/v0/mod.rs create mode 100644 rest/service/src/v0/nodes.rs create mode 100644 rest/service/src/v0/pools.rs create mode 100644 rest/service/src/v0/replicas.rs create mode 100644 services/common/src/wrapper/mod.rs create mode 100644 services/common/src/wrapper/v0/mod.rs create mode 100644 services/common/src/wrapper/v0/node_traits.rs create mode 100644 services/common/src/wrapper/v0/pool.rs create mode 100644 services/common/src/wrapper/v0/registry.rs create mode 100644 services/pool/src/server.rs create mode 100644 services/pool/src/service.rs diff --git a/Cargo.lock b/Cargo.lock index d3691e7bb..ed97bcbea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -39,7 +39,7 @@ dependencies = [ "derive_more", "either", "futures-util", - "http 0.2.1", + "http 0.2.2", "log", "rustls", "tokio-rustls", @@ -76,7 +76,7 @@ dependencies = [ "futures-util", "fxhash", "h2", - "http 0.2.1", + "http 0.2.2", "httparse", "indexmap", "itoa", @@ -113,7 +113,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd1f7dbda1645bf7da33554db60891755f6c01c1b2169e2f4c492098d30c235" dependencies = [ "bytestring", - "http 0.2.1", + "http 0.2.2", "log", "regex", "serde", @@ -736,7 +736,7 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http 0.2.1", + "http 0.2.2", "hyper", "hyper-rustls", "hyper-unix-connector", @@ -1930,7 +1930,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.1", + "http 0.2.2", "indexmap", "slab", "tokio", @@ -1993,9 +1993,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes 0.5.6", "fnv", @@ -2009,7 +2009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4908999be8b408e507d4148f3374a6f9e34e941f2d8c3928b1d565f1453291d" dependencies = [ "bytes 0.5.6", - "http 0.2.1", + "http 0.2.2", ] [[package]] @@ -2019,7 +2019,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.6", - "http 0.2.1", + "http 0.2.2", ] [[package]] @@ -2060,7 +2060,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.1", + "http 0.2.2", "http-body 0.3.1", "httparse", "httpdate", @@ -2282,7 +2282,7 @@ dependencies = [ "base64 0.12.3", "bytes 0.5.6", "chrono", - "http 0.2.1", + "http 0.2.2", "percent-encoding 2.1.0", "serde", "serde-value", @@ -2314,7 +2314,7 @@ dependencies = [ "either", "futures", "futures-util", - "http 0.2.1", + "http 0.2.2", "jsonpath_lib", "k8s-openapi", "log", @@ -3497,7 +3497,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "http 0.2.1", + "http 0.2.2", "http-body 0.3.1", "hyper", "hyper-tls", @@ -3548,6 +3548,7 @@ dependencies = [ "rustls", "serde", "serde_json", + "snafu", "structopt", "strum", "strum_macros", @@ -3867,6 +3868,7 @@ dependencies = [ "composer", "dyn-clonable", "futures", + "http 0.2.2", "humantime 2.0.1", "lazy_static", "mbus_api", @@ -3879,6 +3881,7 @@ dependencies = [ "state", "structopt", "tokio", + "tonic", "tracing", "tracing-futures", "tracing-subscriber", @@ -4502,7 +4505,7 @@ dependencies = [ "bytes 0.5.6", "futures-core", "futures-util", - "http 0.2.1", + "http 0.2.2", "http-body 0.3.1", "hyper", "percent-encoding 1.0.1", diff --git a/mbus-api/src/message_bus/v0.rs b/mbus-api/src/message_bus/v0.rs index ba2a7240f..f6362ff51 100644 --- a/mbus-api/src/message_bus/v0.rs +++ b/mbus-api/src/message_bus/v0.rs @@ -1,28 +1,168 @@ +// clippy warning caused by the instrument macro +#![allow(clippy::unit_arg)] + use crate::{v0::*, *}; use async_trait::async_trait; /// Error sending/receiving -pub type Error = crate::Error; +/// Common error type for send/receive +#[derive(Debug, Snafu, strum_macros::AsRefStr)] +#[allow(missing_docs)] +pub enum BusError { + #[snafu(display("Bus Internal error"))] + MessageBusError { source: Error }, + #[snafu(display("Resource not unique"))] + NotUnique, + #[snafu(display("Resource not found"))] + NotFound, +} + +impl From for BusError { + fn from(source: Error) -> Self { + BusError::MessageBusError { + source, + } + } +} + /// Result for sending/receiving -pub type BusResult = crate::BusResult; +pub type BusResult = Result; -/// Mayastor Node +/// Node pub type Node = crate::v0::Node; +/// Nodes list +pub type Nodes = crate::v0::Nodes; +/// Pool +pub type Pool = crate::v0::Pool; +/// Pool list +pub type Pools = crate::v0::Pools; +/// Replica +pub type Replica = crate::v0::Replica; +/// Replica list +pub type Replicas = crate::v0::Replicas; +/// Protocol +pub type Protocol = crate::v0::Protocol; +/// Replica Create +pub type CreateReplica = crate::v0::CreateReplica; +/// Pool Create +pub type CreatePool = crate::v0::CreatePool; +/// Replica Destroy +pub type DestroyReplica = crate::v0::DestroyReplica; +/// Pool Destroy +pub type DestroyPool = crate::v0::DestroyPool; +/// Replica Share +pub type 
ShareReplica = crate::v0::ShareReplica; +/// Replica Unshare +pub type UnshareReplica = crate::v0::UnshareReplica; +/// Query Filter +pub type Filter = crate::v0::Filter; + +macro_rules! only_one { + ($list:ident) => { + if let Some(obj) = $list.first() { + if $list.len() > 1 { + Err(BusError::NotUnique) + } else { + Ok(obj.clone()) + } + } else { + Err(BusError::NotFound) + } + }; +} /// Interface used by the rest service to interact with the control plane /// services via the message bus #[async_trait] pub trait MessageBusTrait: Sized { /// Get all known nodes from the registry - #[tracing::instrument(level = "info")] + #[tracing::instrument(level = "debug", err)] async fn get_nodes() -> BusResult> { - GetNodes {}.request().await.map(|v| v.0) + Ok(GetNodes {}.request().await?.into_inner()) } - /// Get a node through its id - #[tracing::instrument(level = "info")] - async fn get_node(id: String) -> BusResult> { + + /// Get node with `id` + #[tracing::instrument(level = "debug", err)] + async fn get_node(id: &str) -> BusResult { let nodes = Self::get_nodes().await?; - Ok(nodes.into_iter().find(|n| n.id == id)) + let nodes = + nodes.into_iter().filter(|n| n.id == id).collect::>(); + only_one!(nodes) + } + + /// Get pool with filter + #[tracing::instrument(level = "debug", err)] + async fn get_pool(filter: Filter) -> BusResult { + let pools = Self::get_pools(filter).await?; + only_one!(pools) + } + + /// Get pools with filter + #[tracing::instrument(level = "debug", err)] + async fn get_pools(filter: Filter) -> BusResult> { + let pools = GetPools { + filter, + } + .request() + .await?; + Ok(pools.into_inner()) + } + + /// create pool + #[tracing::instrument(level = "debug", err)] + async fn create_pool(request: CreatePool) -> BusResult { + Ok(request.request().await?) + } + + /// destroy pool + #[tracing::instrument(level = "debug", err)] + async fn destroy_pool(request: DestroyPool) -> BusResult<()> { + request.request().await?; + Ok(()) + } + + /// Get replica with filter + #[tracing::instrument(level = "debug", err)] + async fn get_replica(filter: Filter) -> BusResult { + let replicas = Self::get_replicas(filter).await?; + only_one!(replicas) + } + + /// Get replicas with filter + #[tracing::instrument(level = "debug", err)] + async fn get_replicas(filter: Filter) -> BusResult> { + let replicas = GetReplicas { + filter, + } + .request() + .await?; + Ok(replicas.into_inner()) + } + + /// create replica + #[tracing::instrument(level = "debug", err)] + async fn create_replica(request: CreateReplica) -> BusResult { + Ok(request.request().await?) + } + + /// destroy replica + #[tracing::instrument(level = "debug", err)] + async fn destroy_replica(request: DestroyReplica) -> BusResult<()> { + request.request().await?; + Ok(()) + } + + /// create replica + #[tracing::instrument(level = "debug", err)] + async fn share_replica(request: ShareReplica) -> BusResult { + Ok(request.request().await?) 
+ } + + /// create replica + #[tracing::instrument(level = "debug", err)] + async fn unshare_replica(request: UnshareReplica) -> BusResult<()> { + let _ = request.request().await?; + Ok(()) } } @@ -118,14 +258,14 @@ mod tests { state: NodeState::Online, } ); - let node = MessageBus::get_node(mayastor.to_string()).await?; + let node = MessageBus::get_node(mayastor).await?; assert_eq!( node, - Some(Node { + Node { id: mayastor.to_string(), grpc_endpoint: "0.0.0.0:10124".to_string(), state: NodeState::Online, - }) + } ); test.stop("mayastor").await?; diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs index fb850e3b2..67753445b 100644 --- a/mbus-api/src/v0.rs +++ b/mbus-api/src/v0.rs @@ -13,6 +13,8 @@ pub enum ChannelVs { Registry, /// Node Service which exposes the registered mayastor instances Node, + /// Pool Service which manages mayastor pools and replicas + Pool, /// Keep it In Sync Service Kiiss, } @@ -47,6 +49,24 @@ pub enum MessageIdVs { /// Node Service /// Get all node information GetNodes, + /// Pool Service + /// + /// Get pools with filter + GetPools, + /// Create Pool, + CreatePool, + /// Destroy Pool, + DestroyPool, + /// Get replicas with filter + GetReplicas, + /// Create Replica, + CreateReplica, + /// Destroy Replica, + DestroyReplica, + /// Share Replica, + ShareReplica, + /// Unshare Replica, + UnshareReplica, } // Only V0 should export this macro @@ -180,3 +200,276 @@ pub struct Node { bus_impl_vector_request!(Nodes, Node); bus_impl_message_all!(GetNodes, GetNodes, Nodes, Node); + +/// Filter Objects based on one of the following criteria +/// # Example: +/// // Get all nexuses from the node `node_id` +/// let nexuses = +/// MessageBus::get_nexuses(Filter::Node(node_id)).await.unwrap(); +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum Filter { + /// All objects + None, + /// Filter by Node id + Node(String), + /// Pool filters + /// + /// Filter by Pool id + Pool(String), + /// Filter by Node and Pool id + NodePool(String, String), + /// Filter by Node and Replica id + NodeReplica(String, String), + /// Filter by Node, Pool and Replica id + NodePoolReplica(String, String, String), + /// Filter by Pool and Replica id + PoolReplica(String, String), + /// Filter by Replica id + Replica(String), +} +impl Default for Filter { + fn default() -> Self { + Self::None + } +} + +/// Pool Service +/// Get all the pools from specific node or None for all nodes +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct GetPools { + /// Filter request + pub filter: Filter, +} + +/// State of the Pool +#[derive( + Serialize, Deserialize, Debug, Clone, EnumString, ToString, Eq, PartialEq, +)] +pub enum PoolState { + /// unknown state + Unknown = 0, + /// the pool is in normal working order + Online = 1, + /// the pool has experienced a failure but can still function + Degraded = 2, + /// the pool is completely inaccessible + Faulted = 3, +} + +impl Default for PoolState { + fn default() -> Self { + Self::Unknown + } +} +impl From for PoolState { + fn from(src: i32) -> Self { + match src { + 1 => Self::Online, + 2 => Self::Degraded, + 3 => Self::Faulted, + _ => Self::Unknown, + } + } +} + +/// Pool information +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Pool { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub name: String, + /// absolute disk paths claimed by the pool + pub disks: Vec, + /// current state of the pool + pub state: PoolState, + /// size of the 
pool in bytes + pub capacity: u64, + /// used bytes from the pool + pub used: u64, +} + +/// Create Pool Request +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct CreatePool { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub name: String, + /// disk device paths or URIs to be claimed by the pool + pub disks: Vec, +} +bus_impl_message_all!(CreatePool, CreatePool, Pool, Pool); + +/// Destroy Pool Request +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct DestroyPool { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub name: String, +} +bus_impl_message_all!(DestroyPool, DestroyPool, (), Pool); + +bus_impl_vector_request!(Pools, Pool); +bus_impl_message_all!(GetPools, GetPools, Pools, Pool); + +/// Get all the replicas from specific node and pool +/// or None for all nodes or all pools +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct GetReplicas { + /// Filter request + pub filter: Filter, +} + +/// Replica information +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Replica { + /// id of the mayastor instance + pub node: String, + /// uuid of the replica + pub uuid: String, + /// name of the pool + pub pool: String, + /// thin provisioning + pub thin: bool, + /// size of the replica in bytes + pub size: u64, + /// protocol used for exposing the replica + pub share: Protocol, + /// uri usable by nexus to access it + pub uri: String, +} + +bus_impl_vector_request!(Replicas, Replica); +bus_impl_message_all!(GetReplicas, GetReplicas, Replicas, Pool); + +/// Create Replica Request +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CreateReplica { + /// id of the mayastor instance + pub node: String, + /// uuid of the replica + pub uuid: String, + /// name of the pool + pub pool: String, + /// size of the replica in bytes + pub size: u64, + /// thin provisioning + pub thin: bool, + /// protocol to expose the replica over + pub share: Protocol, +} +bus_impl_message_all!(CreateReplica, CreateReplica, Replica, Pool); + +/// Destroy Replica Request +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct DestroyReplica { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub pool: String, + /// uuid of the replica + pub uuid: String, +} +bus_impl_message_all!(DestroyReplica, DestroyReplica, (), Pool); + +/// Share Replica Request +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ShareReplica { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub pool: String, + /// uuid of the replica + pub uuid: String, + /// protocol used for exposing the replica + pub protocol: Protocol, +} +bus_impl_message_all!(ShareReplica, ShareReplica, String, Pool); + +/// Unshare Replica Request +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UnshareReplica { + /// id of the mayastor instance + pub node: String, + /// name of the pool + pub pool: String, + /// uuid of the replica + pub uuid: String, +} +bus_impl_message_all!(UnshareReplica, UnshareReplica, (), Pool); + +/// Indicates what protocol the bdev is shared as +#[derive( + Serialize, Deserialize, Debug, Clone, 
EnumString, ToString, Eq, PartialEq, +)] +#[strum(serialize_all = "camelCase")] +#[serde(rename_all = "camelCase")] +pub enum Protocol { + /// not shared by any of the variants + Off = 0, + /// shared as NVMe-oF TCP + Nvmf = 1, + /// shared as iSCSI + Iscsi = 2, + /// shared as NBD + Nbd = 3, +} + +impl Default for Protocol { + fn default() -> Self { + Self::Off + } +} +impl From for Protocol { + fn from(src: i32) -> Self { + match src { + 0 => Self::Off, + 1 => Self::Nvmf, + 2 => Self::Iscsi, + _ => Self::Off, + } + } +} + +/// State of the Replica +#[derive( + Serialize, Deserialize, Debug, Clone, EnumString, ToString, Eq, PartialEq, +)] +#[strum(serialize_all = "camelCase")] +#[serde(rename_all = "camelCase")] +pub enum ReplicaState { + /// unknown state + Unknown = 0, + /// the pool is in normal working order + Online = 1, + /// the pool has experienced a failure but can still function + Degraded = 2, + /// the pool is completely inaccessible + Faulted = 3, +} + +impl Default for ReplicaState { + fn default() -> Self { + Self::Unknown + } +} +impl From for ReplicaState { + fn from(src: i32) -> Self { + match src { + 1 => Self::Online, + 2 => Self::Degraded, + 3 => Self::Faulted, + _ => Self::Unknown, + } + } +} diff --git a/rest/Cargo.toml b/rest/Cargo.toml index 37f28db9c..e4d556482 100644 --- a/rest/Cargo.toml +++ b/rest/Cargo.toml @@ -28,6 +28,7 @@ tracing-futures = "0.2.4" strum = "0.19" strum_macros = "0.19" anyhow = "1.0.32" +snafu = "0.6" [dev-dependencies] composer = { path = "../composer" } diff --git a/rest/service/src/main.rs b/rest/service/src/main.rs index 2d0933e6a..7cb5bee7f 100644 --- a/rest/service/src/main.rs +++ b/rest/service/src/main.rs @@ -1,14 +1,6 @@ -use mbus_api::message_bus::v0::{MessageBus, *}; +mod v0; -use actix_web::{ - get, - middleware, - web, - App, - HttpResponse, - HttpServer, - Responder, -}; +use actix_web::{middleware, App, HttpServer}; use rustls::{ internal::pemfile::{certs, rsa_private_keys}, NoClientAuth, @@ -29,29 +21,6 @@ struct CliArgs { nats: String, } -#[get("/v0/nodes")] -async fn get_nodes() -> impl Responder { - match MessageBus::get_nodes().await { - Ok(nodes) => HttpResponse::Ok().json(nodes), - Err(error) => { - let error = serde_json::json!({"error": error.to_string()}); - HttpResponse::InternalServerError().json(error) - } - } -} - -#[get("/v0/nodes/{id}")] -async fn get_node(web::Path(node_id): web::Path) -> impl Responder { - match MessageBus::get_node(node_id).await { - Ok(Some(node)) => HttpResponse::Ok().json(node), - Ok(None) => HttpResponse::NoContent().json(()), - Err(error) => { - let error = serde_json::json!({"error": error.to_string()}); - HttpResponse::InternalServerError().json(error) - } - } -} - fn init_tracing() { if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(filter).init(); @@ -80,8 +49,9 @@ async fn main() -> std::io::Result<()> { HttpServer::new(move || { App::new() .wrap(middleware::Logger::default()) - .service(get_nodes) - .service(get_node) + .service(v0::nodes::factory()) + .service(v0::pools::factory()) + .service(v0::replicas::factory()) }) .bind_rustls(CliArgs::from_args().rest, config)? .run() diff --git a/rest/service/src/v0/mod.rs b/rest/service/src/v0/mod.rs new file mode 100644 index 000000000..44b4ab46a --- /dev/null +++ b/rest/service/src/v0/mod.rs @@ -0,0 +1,16 @@ +//! Version 0 of the URI's +//! 
Ex: /v0/nodes + +pub mod nodes; +pub mod pools; +pub mod replicas; + +use mbus_api::{ + message_bus::v0::{MessageBus, *}, + v0::Filter, +}; +use rest_client::versions::v0::*; + +use actix_web::{delete, get, put, web, Responder}; + +use actix_web::dev::{AppService, HttpServiceFactory}; diff --git a/rest/service/src/v0/nodes.rs b/rest/service/src/v0/nodes.rs new file mode 100644 index 000000000..01d7e65f1 --- /dev/null +++ b/rest/service/src/v0/nodes.rs @@ -0,0 +1,21 @@ +use super::*; + +struct Factory {} +impl HttpServiceFactory for Factory { + fn register(self, config: &mut AppService) { + get_node.register(config); + get_nodes.register(config); + } +} +pub(crate) fn factory() -> impl HttpServiceFactory { + Factory {} +} + +#[get("/v0/nodes")] +async fn get_nodes() -> impl Responder { + RestRespond::result(MessageBus::get_nodes().await) +} +#[get("/v0/nodes/{id}")] +async fn get_node(web::Path(node_id): web::Path) -> impl Responder { + RestRespond::result(MessageBus::get_node(&node_id).await) +} diff --git a/rest/service/src/v0/pools.rs b/rest/service/src/v0/pools.rs new file mode 100644 index 000000000..e62814505 --- /dev/null +++ b/rest/service/src/v0/pools.rs @@ -0,0 +1,85 @@ +use super::*; + +struct Factory {} +impl HttpServiceFactory for Factory { + fn register(self, config: &mut AppService) { + get_pools.register(config); + get_pool.register(config); + get_node_pools.register(config); + get_node_pool.register(config); + put_node_pool.register(config); + del_node_pool.register(config); + del_pool.register(config); + } +} +pub(crate) fn factory() -> impl HttpServiceFactory { + Factory {} +} + +#[get("/v0/pools")] +async fn get_pools() -> impl Responder { + RestRespond::result(MessageBus::get_pools(Filter::None).await) +} + +#[get("/v0/pools/{id}")] +async fn get_pool(web::Path(pool_id): web::Path) -> impl Responder { + RestRespond::result(MessageBus::get_pool(Filter::Pool(pool_id)).await) +} + +#[get("/v0/nodes/{id}/pools")] +async fn get_node_pools( + web::Path(node_id): web::Path, +) -> impl Responder { + RestRespond::result(MessageBus::get_pools(Filter::Node(node_id)).await) +} + +#[get("/v0/nodes/{node_id}/pools/{pool_id}")] +async fn get_node_pool( + web::Path((node_id, pool_id)): web::Path<(String, String)>, +) -> impl Responder { + RestRespond::result( + MessageBus::get_pool(Filter::NodePool(node_id, pool_id)).await, + ) +} + +#[put("/v0/nodes/{node_id}/pools/{pool_id}")] +async fn put_node_pool( + web::Path((node_id, pool_id)): web::Path<(String, String)>, + create: web::Json, +) -> impl Responder { + let create = create.into_inner().bus_request(node_id, pool_id); + RestRespond::result(MessageBus::create_pool(create).await) +} + +#[delete("/v0/nodes/{node_id}/pools/{pool_id}")] +async fn del_node_pool( + web::Path((node_id, pool_id)): web::Path<(String, String)>, +) -> impl Responder { + destroy_pool(Filter::NodePool(node_id, pool_id)).await +} +#[delete("/v0/pools/{pool_id}")] +async fn del_pool(web::Path(pool_id): web::Path) -> impl Responder { + destroy_pool(Filter::Pool(pool_id)).await +} + +async fn destroy_pool(filter: Filter) -> impl Responder { + let destroy = match filter.clone() { + Filter::NodePool(node_id, pool_id) => DestroyPool { + node: node_id, + name: pool_id, + }, + Filter::Pool(pool_id) => { + let node_id = match MessageBus::get_pool(filter).await { + Ok(pool) => pool.node, + Err(error) => return (RestError::from(error)).into(), + }; + DestroyPool { + node: node_id, + name: pool_id, + } + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + 
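+ // When the caller supplies only a pool id, the owning node is first
+ // resolved above via MessageBus::get_pool so the destroy can be sent to
+ // the right node. A sketch of driving this route from the rest client
+ // (the node and pool names are illustrative only):
+ //   // DELETE /v0/nodes/mayastor-1/pools/pooloop
+ //   client.destroy_pool(DestroyPool {
+ //       node: "mayastor-1".into(),
+ //       name: "pooloop".into(),
+ //   }).await?;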
RestRespond::result(MessageBus::destroy_pool(destroy).await)
+}
diff --git a/rest/service/src/v0/replicas.rs b/rest/service/src/v0/replicas.rs
new file mode 100644
index 000000000..08e7f7841
--- /dev/null
+++ b/rest/service/src/v0/replicas.rs
@@ -0,0 +1,257 @@
+use super::*;
+
+struct Factory {}
+impl HttpServiceFactory for Factory {
+ fn register(self, config: &mut AppService) {
+ get_replicas.register(config);
+ get_replica.register(config);
+ get_node_replicas.register(config);
+ get_node_pool_replicas.register(config);
+ get_node_pool_replica.register(config);
+ put_node_pool_replica.register(config);
+ put_pool_replica.register(config);
+ del_node_pool_replica.register(config);
+ del_pool_replica.register(config);
+ put_node_pool_replica_share.register(config);
+ put_pool_replica_share.register(config);
+ del_node_pool_replica_share.register(config);
+ del_pool_replica_share.register(config);
+ }
+}
+pub(crate) fn factory() -> impl HttpServiceFactory {
+ Factory {}
+}
+
+#[get("/v0/replicas")]
+async fn get_replicas() -> impl Responder {
+ RestRespond::result(MessageBus::get_replicas(Filter::None).await)
+}
+#[get("/v0/replicas/{id}")]
+async fn get_replica(
+ web::Path(replica_id): web::Path<String>,
+) -> impl Responder {
+ RestRespond::result(
+ MessageBus::get_replica(Filter::Replica(replica_id)).await,
+ )
+}
+
+#[get("/v0/nodes/{id}/replicas")]
+async fn get_node_replicas(
+ web::Path(node_id): web::Path<String>,
+) -> impl Responder {
+ RestRespond::result(MessageBus::get_replicas(Filter::Node(node_id)).await)
+}
+
+#[get("/v0/nodes/{node_id}/pools/{pool_id}/replicas")]
+async fn get_node_pool_replicas(
+ web::Path((node_id, pool_id)): web::Path<(String, String)>,
+) -> impl Responder {
+ RestRespond::result(
+ MessageBus::get_replicas(Filter::NodePool(node_id, pool_id)).await,
+ )
+}
+#[get("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")]
+async fn get_node_pool_replica(
+ web::Path((node_id, pool_id, replica_id)): web::Path<(
+ String,
+ String,
+ String,
+ )>,
+) -> impl Responder {
+ RestRespond::result(
+ MessageBus::get_replica(Filter::NodePoolReplica(
+ node_id, pool_id, replica_id,
+ ))
+ .await,
+ )
+}
+
+#[put("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")]
+async fn put_node_pool_replica(
+ web::Path((node_id, pool_id, replica_id)): web::Path<(
+ String,
+ String,
+ String,
+ )>,
+ create: web::Json<CreateReplicaBody>,
+) -> impl Responder {
+ put_replica(
+ Filter::NodePoolReplica(node_id, pool_id, replica_id),
+ create.into_inner(),
+ )
+ .await
+}
+#[put("/v0/pools/{pool_id}/replicas/{replica_id}")]
+async fn put_pool_replica(
+ web::Path((pool_id, replica_id)): web::Path<(String, String)>,
+ create: web::Json<CreateReplicaBody>,
+) -> impl Responder {
+ put_replica(
+ Filter::PoolReplica(pool_id, replica_id),
+ create.into_inner(),
+ )
+ .await
+}
+
+#[delete("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")]
+async fn del_node_pool_replica(
+ web::Path((node_id, pool_id, replica_id)): web::Path<(
+ String,
+ String,
+ String,
+ )>,
+) -> impl Responder {
+ destroy_replica(Filter::NodePoolReplica(node_id, pool_id, replica_id)).await
+}
+#[delete("/v0/pools/{pool_id}/replicas/{replica_id}")]
+async fn del_pool_replica(
+ web::Path((pool_id, replica_id)): web::Path<(String, String)>,
+) -> impl Responder {
+ destroy_replica(Filter::PoolReplica(pool_id, replica_id)).await
+}
+
+#[put("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}")]
+async fn put_node_pool_replica_share(
+ web::Path((node_id, pool_id, replica_id,
protocol)): web::Path<( + String, + String, + String, + Protocol, + )>, +) -> impl Responder { + share_replica( + Filter::NodePoolReplica(node_id, pool_id, replica_id), + protocol, + ) + .await +} +#[put("/v0/pools/{pool_id}/replicas/{replica_id}/share/{protocol}")] +async fn put_pool_replica_share( + web::Path((pool_id, replica_id, protocol)): web::Path<( + String, + String, + Protocol, + )>, +) -> impl Responder { + share_replica(Filter::PoolReplica(pool_id, replica_id), protocol).await +} + +#[delete("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share")] +async fn del_node_pool_replica_share( + web::Path((node_id, pool_id, replica_id)): web::Path<( + String, + String, + String, + )>, +) -> impl Responder { + unshare_replica(Filter::NodePoolReplica(node_id, pool_id, replica_id)).await +} +#[delete("/v0/pools/{pool_id}/replicas/{replica_id}/share")] +async fn del_pool_replica_share( + web::Path((pool_id, replica_id)): web::Path<(String, String)>, +) -> impl Responder { + unshare_replica(Filter::PoolReplica(pool_id, replica_id)).await +} + +async fn put_replica( + filter: Filter, + body: CreateReplicaBody, +) -> impl Responder { + let create = match filter.clone() { + Filter::NodePoolReplica(node_id, pool_id, replica_id) => { + body.bus_request(node_id, pool_id, replica_id) + } + Filter::PoolReplica(pool_id, replica_id) => { + let node_id = match MessageBus::get_replica(filter).await { + Ok(replica) => replica.node, + Err(error) => return (RestError::from(error)).into(), + }; + body.bus_request(node_id, pool_id, replica_id) + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + RestRespond::result(MessageBus::create_replica(create).await) +} + +async fn destroy_replica(filter: Filter) -> impl Responder { + let destroy = match filter.clone() { + Filter::NodePoolReplica(node_id, pool_id, replica_id) => { + DestroyReplica { + node: node_id, + pool: pool_id, + uuid: replica_id, + } + } + Filter::PoolReplica(pool_id, replica_id) => { + let node_id = match MessageBus::get_replica(filter).await { + Ok(replica) => replica.node, + Err(error) => return (RestError::from(error)).into(), + }; + + DestroyReplica { + node: node_id, + pool: pool_id, + uuid: replica_id, + } + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + RestRespond::result(MessageBus::destroy_replica(destroy).await) +} + +async fn share_replica(filter: Filter, protocol: Protocol) -> impl Responder { + let share = match filter.clone() { + Filter::NodePoolReplica(node_id, pool_id, replica_id) => ShareReplica { + node: node_id, + pool: pool_id, + uuid: replica_id, + protocol, + }, + Filter::PoolReplica(pool_id, replica_id) => { + let node_id = match MessageBus::get_replica(filter).await { + Ok(replica) => replica.node, + Err(error) => return (RestError::from(error)).into(), + }; + + ShareReplica { + node: node_id, + pool: pool_id, + uuid: replica_id, + protocol, + } + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + RestRespond::result(MessageBus::share_replica(share).await) +} + +async fn unshare_replica(filter: Filter) -> impl Responder { + let unshare = match filter.clone() { + Filter::NodePoolReplica(node_id, pool_id, replica_id) => { + UnshareReplica { + node: node_id, + pool: pool_id, + uuid: replica_id, + } + } + Filter::PoolReplica(pool_id, replica_id) => { + let node_id = match MessageBus::get_replica(filter).await { + Ok(replica) => replica.node, + Err(error) => return (RestError::from(error)).into(), + }; + + UnshareReplica { + node: node_id, + pool: 
pool_id, + uuid: replica_id, + } + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + RestRespond::result(MessageBus::unshare_replica(unshare).await) +} diff --git a/rest/src/lib.rs b/rest/src/lib.rs index 3629552ef..b1d30774f 100644 --- a/rest/src/lib.rs +++ b/rest/src/lib.rs @@ -14,7 +14,7 @@ /// expose different versions of the client pub mod versions; -use actix_web::client::Client; +use actix_web::{body::Body, client::Client}; use serde::Deserialize; use std::{io::BufReader, string::ToString}; @@ -54,9 +54,59 @@ impl ActixRestClient { let uri = format!("{}{}", self.url, urn); let mut rest_response = - self.client.get(uri).send().await.map_err(|error| { + self.client.get(uri.clone()).send().await.map_err(|error| { anyhow::anyhow!( - "Failed to get nodes from rest, err={:?}", + "Failed to get uri '{}' from rest, err={:?}", + uri, + error + ) + })?; + + let rest_body = rest_response.body().await?; + Ok(serde_json::from_slice::(&rest_body)?) + } + async fn put>( + &self, + urn: String, + body: B, + ) -> anyhow::Result + where + for<'de> R: Deserialize<'de>, + { + let uri = format!("{}{}", self.url, urn); + + let mut rest_response = self + .client + .put(uri.clone()) + .content_type("application/json") + .send_body(body) + .await + .map_err(|error| { + anyhow::anyhow!( + "Failed to put uri '{}' from rest, err={:?}", + uri, + error + ) + })?; + + let rest_body = rest_response.body().await?; + Ok(serde_json::from_slice::(&rest_body)?) + } + async fn del(&self, urn: String) -> anyhow::Result + where + for<'de> R: Deserialize<'de>, + { + let uri = format!("{}{}", self.url, urn); + + let mut rest_response = self + .client + .delete(uri.clone()) + .send() + .await + .map_err(|error| { + anyhow::anyhow!( + "Failed to delete uri '{}' from rest, err={:?}", + uri, error ) })?; diff --git a/rest/src/versions/v0.rs b/rest/src/versions/v0.rs index a28d6c3f9..a67f0a748 100644 --- a/rest/src/versions/v0.rs +++ b/rest/src/versions/v0.rs @@ -1,27 +1,147 @@ use super::super::ActixRestClient; +use actix_web::{body::Body, http::StatusCode, HttpResponse, ResponseError}; use async_trait::async_trait; -use std::string::ToString; +use mbus_api::{ + message_bus::{v0, v0::BusError}, + ErrorChain, +}; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Display, Formatter}, + string::ToString, +}; use strum_macros::{self, Display}; /// Node from the node service -pub type Node = mbus_api::v0::Node; +pub type Node = v0::Node; /// Vector of Nodes from the node service -pub type Nodes = mbus_api::v0::Nodes; +pub type Nodes = v0::Nodes; +/// Pool from the node service +pub type Pool = v0::Pool; +/// Vector of Pools from the node service +pub type Pools = v0::Pools; +/// Replica +pub type Replica = v0::Replica; +/// Vector of Replicas from the node service +pub type Replicas = v0::Replicas; +/// Replica protocol +pub type Protocol = v0::Protocol; +/// Create Pool request +pub type CreatePool = v0::CreatePool; +/// Create Replica request +pub type CreateReplica = v0::CreateReplica; +/// Replica Destroy +pub type DestroyReplica = v0::DestroyReplica; +/// Pool Destroy +pub type DestroyPool = v0::DestroyPool; +/// Create Replica Body JSON +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct CreateReplicaBody { + /// size of the replica in bytes + pub size: u64, + /// thin provisioning + pub thin: bool, + /// protocol to expose the replica over + pub share: Protocol, +} +/// Create Pool Body JSON +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct CreatePoolBody { + 
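+ // CreatePoolBody is the JSON payload of PUT /v0/nodes/{node}/pools/{pool};
+ // the node and pool ids come from the URI, so only the disks are left in
+ // the body. An illustrative payload (the device path is an example only):
+ //   { "disks": ["/dev/sdb"] }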
/// disk device paths or URIs to be claimed by the pool + pub disks: Vec, +} +impl From for CreatePoolBody { + fn from(create: CreatePool) -> Self { + CreatePoolBody { + disks: create.disks, + } + } +} +impl CreatePoolBody { + /// convert into message bus type + pub fn bus_request( + &self, + node_id: String, + pool_id: String, + ) -> v0::CreatePool { + v0::CreatePool { + node: node_id, + name: pool_id, + disks: self.disks.clone(), + } + } +} +impl From for CreateReplicaBody { + fn from(create: CreateReplica) -> Self { + CreateReplicaBody { + size: create.size, + thin: create.thin, + share: create.share, + } + } +} +impl CreateReplicaBody { + /// convert into message bus type + pub fn bus_request( + &self, + node_id: String, + pool_id: String, + uuid: String, + ) -> v0::CreateReplica { + v0::CreateReplica { + node: node_id, + uuid, + pool: pool_id, + size: self.size, + thin: self.thin, + share: self.share.clone(), + } + } +} +/// Filter Nodes, Pools, Replicas +pub type Filter = v0::Filter; /// RestClient interface #[async_trait(?Send)] pub trait RestClient { /// Get all the known nodes async fn get_nodes(&self) -> anyhow::Result>; + /// Get all the known pools + async fn get_pools(&self, filter: Filter) -> anyhow::Result>; + /// Get all the known replicas + async fn get_replicas( + &self, + filter: Filter, + ) -> anyhow::Result>; + /// Create new pool with arguments + async fn create_pool(&self, args: CreatePool) -> anyhow::Result; + /// Create new replica with arguments + async fn create_replica( + &self, + args: CreateReplica, + ) -> anyhow::Result; + /// Destroy pool with arguments + async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()>; + /// Destroy replica with arguments + async fn destroy_replica(&self, args: DestroyReplica) + -> anyhow::Result<()>; } #[derive(Display, Debug)] +#[allow(clippy::enum_variant_names)] enum RestURNs { #[strum(serialize = "nodes")] GetNodes(Nodes), + #[strum(serialize = "pools")] + GetPools(Pools), + #[strum(serialize = "replicas")] + GetReplicas(Replicas), + /* does not work as expect as format! only takes literals... + * #[strum(serialize = "nodes/{}/pools/{}")] + * PutPool(Pool), */ } -macro_rules! get { +macro_rules! get_all { ($S:ident, $T:ident) => { $S.get( format!("/v0/{}", RestURNs::$T(Default::default()).to_string()), @@ -29,12 +149,116 @@ macro_rules! get { ) }; } +macro_rules! get_filter { + ($S:ident, $F:ident, $T:ident) => { + $S.get( + format!( + "/v0/{}", + get_filtered_urn($F, &RestURNs::$T(Default::default()))? 
+ ), + RestURNs::$T, + ) + }; +} + +fn get_filtered_urn(filter: Filter, r: &RestURNs) -> anyhow::Result { + let urn = match r { + RestURNs::GetNodes(_) => match filter { + Filter::None => "nodes".to_string(), + Filter::Node(id) => format!("nodes/{}", id), + _ => return Err(anyhow::Error::msg("Invalid filter for Nodes")), + }, + RestURNs::GetPools(_) => match filter { + Filter::None => "pools".to_string(), + Filter::Node(id) => format!("nodes/{}/pools", id), + Filter::Pool(id) => format!("pools/{}", id), + Filter::NodePool(n, p) => format!("nodes/{}/pools/{}", n, p), + _ => return Err(anyhow::Error::msg("Invalid filter for pools")), + }, + RestURNs::GetReplicas(_) => match filter { + Filter::None => "replicas".to_string(), + Filter::Node(id) => format!("nodes/{}/replicas", id), + Filter::Pool(id) => format!("pools/{}/replicas", id), + Filter::Replica(id) => format!("replicas/{}", id), + Filter::NodePool(n, p) => { + format!("nodes/{}/pools/{}/replicas", n, p) + } + Filter::NodeReplica(n, r) => format!("nodes/{}/replicas/{}", n, r), + Filter::NodePoolReplica(n, p, r) => { + format!("nodes/{}/pools/{}/replicas/{}", n, p, r) + } + Filter::PoolReplica(p, r) => format!("pools/{}/replicas/{}", p, r), + }, + }; + + Ok(urn) +} #[async_trait(?Send)] impl RestClient for ActixRestClient { async fn get_nodes(&self) -> anyhow::Result> { - let nodes = get!(self, GetNodes).await?; - Ok(nodes.0) + let nodes = get_all!(self, GetNodes).await?; + Ok(nodes.into_inner()) + } + + async fn get_pools(&self, filter: Filter) -> anyhow::Result> { + let pools = get_filter!(self, filter, GetPools).await?; + Ok(pools.into_inner()) + } + + async fn get_replicas( + &self, + filter: Filter, + ) -> anyhow::Result> { + let replicas = get_filter!(self, filter, GetReplicas).await?; + Ok(replicas.into_inner()) + } + + async fn create_pool(&self, args: CreatePool) -> anyhow::Result { + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + let pool = self.put(urn, CreatePoolBody::from(args)).await?; + Ok(pool) + } + + async fn create_replica( + &self, + args: CreateReplica, + ) -> anyhow::Result { + let urn = format!( + "/v0/nodes/{}/pools/{}/replicas/{}", + &args.node, &args.pool, &args.uuid + ); + let replica = self.put(urn, CreateReplicaBody::from(args)).await?; + Ok(replica) + } + + async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()> { + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + self.del(urn).await?; + Ok(()) + } + + async fn destroy_replica( + &self, + args: DestroyReplica, + ) -> anyhow::Result<()> { + let urn = format!( + "/v0/nodes/{}/pools/{}/replicas/{}", + &args.node, &args.pool, &args.uuid + ); + self.del(urn).await?; + Ok(()) + } +} + +impl Into for CreatePoolBody { + fn into(self) -> Body { + Body::from(serde_json::to_value(self).unwrap()) + } +} +impl Into for CreateReplicaBody { + fn into(self) -> Body { + Body::from(serde_json::to_value(self).unwrap()) } } @@ -44,3 +268,98 @@ impl ActixRestClient { self.clone() } } + +/// Rest Error +#[derive(Debug)] +pub struct RestError { + kind: BusError, + message: String, +} + +impl RestError { + // todo: response type convention + fn get_resp_error(&self) -> HttpResponse { + match &self.kind { + BusError::NotFound => HttpResponse::NoContent().json(()), + BusError::NotUnique => { + let error = serde_json::json!({"error": self.kind.as_ref(), "message": self.message }); + tracing::error!("Got error: {}", error); + HttpResponse::InternalServerError().json(error) + } + BusError::MessageBusError { + source, + } => { + 
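+ // As in the NotUnique arm above, message bus errors surface as a 500
+ // with a JSON body of the form { "error": "...", "message": "..." }
+ // (sketch), whereas NotFound deliberately maps to 204 No Content.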
let error = serde_json::json!({"error": source.as_ref(), "message": source.full_string() }); + tracing::error!("Got error: {}", error); + HttpResponse::InternalServerError().json(error) + } + } + } +} +// used by the trait ResponseError only when the default error_response trait +// method is used. +impl Display for RestError { + fn fmt(&self, _: &mut Formatter<'_>) -> std::fmt::Result { + unimplemented!() + } +} +impl ResponseError for RestError { + fn status_code(&self) -> StatusCode { + self.get_resp_error().status() + } + fn error_response(&self) -> HttpResponse { + self.get_resp_error() + } +} +impl From for RestError { + fn from(kind: BusError) -> Self { + Self { + message: kind.to_string(), + kind, + } + } +} +impl Into for RestError { + fn into(self) -> HttpResponse { + self.get_resp_error() + } +} + +/// Respond using a message bus response Result +/// In case of success the Response is sent via the body of a HttpResponse with +/// StatusCode OK. +/// Otherwise, the RestError is returned, also as a HttpResponse/ResponseError. +#[derive(Debug)] +pub struct RestRespond(Result); + +// used by the trait ResponseError only when the default error_response trait +// method is used. +impl Display for RestRespond { + fn fmt(&self, _: &mut Formatter<'_>) -> std::fmt::Result { + unimplemented!() + } +} +impl RestRespond { + /// Respond with a Result + pub fn result(from: Result) -> HttpResponse { + let resp: Self = from.into(); + resp.into() + } + /// Respond T with success + pub fn ok(object: T) -> Result { + Ok(HttpResponse::Ok().json(object)) + } +} +impl Into> for Result { + fn into(self) -> RestRespond { + RestRespond(self.map_err(RestError::from)) + } +} +impl Into for RestRespond { + fn into(self) -> HttpResponse { + match self.0 { + Ok(resp) => HttpResponse::Ok().json(resp), + Err(error) => error.into(), + } + } +} diff --git a/rest/tests/test.rs b/rest/tests/test.rs index a5a7b902e..4204929b3 100644 --- a/rest/tests/test.rs +++ b/rest/tests/test.rs @@ -1,11 +1,13 @@ pub use composer::*; +use mbus_api::{message_bus_init_options, TimeoutOptions}; +use std::time::Duration; pub use tracing::info; fn init_tracing() { if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(filter).init(); } else { - tracing_subscriber::fmt().with_env_filter("info").init(); + tracing_subscriber::fmt().with_env_filter("debug,h2=info,bollard=info,hyper=info,trust_dns_resolver=info,rustls=info,tower_buffer=info").init(); } } @@ -14,8 +16,15 @@ pub fn init() { } pub async fn bus_init(nats: &str) -> Result<(), Box> { - tokio::time::timeout(std::time::Duration::from_secs(2), async { - mbus_api::message_bus_init(nats.into()).await + tokio::time::timeout(Duration::from_secs(2), async { + message_bus_init_options( + nats.into(), + TimeoutOptions::new() + .with_timeout(Duration::from_millis(150)) + .with_max_retries(10) + .with_timeout_backoff(Duration::from_millis(100)), + ) + .await }) .await?; Ok(()) diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs index f37662f60..a6cb984d9 100644 --- a/rest/tests/v0_test.rs +++ b/rest/tests/v0_test.rs @@ -1,25 +1,36 @@ mod test; use mbus_api::{ - v0::{GetNodes, NodeState}, + v0::{GetNodes, GetPools, NodeState, PoolState}, Message, }; use rest_client::{versions::v0::*, ActixRestClient}; use rpc::mayastor::Null; use test::{Binary, Builder, ComposeTest, ContainerSpec}; +use tracing::info; async fn wait_for_node() -> Result<(), Box> { let _ = GetNodes {}.request().await?; Ok(()) } +async fn wait_for_pool() -> 
Result<(), Box> { + let _ = GetPools { + filter: Default::default(), + } + .request() + .await?; + Ok(()) +} // to avoid waiting for timeouts async fn orderly_start( test: &ComposeTest, ) -> Result<(), Box> { - test.start_containers(vec!["nats", "node", "rest"]).await?; + test.start_containers(vec!["nats", "node", "pool", "rest"]) + .await?; test::bus_init("localhost").await?; wait_for_node().await?; + wait_for_pool().await?; test.start("mayastor").await?; @@ -43,6 +54,7 @@ async fn client() -> Result<(), Box> { .with_portmap("4222", "4222"), ) .add_container_bin("node", Binary::from_dbg("node").with_nats("-n")) + .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n")) .add_container_spec( ContainerSpec::from_binary( "rest", @@ -54,9 +66,9 @@ async fn client() -> Result<(), Box> { "mayastor", Binary::from_dbg("mayastor") .with_nats("-n") - .with_args(vec!["-N", mayastor]), + .with_args(vec!["-N", mayastor]) + .with_args(vec!["-g", "10.1.0.6:10124"]), ) - .with_clean(true) .autorun(false) .build() .await?; @@ -84,10 +96,76 @@ async fn client_test( nodes.first().unwrap(), &Node { id: mayastor.to_string(), - grpc_endpoint: "0.0.0.0:10124".to_string(), + grpc_endpoint: "10.1.0.6:10124".to_string(), state: NodeState::Online, } ); + info!("Nodes: {:#?}", nodes); + let _ = client.get_pools(Filter::None).await?; + let pool = client.create_pool(CreatePool { + node: mayastor.to_string(), + name: "pooloop".to_string(), + disks: vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()] + }).await?; + info!("Pools: {:#?}", pool); + assert_eq!( + pool, + Pool { + node: "node-test-name".to_string(), + name: "pooloop".to_string(), + disks: vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()], + state: PoolState::Online, + capacity: 100663296, + used: 0, + } + ); + assert_eq!(Some(&pool), client.get_pools(Filter::None).await?.first()); + let _ = client.get_replicas(Filter::None).await?; + let replica = client + .create_replica(CreateReplica { + node: pool.node.clone(), + pool: pool.name.clone(), + uuid: "replica1".to_string(), + size: 12582912, /* actual size will be a multiple of 4MB so just + * create it like so */ + thin: true, + share: Protocol::Nvmf, + }) + .await?; + info!("Replica: {:#?}", replica); + assert_eq!( + replica, + Replica { + node: pool.node.clone(), + uuid: "replica1".to_string(), + pool: pool.name.clone(), + thin: false, + size: 12582912, + share: Protocol::Nvmf, + uri: "nvmf://10.1.0.6:8420/nqn.2019-05.io.openebs:replica1" + .to_string(), + } + ); + assert_eq!( + Some(&replica), + client.get_replicas(Filter::None).await?.first() + ); + client + .destroy_replica(DestroyReplica { + node: replica.node.clone(), + pool: replica.pool.clone(), + uuid: replica.uuid, + }) + .await?; + assert_eq!(client.get_replicas(Filter::None).await?.is_empty(), true); + client + .destroy_pool(DestroyPool { + node: pool.node.clone(), + name: pool.name, + }) + .await?; + assert_eq!(client.get_pools(Filter::None).await?.is_empty(), true); + test.stop("mayastor").await?; tokio::time::delay_for(std::time::Duration::from_millis(250)).await; assert!(client.get_nodes().await?.is_empty()); diff --git a/services/Cargo.toml b/services/Cargo.toml index 0c0e04c61..71edb0f70 100644 --- a/services/Cargo.toml +++ b/services/Cargo.toml @@ -12,6 +12,10 @@ path = "kiiss/src/server.rs" name = "node" path = "node/src/server.rs" +[[bin]] +name = "pool" +path = "pool/src/server.rs" + [lib] name = "common" path = 
"common/src/lib.rs" @@ -21,6 +25,7 @@ mbus_api = { path = "../mbus-api" } nats = "0.8" structopt = "0.3.15" tokio = { version = "0.2", features = ["full"] } +tonic = "0.1" futures = "0.3.6" serde_json = "1.0" async-trait = "0.1.36" @@ -33,10 +38,11 @@ state = "0.4.2" tracing = "0.1" tracing-subscriber = "0.2" tracing-futures = "0.2.4" +rpc = { path = "../rpc" } +http = "0.2.2" [dev-dependencies] composer = { path = "../composer" } -rpc = { path = "../rpc" } [dependencies.serde] features = ["derive"] diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index f5291c0fd..b3fed96f7 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -4,6 +4,9 @@ //! It's meant to facilitate the creation of services with a helper builder to //! subscribe handlers for different message identifiers. +/// wrapper for mayastor resources +pub mod wrapper; + use async_trait::async_trait; use dyn_clonable::clonable; use futures::{future::join_all, stream::StreamExt}; diff --git a/services/common/src/wrapper/mod.rs b/services/common/src/wrapper/mod.rs new file mode 100644 index 000000000..d9222c1c3 --- /dev/null +++ b/services/common/src/wrapper/mod.rs @@ -0,0 +1,4 @@ +//! Service backend for the message bus and gRPC + +/// Version 0 of the message bus types +pub mod v0; diff --git a/services/common/src/wrapper/v0/mod.rs b/services/common/src/wrapper/v0/mod.rs new file mode 100644 index 000000000..8bde247f1 --- /dev/null +++ b/services/common/src/wrapper/v0/mod.rs @@ -0,0 +1,81 @@ +//! Implementation of a service backend which interacts with +//! mayastor instances via gRPC and with the other services via the +//! message bus. + +mod registry; + +pub use pool::NodeWrapperPool; +pub use registry::Registry; + +use async_trait::async_trait; +use dyn_clonable::clonable; +use mbus_api::{ + message_bus::v0::{BusError, MessageBus, MessageBusTrait}, + v0::*, +}; +use rpc::mayastor::{mayastor_client::MayastorClient, Null}; +use snafu::{ResultExt, Snafu}; +use std::{ + collections::HashMap, + fmt::Debug, + marker::PhantomData, + str::FromStr, + sync::Arc, +}; +use tokio::sync::Mutex; +use tonic::transport::Channel; + +/// Common error type for send/receive +#[derive(Debug, Snafu)] +#[allow(missing_docs)] +pub enum SvcError { + #[snafu(display("Failed to get nodes from the node service"))] + BusGetNodes { source: BusError }, + #[snafu(display("Failed to get node '{}' from the node service", node))] + BusGetNode { source: BusError, node: String }, + #[snafu(display("Node '{}' is not online", node))] + NodeNotOnline { node: String }, + #[snafu(display("Failed to connect to node via gRPC"))] + GrpcConnect { source: tonic::transport::Error }, + #[snafu(display("Failed to list pools via gRPC"))] + GrpcListPools { source: tonic::Status }, + #[snafu(display("Failed to create pool via gRPC"))] + GrpcCreatePool { source: tonic::Status }, + #[snafu(display("Failed to destroy pool via gRPC"))] + GrpcDestroyPool { source: tonic::Status }, + #[snafu(display("Failed to list replicas via gRPC"))] + GrpcListReplicas { source: tonic::Status }, + #[snafu(display("Failed to create replica via gRPC"))] + GrpcCreateReplica { source: tonic::Status }, + #[snafu(display("Failed to destroy replica via gRPC"))] + GrpcDestroyReplica { source: tonic::Status }, + #[snafu(display("Failed to share replica via gRPC"))] + GrpcShareReplica { source: tonic::Status }, + #[snafu(display("Failed to unshare replica via gRPC"))] + GrpcUnshareReplica { source: tonic::Status }, + #[snafu(display("Node not found"))] + 
BusNodeNotFound { node_id: String },
+ #[snafu(display("Pool not found"))]
+ BusPoolNotFound { pool_id: String },
+ #[snafu(display("Invalid filter for pools"))]
+ InvalidFilter { filter: Filter },
+ #[snafu(display("Failed to list nexuses via gRPC"))]
+ GrpcListNexuses { source: tonic::Status },
+ #[snafu(display("Failed to create nexus via gRPC"))]
+ GrpcCreateNexus { source: tonic::Status },
+ #[snafu(display("Failed to destroy nexus via gRPC"))]
+ GrpcDestroyNexus { source: tonic::Status },
+ #[snafu(display("Failed to share nexus via gRPC"))]
+ GrpcShareNexus { source: tonic::Status },
+ #[snafu(display("Failed to unshare nexus via gRPC"))]
+ GrpcUnshareNexus { source: tonic::Status },
+ #[snafu(display("Failed to create volume due to insufficient resources"))]
+ NotEnoughResources {},
+ #[snafu(display("Invalid arguments"))]
+ InvalidArguments {},
+ #[snafu(display("Not implemented"))]
+ NotImplemented {},
+}
+
+mod node_traits;
+mod pool;
diff --git a/services/common/src/wrapper/v0/node_traits.rs b/services/common/src/wrapper/v0/node_traits.rs
new file mode 100644
index 000000000..d50d4c2cf
--- /dev/null
+++ b/services/common/src/wrapper/v0/node_traits.rs
@@ -0,0 +1,225 @@
+use super::*;
+
+/// Context with the gRPC clients
+pub struct GrpcContext {
+ pub client: MayaClient,
+}
+pub type MayaClient = MayastorClient<Channel>;
+impl GrpcContext {
+ pub async fn new(endpoint: String) -> Result<Self, SvcError> {
+ let uri = format!("http://{}", endpoint);
+ let uri = http::uri::Uri::from_str(&uri).unwrap();
+ let endpoint = tonic::transport::Endpoint::from(uri)
+ .timeout(std::time::Duration::from_secs(1));
+ let client = MayaClient::connect(endpoint)
+ .await
+ .context(GrpcConnect {})?;
+
+ Ok(Self {
+ client,
+ })
+ }
+}
+
+/// Trait for a Node Replica which can be implemented to interact with mayastor
+/// node replicas either via gRPC or MBUS or with a service via MBUS
+#[async_trait]
+#[clonable]
+pub trait NodeReplicaTrait: Send + Sync + Debug + Clone {
+ /// Fetch replicas on all pools via gRPC or MBUS
+ async fn fetch_replicas(&self) -> Result<Vec<Replica>, SvcError>;
+
+ /// Create a replica on a pool via gRPC or MBUS
+ async fn create_replica(
+ &self,
+ request: &CreateReplica,
+ ) -> Result<Replica, SvcError>;
+
+ /// Share a replica on a pool via gRPC or MBUS
+ async fn share_replica(
+ &self,
+ request: &ShareReplica,
+ ) -> Result<String, SvcError>;
+
+ /// Unshare a replica on a pool via gRPC or MBUS
+ async fn unshare_replica(
+ &self,
+ request: &UnshareReplica,
+ ) -> Result<(), SvcError>;
+
+ /// Destroy a replica on a pool via gRPC or MBUS
+ async fn destroy_replica(
+ &self,
+ request: &DestroyReplica,
+ ) -> Result<(), SvcError>;
+
+ /// Update internal replica list following a create
+ fn on_create_replica(&mut self, replica: &Replica);
+ /// Update internal replica list following a destroy
+ fn on_destroy_replica(&mut self, pool: &str, replica: &str);
+ /// Update internal replica list following an update
+ fn on_update_replica(
+ &mut self,
+ pool: &str,
+ replica: &str,
+ share: &Protocol,
+ uri: &str,
+ );
+}
+
+/// Trait for a Node Pool which can be implemented to interact with mayastor
+/// node pools either via gRPC or MBUS or with a service via MBUS
+#[async_trait]
+#[clonable]
+pub trait NodePoolTrait: Send + Sync + Debug + Clone {
+ /// Fetch all pools via gRPC or MBUS
+ async fn fetch_pools(&self) -> Result<Vec<Pool>, SvcError>;
+
+ /// Create a pool on a node via gRPC or MBUS
+ async fn create_pool(&self, request: &CreatePool)
+ -> Result<Pool, SvcError>;
+
+ /// Destroy a pool on a node via gRPC or MBUS
+ async fn destroy_pool(&self, request:
&DestroyPool) + -> Result<(), SvcError>; + + /// Update internal pool list following a create + async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]); + /// Update internal pool list following a destroy + fn on_destroy_pool(&mut self, pool: &str); +} + +/// Trait for a Node which can be implemented to interact with mayastor +/// node replicas either via gRPC or MBUS or with a service via MBUS +#[async_trait] +#[clonable] +pub trait NodeWrapperTrait: + Send + Sync + Debug + Clone + NodeReplicaTrait + NodePoolTrait +{ + /// New NodeWrapper for the node + #[allow(clippy::new_ret_no_self)] + async fn new(node: &str) -> Result + where + Self: Sized; + /// Fetch all nodes via the message bus + async fn fetch_nodes() -> Result, SvcError> + where + Self: Sized, + { + MessageBus::get_nodes().await.context(BusGetNodes {}) + } + + /// Get the internal id + fn id(&self) -> String; + /// Get the internal node + fn node(&self) -> Node; + /// Get the internal pools + fn pools(&self) -> Vec; + /// Get the internal pools wrapper + fn pools_wrapper(&self) -> Vec; + /// Get the internal replicas + fn replicas(&self) -> Vec; + + /// Check if the node is online + fn is_online(&self) -> bool; + /// Fallible Result used by operations that should only proceed with the + /// node online + fn online_only(&self) -> Result<(), SvcError> { + if !self.is_online() { + Err(SvcError::NodeNotOnline { + node: self.node().id, + }) + } else { + Ok(()) + } + } + + /// Update this node with the latest information from the message bus and + /// mayastor + async fn update(&mut self); + /// Set the node state + fn set_state(&mut self, state: NodeState); + + /// Get the gRPC context with the mayastor proto handle + async fn grpc_client(&self) -> Result { + self.online_only()?; + GrpcContext::new(self.node().grpc_endpoint.clone()).await + } +} +/// Handy Boxed NodeWrapperTrait +pub type NodeWrapper = Box; + +/// Wrapper over the message bus Pools +/// With the respective node and pool replicas +#[derive(Clone, Debug, Default, Eq, PartialEq)] +pub struct PoolWrapper { + pool: Pool, + replicas: Vec, +} + +impl PoolWrapper { + /// New Pool wrapper with the pool and replicas + pub fn new_from(pool: &Pool, replicas: &[Replica]) -> Self { + Self { + pool: pool.clone(), + replicas: replicas.into(), + } + } + + /// Get the internal pool + pub fn pool(&self) -> Pool { + self.pool.clone() + } + /// Get the pool uuid + pub fn uuid(&self) -> String { + self.pool.name.clone() + } + /// Get the pool node name + pub fn node(&self) -> String { + self.pool.node.clone() + } + /// Get the pool state + pub fn state(&self) -> PoolState { + self.pool.state.clone() + } + + /// Get the free space + pub fn free_space(&self) -> u64 { + if self.pool.capacity > self.pool.used { + self.pool.capacity - self.pool.used + } else { + // odd, let's report no free space available + 0 + } + } + + /// Set pool state as unknown + pub fn set_unknown(&mut self) { + self.pool.state = PoolState::Unknown; + } + + /// Get all replicas from this pool + pub fn replicas(&self) -> Vec { + self.replicas.clone() + } + + /// Add replica to list + pub fn added_replica(&mut self, replica: &Replica) { + self.replicas.push(replica.clone()) + } + /// Remove replica from list + pub fn removed_replica(&mut self, uuid: &str) { + self.replicas.retain(|replica| replica.uuid != uuid) + } + /// update replica from list + pub fn updated_replica(&mut self, uuid: &str, share: &Protocol, uri: &str) { + if let Some(replica) = self + .replicas + .iter_mut() + .find(|replica| 
replica.uuid == uuid)
+ {
+ replica.share = share.clone();
+ replica.uri = uri.to_string();
+ }
+ }
+}
diff --git a/services/common/src/wrapper/v0/pool.rs b/services/common/src/wrapper/v0/pool.rs
new file mode 100644
index 000000000..79a269df0
--- /dev/null
+++ b/services/common/src/wrapper/v0/pool.rs
@@ -0,0 +1,364 @@
+use super::{node_traits::*, *};
+
+/// Implementation of the trait NodeWrapperPool for the pool service
+#[derive(Debug, Default, Clone)]
+pub struct NodeWrapperPool {
+ node: Node,
+ pools: HashMap<String, PoolWrapper>,
+}
+
+#[async_trait]
+impl NodePoolTrait for NodeWrapperPool {
+ /// Fetch all pools from this node via gRPC
+ async fn fetch_pools(&self) -> Result<Vec<Pool>, SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let rpc_pools = ctx
+ .client
+ .list_pools(Null {})
+ .await
+ .context(GrpcListPools {})?;
+ let rpc_pools = &rpc_pools.get_ref().pools;
+ let pools = rpc_pools
+ .iter()
+ .map(|p| rpc_pool_to_bus(p, self.node.id.clone()))
+ .collect();
+ Ok(pools)
+ }
+
+ /// Create a pool on the node via gRPC
+ async fn create_pool(
+ &self,
+ request: &CreatePool,
+ ) -> Result<Pool, SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let rpc_pool = ctx
+ .client
+ .create_pool(bus_pool_to_rpc(&request))
+ .await
+ .context(GrpcCreatePool {})?;
+
+ Ok(rpc_pool_to_bus(&rpc_pool.into_inner(), self.id()))
+ }
+
+ /// Destroy a pool on the node via gRPC
+ async fn destroy_pool(
+ &self,
+ request: &DestroyPool,
+ ) -> Result<(), SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let _ = ctx
+ .client
+ .destroy_pool(bus_pool_destroy_to_rpc(request))
+ .await
+ .context(GrpcDestroyPool {})?;
+
+ Ok(())
+ }
+
+ async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]) {
+ self.pools
+ .insert(pool.name.clone(), PoolWrapper::new_from(&pool, replicas));
+ }
+
+ fn on_destroy_pool(&mut self, pool: &str) {
+ self.pools.remove(pool);
+ }
+}
+
+#[async_trait]
+impl NodeReplicaTrait for NodeWrapperPool {
+ /// Fetch all replicas from this node via gRPC
+ async fn fetch_replicas(&self) -> Result<Vec<Replica>, SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let rpc_replicas = ctx
+ .client
+ .list_replicas(Null {})
+ .await
+ .context(GrpcListReplicas {})?;
+ let rpc_replicas = &rpc_replicas.get_ref().replicas;
+ let replicas = rpc_replicas
+ .iter()
+ .map(|r| rpc_replica_to_bus(r, self.node.id.clone()))
+ .collect();
+ Ok(replicas)
+ }
+
+ /// Create a replica on the pool via gRPC
+ async fn create_replica(
+ &self,
+ request: &CreateReplica,
+ ) -> Result<Replica, SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let rpc_replica = ctx
+ .client
+ .create_replica(bus_replica_to_rpc(request))
+ .await
+ .context(GrpcCreateReplica {})?;
+
+ Ok(rpc_replica_to_bus(&rpc_replica.into_inner(), self.id()))
+ }
+
+ /// Share a replica on the pool via gRPC
+ async fn share_replica(
+ &self,
+ request: &ShareReplica,
+ ) -> Result<String, SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let share = ctx
+ .client
+ .share_replica(bus_replica_share_to_rpc(request))
+ .await
+ .context(GrpcShareReplica {})?;
+
+ Ok(share.into_inner().uri)
+ }
+
+ /// Unshare a replica on the pool via gRPC
+ async fn unshare_replica(
+ &self,
+ request: &UnshareReplica,
+ ) -> Result<(), SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let _ = ctx
+ .client
+ .share_replica(bus_replica_unshare_to_rpc(request))
+ .await
+ .context(GrpcUnshareReplica {})?;
+
+ Ok(())
+ }
+
+ /// Destroy a replica on the pool via gRPC
+ async fn destroy_replica(
+ &self,
+ request: &DestroyReplica,
+ ) -> Result<(), SvcError> {
+ let mut ctx = self.grpc_client().await?;
+ let _ =
ctx + .client + .destroy_replica(bus_replica_destroy_to_rpc(request)) + .await + .context(GrpcDestroyReplica {})?; + + Ok(()) + } + + fn on_create_replica(&mut self, replica: &Replica) { + if let Some(pool) = self.pools.get_mut(&replica.pool) { + pool.added_replica(replica); + } + } + + fn on_destroy_replica(&mut self, pool: &str, replica: &str) { + if let Some(pool) = self.pools.get_mut(pool) { + pool.removed_replica(replica) + } + } + + fn on_update_replica( + &mut self, + pool: &str, + replica: &str, + share: &Protocol, + uri: &str, + ) { + if let Some(pool) = self.pools.get_mut(pool) { + pool.updated_replica(replica, share, uri); + } + } +} + +#[async_trait] +impl NodeWrapperTrait for NodeWrapperPool { + async fn new(node: &str) -> Result { + Ok(Box::new(Self::new_wrapper(node).await?)) + } + + fn id(&self) -> String { + self.node.id.clone() + } + fn node(&self) -> Node { + self.node.clone() + } + fn pools(&self) -> Vec { + self.pools.values().map(|p| p.pool()).collect() + } + fn pools_wrapper(&self) -> Vec { + self.pools.values().cloned().collect() + } + fn replicas(&self) -> Vec { + self.pools + .values() + .map(|p| p.replicas()) + .flatten() + .collect() + } + fn is_online(&self) -> bool { + self.node.state == NodeState::Online + } + + async fn update(&mut self) { + match Self::new_wrapper(&self.node.id).await { + Ok(node) => { + let old_state = self.node.state.clone(); + *self = node; + if old_state != self.node.state { + tracing::error!( + "Node '{}' changed state from '{}' to '{}'", + self.node.id, + old_state.to_string(), + self.node.state.to_string() + ) + } + } + Err(error) => { + tracing::error!( + "Failed to update the node '{}', error: {}", + self.node.id, + error + ); + self.set_state(NodeState::Unknown); + } + } + } + fn set_state(&mut self, state: NodeState) { + if self.node.state != state { + tracing::info!( + "Node '{}' state is now {}", + self.node.id, + state.to_string() + ); + self.node.state = state; + for (_, pool) in self.pools.iter_mut() { + pool.set_unknown(); + } + } + } +} + +impl NodeWrapperPool { + /// Fetch node via the message bus + async fn fetch_node(node: &str) -> Result { + MessageBus::get_node(node).await.context(BusGetNode { + node, + }) + } + + /// New node wrapper for the pool service containing + /// a list of pools and replicas + async fn new_wrapper(node: &str) -> Result { + let mut node = Self { + // if we can't even fetch the node, then no point in proceeding + node: NodeWrapperPool::fetch_node(node).await?, + ..Default::default() + }; + + // if the node is not online, don't even bother trying to connect + if node.is_online() { + let pools = node.fetch_pools().await?; + let replicas = node.fetch_replicas().await?; + + for pool in &pools { + let replicas = replicas + .iter() + .filter(|r| r.pool == pool.name) + .cloned() + .collect::>(); + node.on_create_pool(pool, &replicas).await; + } + } + // we've got a node, but we might not have the full picture if it's + // offline + Ok(node) + } +} + +/// Helper methods to convert between the message bus types and the +/// mayastor gRPC types + +/// convert rpc pool to a message bus pool +fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: String) -> Pool { + Pool { + node: id, + name: rpc_pool.name.clone(), + disks: rpc_pool.disks.clone(), + state: rpc_pool.state.into(), + capacity: rpc_pool.capacity, + used: rpc_pool.used, + } +} + +/// convert rpc replica to a message bus replica +fn rpc_replica_to_bus( + rpc_replica: &rpc::mayastor::Replica, + id: String, +) -> Replica { + Replica { + node: id, 
+ uuid: rpc_replica.uuid.clone(),
+ pool: rpc_replica.pool.clone(),
+ thin: rpc_replica.thin,
+ size: rpc_replica.size,
+ share: rpc_replica.share.into(),
+ uri: rpc_replica.uri.clone(),
+ }
+}
+
+/// convert a message bus replica create to an rpc replica create
+fn bus_replica_to_rpc(
+ request: &CreateReplica,
+) -> rpc::mayastor::CreateReplicaRequest {
+ rpc::mayastor::CreateReplicaRequest {
+ uuid: request.uuid.clone(),
+ pool: request.pool.clone(),
+ thin: request.thin,
+ size: request.size,
+ share: request.share.clone() as i32,
+ }
+}
+
+/// convert a message bus replica share to an rpc replica share
+fn bus_replica_share_to_rpc(
+ request: &ShareReplica,
+) -> rpc::mayastor::ShareReplicaRequest {
+ rpc::mayastor::ShareReplicaRequest {
+ uuid: request.uuid.clone(),
+ share: request.protocol.clone() as i32,
+ }
+}
+
+/// convert a message bus replica unshare to an rpc replica unshare
+fn bus_replica_unshare_to_rpc(
+ request: &UnshareReplica,
+) -> rpc::mayastor::ShareReplicaRequest {
+ rpc::mayastor::ShareReplicaRequest {
+ uuid: request.uuid.clone(),
+ share: Protocol::Off as i32,
+ }
+}
+
+/// convert a message bus pool create to an rpc pool create
+fn bus_pool_to_rpc(request: &CreatePool) -> rpc::mayastor::CreatePoolRequest {
+ rpc::mayastor::CreatePoolRequest {
+ name: request.name.clone(),
+ disks: request.disks.clone(),
+ }
+}
+
+/// convert a message bus replica destroy to an rpc replica destroy
+fn bus_replica_destroy_to_rpc(
+ request: &DestroyReplica,
+) -> rpc::mayastor::DestroyReplicaRequest {
+ rpc::mayastor::DestroyReplicaRequest {
+ uuid: request.uuid.clone(),
+ }
+}
+
+/// convert a message bus pool destroy to an rpc pool destroy
+fn bus_pool_destroy_to_rpc(
+ request: &DestroyPool,
+) -> rpc::mayastor::DestroyPoolRequest {
+ rpc::mayastor::DestroyPoolRequest {
+ name: request.name.clone(),
+ }
+}
diff --git a/services/common/src/wrapper/v0/registry.rs b/services/common/src/wrapper/v0/registry.rs
new file mode 100644
index 000000000..847658c01
--- /dev/null
+++ b/services/common/src/wrapper/v0/registry.rs
@@ -0,0 +1,400 @@
+use super::{node_traits::*, *};
+
+/// When operating on a resource which is not found, determines whether to
+/// Ignore/Fail the operation or try and fetch the latest version, if possible
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum NotFoundPolicy {
+ #[allow(dead_code)]
+ Ignore,
+ Fetch,
+}
+
+impl Default for NotFoundPolicy {
+ fn default() -> Self {
+ NotFoundPolicy::Fetch
+ }
+}
+
+/// Registry with NodeWrapperTrait which allows us to get the resources either
+/// via gRPC or message bus in a service specific way.
+/// Event propagation from mayastor/services would be useful to avoid thrashing
+/// mayastor instances with gRPC and services with message bus requests. For
+/// now the registry is updated in two ways: every `N` seconds it queries the
+/// node service for changes, and for every request that reaches the instances
+/// it updates itself with the result.
+/// `T` is the specific type of the NodeWrapperTrait which allocates Node
+/// helper Wrappers.
+/// List operations list what the object has been built with or what the cache
+/// has. Fetch operations make use of the node wrapper trait to fetch from
+/// mayastor nodes/other services.
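+///
+/// # Example:
+/// // a sketch of how a pool service might use the registry; the refresh
+/// // period below is illustrative only
+/// let registry = Registry::<NodeWrapperPool>::new(
+///     std::time::Duration::from_secs(30),
+/// );
+/// registry.start();
+/// let pools = registry.list_pools().await;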
+#[derive(Clone, Default, Debug)] +pub struct Registry { + nodes: Arc>>, + update_period: std::time::Duration, + not_found: NotFoundPolicy, + _t: PhantomData, +} + +impl Registry { + /// Create a new registry with the `period` for updates + pub fn new(period: std::time::Duration) -> Self { + Self { + update_period: period, + ..Default::default() + } + } + /// Start thread which updates the registry + pub fn start(&self) { + let registry = self.clone(); + tokio::spawn(async move { + registry.poller().await; + }); + } + + /// List all cached node wrappers + async fn list_nodes_wrapper(&self) -> Vec { + let nodes = self.nodes.lock().await; + nodes.values().cloned().collect() + } + + /// List all cached nodes + pub async fn list_nodes(&self) -> Vec { + let nodes = self.list_nodes_wrapper().await; + nodes.iter().map(|n| n.node()).collect() + } + + /// List all cached pools + pub async fn list_pools_wrapper(&self) -> Vec { + let nodes = self.nodes.lock().await; + nodes + .values() + .map(|node| node.pools_wrapper()) + .flatten() + .collect() + } + + /// Fetch all pools wrapper + pub async fn fetch_pools_wrapper(&self) -> Vec { + match T::fetch_nodes().await { + Ok(mut nodes) => { + for node in &mut nodes { + self.found_node(node).await; + } + } + Err(error) => { + tracing::error!( + "Failed to fetch the latest node information, '{}'", + error + ); + } + }; + + self.list_pools_wrapper().await + } + + /// List all cached pool wrappers + pub async fn list_pools(&self) -> Vec { + let nodes = self.nodes.lock().await; + nodes.values().map(|node| node.pools()).flatten().collect() + } + + /// List all cached pools from node + pub async fn list_node_pools(&self, node: &str) -> Vec { + let nodes = self.list_nodes_wrapper().await; + if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + node.pools() + } else { + // or return error, node not found? + vec![] + } + } + + /// List all cached replicas + pub async fn list_replicas(&self) -> Vec { + let nodes = self.nodes.lock().await; + nodes + .values() + .map(|node| node.replicas()) + .flatten() + .collect() + } + + /// List all cached replicas from node + pub async fn list_node_replicas(&self, node: &str) -> Vec { + let nodes = self.list_nodes_wrapper().await; + if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + node.replicas() + } else { + // or return error, node not found? + vec![] + } + } + + /// Create pool + pub async fn create_pool( + &self, + request: &CreatePool, + ) -> Result { + let pool = self + .get_node(&request.node) + .await? 
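+ // resolve the node wrapper first (fetched on demand under the default
+ // NotFoundPolicy::Fetch), then forward the create over gRPC below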
+    /// Create pool
+    pub async fn create_pool(
+        &self,
+        request: &CreatePool,
+    ) -> Result<Pool, SvcError> {
+        let pool = self
+            .get_node(&request.node)
+            .await?
+            .create_pool(request)
+            .await?;
+        self.on_pool_created(&pool).await;
+        Ok(pool)
+    }
+
+    /// Get current list of known nodes
+    async fn get_known_nodes(&self, node_id: &str) -> Option<NodeWrapper> {
+        let nodes = self.nodes.lock().await;
+        nodes.get(node_id).cloned()
+    }
+    /// Get node `node_id`
+    async fn get_node(&self, node_id: &str) -> Result<NodeWrapper, SvcError> {
+        let mut nodes = self.nodes.lock().await;
+        let node = match nodes.get(node_id) {
+            Some(node) => node.clone(),
+            None => {
+                if self.not_found == NotFoundPolicy::Fetch {
+                    let node = T::new(node_id).await;
+                    if let Ok(node) = node {
+                        nodes.insert(node.id(), node.clone());
+                        node
+                    } else {
+                        return Err(SvcError::BusNodeNotFound {
+                            node_id: node_id.to_string(),
+                        });
+                    }
+                } else {
+                    return Err(SvcError::BusNodeNotFound {
+                        node_id: node_id.to_string(),
+                    });
+                }
+            }
+        };
+        Ok(node)
+    }
+    /// Registry events on crud operations
+    async fn on_pool_created(&self, pool: &Pool) {
+        if let Ok(node) = self.get_node(&pool.node).await {
+            // most likely no replicas, but in case it's an "import"
+            // let's go ahead and fetch them
+            let replicas = node.fetch_replicas().await.unwrap_or_default();
+            {
+                let mut nodes = self.nodes.lock().await;
+                let node = nodes.get_mut(&pool.node);
+                if let Some(node) = node {
+                    node.on_create_pool(pool, &replicas).await;
+                }
+            }
+        }
+    }
+    async fn on_pool_destroyed(&self, request: &DestroyPool) {
+        let mut nodes = self.nodes.lock().await;
+        let node = nodes.get_mut(&request.node);
+        if let Some(node) = node {
+            node.on_destroy_pool(&request.name)
+        }
+    }
+    async fn on_replica_added(&self, replica: &Replica) {
+        let mut nodes = self.nodes.lock().await;
+        let node = nodes.get_mut(&replica.node);
+        if let Some(node) = node {
+            node.on_create_replica(replica);
+        }
+    }
+    async fn on_replica_removed(&self, request: &DestroyReplica) {
+        let mut nodes = self.nodes.lock().await;
+        let node = nodes.get_mut(&request.node);
+        if let Some(node) = node {
+            node.on_destroy_replica(&request.pool, &request.uuid);
+        }
+    }
+    async fn reg_update_replica(
+        &self,
+        node: &str,
+        pool: &str,
+        id: &str,
+        share: &Protocol,
+        uri: &str,
+    ) {
+        let mut nodes = self.nodes.lock().await;
+        let node = nodes.get_mut(node);
+        if let Some(node) = node {
+            node.on_update_replica(pool, id, share, uri);
+        }
+    }
+
+    /// Destroy pool and update registry
+    pub async fn destroy_pool(
+        &self,
+        request: &DestroyPool,
+    ) -> Result<(), SvcError> {
+        let node = self.get_node(&request.node).await?;
+        node.destroy_pool(&request).await?;
+        self.on_pool_destroyed(&request).await;
+        Ok(())
+    }
+
+    /// Create replica and update registry
+    pub async fn create_replica(
+        &self,
+        request: &CreateReplica,
+    ) -> Result<Replica, SvcError> {
+        let node = self.get_node(&request.node).await?;
+        let replica = node.create_replica(&request).await?;
+        self.on_replica_added(&replica).await;
+        Ok(replica)
+    }
+
+    /// Destroy replica and update registry
+    pub async fn destroy_replica(
+        &self,
+        request: &DestroyReplica,
+    ) -> Result<(), SvcError> {
+        let node = self.get_node(&request.node).await?;
+        node.destroy_replica(request).await?;
+        self.on_replica_removed(request).await;
+        Ok(())
+    }
+
+    /// Share replica and update registry
+    pub async fn share_replica(
+        &self,
+        request: &ShareReplica,
+    ) -> Result<String, SvcError> {
+        let node = self.get_node(&request.node).await?;
+        let share = node.share_replica(request).await?;
+        self.reg_update_replica(
+            &request.node,
+            &request.pool,
+            &request.uuid,
+            &request.protocol,
+            &share,
+        )
+        .await;
+        Ok(share)
+    }
+
+    /// Unshare replica and update registry
+    pub async fn unshare_replica(
+        &self,
+        request: &UnshareReplica,
+    ) -> Result<(), SvcError> {
+        let node = self.get_node(&request.node).await?;
+        node.unshare_replica(request).await?;
+        self.reg_update_replica(
+            &request.node,
+            &request.pool,
+            &request.uuid,
+            &Protocol::Off,
+            "",
+        )
+        .await;
+        Ok(())
+    }
+
+    /// Found this node via the node service.
+    /// Update its resource list, or add it to the registry if not there yet.
+    async fn found_node(&self, node: &Node) {
+        match &node.state {
+            NodeState::Online => {
+                self.add_or_update_node(node).await;
+            }
+            state => {
+                // if not online, then only update the node state if it
+                // already exists in the registry, and don't even try to
+                // add it
+                let mut registry = self.nodes.lock().await;
+                if let Some((_, existing_node)) =
+                    registry.iter_mut().find(|(id, _)| id == &&node.id)
+                {
+                    existing_node.set_state(state.clone());
+                }
+            }
+        }
+    }
+
+    /// Mark nodes as missing if they are no longer discoverable by the node
+    /// service
+    async fn mark_missing_nodes(&self, live_nodes: &[Node]) {
+        let mut registry = self.nodes.lock().await;
+        for (name, node) in registry.iter_mut() {
+            let found = live_nodes.iter().find(|n| &n.id == name);
+            // if a node from the registry is not found then mark it as missing
+            if found.is_none() {
+                node.set_state(NodeState::Unknown);
+            }
+        }
+    }
+
+    /// Update a node in the registry
+    async fn update_node(&self, mut node: NodeWrapper) {
+        // update all resources from the node: nexus, pools, etc...
+        // note this is done this way to avoid holding the lock whilst
+        // we're doing gRPC requests
+        node.update().await;
+        let mut registry = self.nodes.lock().await;
+        registry.insert(node.id(), node.clone());
+    }
+
+    /// Add new node to the registry
+    async fn add_node(&self, node: &Node) {
+        match T::new(&node.id).await {
+            Ok(node) => {
+                let mut registry = self.nodes.lock().await;
+                registry.insert(node.id(), node.clone());
+            }
+            Err(error) => {
+                tracing::error!(
+                    "Error when adding node '{}': {}",
+                    node.id,
+                    error
+                );
+            }
+        }
+    }
+
+    /// Add or update a node, depending on whether it's already in the
+    /// registry or not
+    async fn add_or_update_node(&self, node: &Node) {
+        let existing_node = self.get_known_nodes(&node.id).await;
+        if let Some(node) = existing_node {
+            self.update_node(node).await;
+        } else {
+            self.add_node(node).await;
+        }
+    }
+
+    /// Poll the node service for the current nodes it knows about
+    /// and update our view of their resources by querying the specific
+    /// mayastor instances themselves
+    async fn poller(&self) {
+        loop {
+            // collect all the nodes from the node service and then collect
+            // all the nexus and pool information from the nodes themselves
+            // (depending on the specific trait implementations of T)
+            let found_nodes = T::fetch_nodes().await;
+            if let Ok(found_nodes) = found_nodes {
+                self.mark_missing_nodes(&found_nodes).await;
+
+                for node in &found_nodes {
+                    // todo: add a "last seen online" kind of thing to the
+                    // node to avoid retrying to connect to a crashed/missed
+                    // node over and over again when the node service is not
+                    // aware of this yet.
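+                    // refresh this node's cached resources, or add it to
+                    // the registry if this is the first time we see it
+                    // (state-only update if the node is not online)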
+                    self.found_node(node).await;
+                }
+            }
+
+            self.trace_all().await;
+            tokio::time::delay_for(self.update_period).await;
+        }
+    }
+
+    async fn trace_all(&self) {
+        let registry = self.nodes.lock().await;
+        tracing::trace!("Registry update: {:?}", registry);
+    }
+}
diff --git a/services/node/src/server.rs b/services/node/src/server.rs
index 9c6cf58e0..2a93379a9 100644
--- a/services/node/src/server.rs
+++ b/services/node/src/server.rs
@@ -200,6 +200,7 @@ async fn server(cli_args: CliArgs) {
         .with_subscription(ServiceHandler::<Register>::default())
         .with_subscription(ServiceHandler::<Deregister>::default())
         .with_channel(ChannelVs::Node)
+        .with_default_liveness()
         .with_subscription(ServiceHandler::<GetNodes>::default())
         .run()
         .await;
diff --git a/services/pool/src/server.rs b/services/pool/src/server.rs
new file mode 100644
index 000000000..eceda011f
--- /dev/null
+++ b/services/pool/src/server.rs
@@ -0,0 +1,241 @@
+pub mod service;
+
+use async_trait::async_trait;
+use common::*;
+use mbus_api::{v0::*, *};
+use service::*;
+use std::{convert::TryInto, marker::PhantomData};
+use structopt::StructOpt;
+use tracing::info;
+
+#[derive(Debug, StructOpt)]
+struct CliArgs {
+    /// The Nats Server URL to connect to
+    /// (supports the nats schema)
+    /// Default: nats://127.0.0.1:4222
+    #[structopt(long, short, default_value = "nats://127.0.0.1:4222")]
+    nats: String,
+
+    /// The period at which the registry updates its cache of all
+    /// resources from all nodes
+    #[structopt(long, short, default_value = "20s")]
+    period: humantime::Duration,
+}
+
+/// Needed so we can implement the ServiceSubscriber trait for
+/// the message types external to the crate
+#[derive(Clone, Default)]
+struct ServiceHandler<T> {
+    data: PhantomData<T>,
+}
+
+macro_rules! impl_service_handler {
+    // RequestType is the message bus request type
+    // ServiceFnName is the name of the service function to route the request
+    // into
+    ($RequestType:ident, $ServiceFnName:ident) => {
+        #[async_trait]
+        impl ServiceSubscriber for ServiceHandler<$RequestType> {
+            async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> {
+                let request: ReceivedMessage<$RequestType> =
+                    args.request.try_into()?;
+
+                let service: &PoolSvc = args.context.get_state();
+                let reply = service
+                    .$ServiceFnName(&request.inner())
+                    .await
+                    .map_err(|error| Error::ServiceError {
+                        message: error.full_string(),
+                    })?;
+                request.reply(reply).await
+            }
+            fn filter(&self) -> Vec<MessageId> {
+                vec![$RequestType::default().id()]
+            }
+        }
+    };
+}
+
+// todo:
+// a service handler can actually specify a vector of message filters, so we
+// could indeed do the filtering in our service-specific code and have a
+// single entrypoint here
+impl_service_handler!(GetPools, get_pools);
+impl_service_handler!(GetReplicas, get_replicas);
+impl_service_handler!(CreatePool, create_pool);
+impl_service_handler!(DestroyPool, destroy_pool);
+impl_service_handler!(CreateReplica, create_replica);
+impl_service_handler!(DestroyReplica, destroy_replica);
+impl_service_handler!(ShareReplica, share_replica);
+impl_service_handler!(UnshareReplica, unshare_replica);
+
+fn init_tracing() {
+    if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() {
+        tracing_subscriber::fmt().with_env_filter(filter).init();
+    } else {
+        tracing_subscriber::fmt().with_env_filter("info").init();
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    init_tracing();
+
+    let cli_args = CliArgs::from_args();
+    info!("Using options: {:?}", &cli_args);
+
+    server(cli_args).await;
+}
+
+async fn server(cli_args: CliArgs) {
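+    // one subscription is registered per request type: the filter()
+    // generated by impl_service_handler! above matches on the message id
+    // and handler() routes the payload into the corresponding PoolSvc method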
+    Service::builder(cli_args.nats, ChannelVs::Pool)
+        .connect()
+        .await
+        .with_shared_state(PoolSvc::new(cli_args.period.into()))
+        .with_default_liveness()
+        .with_subscription(ServiceHandler::<GetPools>::default())
+        .with_subscription(ServiceHandler::<GetReplicas>::default())
+        .with_subscription(ServiceHandler::<CreatePool>::default())
+        .with_subscription(ServiceHandler::<DestroyPool>::default())
+        .with_subscription(ServiceHandler::<CreateReplica>::default())
+        .with_subscription(ServiceHandler::<DestroyReplica>::default())
+        .with_subscription(ServiceHandler::<ShareReplica>::default())
+        .with_subscription(ServiceHandler::<UnshareReplica>::default())
+        .run()
+        .await;
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use composer::*;
+    use rpc::mayastor::Null;
+
+    async fn wait_for_services() {
+        let _ = GetNodes {}.request().await.unwrap();
+        Liveness {}.request_on(ChannelVs::Pool).await.unwrap();
+    }
+    // to avoid waiting for timeouts
+    async fn orderly_start(test: &ComposeTest) {
+        test.start_containers(vec!["nats", "node", "pool"])
+            .await
+            .unwrap();
+
+        test.connect_to_bus("nats").await;
+        wait_for_services().await;
+
+        test.start("mayastor").await.unwrap();
+
+        let mut hdl = test.grpc_handle("mayastor").await.unwrap();
+        hdl.mayastor.list_nexus(Null {}).await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn pool() {
+        let mayastor = "pool-test-name";
+        let test = Builder::new()
+            .name("pool")
+            .add_container_bin(
+                "nats",
+                Binary::from_nix("nats-server").with_arg("-DV"),
+            )
+            .add_container_bin("node", Binary::from_dbg("node").with_nats("-n"))
+            .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n"))
+            .add_container_bin(
+                "mayastor",
+                Binary::from_dbg("mayastor")
+                    .with_nats("-n")
+                    .with_args(vec!["-N", mayastor])
+                    .with_args(vec!["-g", "10.1.0.5:10124"]),
+            )
+            .with_default_tracing()
+            .autorun(false)
+            .build()
+            .await
+            .unwrap();
+
+        orderly_start(&test).await;
+
+        let nodes = GetNodes {}.request().await.unwrap();
+        tracing::info!("Nodes: {:?}", nodes);
+
+        CreatePool {
+            node: mayastor.to_string(),
+            name: "pooloop".to_string(),
+            disks: vec!["malloc:///disk0?size_mb=100".into()],
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let pools = GetPools::default().request().await.unwrap();
+        tracing::info!("Pools: {:?}", pools);
+
+        let replica = CreateReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+            size: 12582912, /* actual size will be a multiple of 4MB so just
+                             * create it like so */
+            thin: true,
+            share: Protocol::Off,
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let replicas = GetReplicas::default().request().await.unwrap();
+        tracing::info!("Replicas: {:?}", replicas);
+
+        assert_eq!(
+            replica,
+            Replica {
+                node: mayastor.into(),
+                uuid: "replica1".into(),
+                pool: "pooloop".into(),
+                thin: false,
+                size: 12582912,
+                share: Protocol::Off,
+                uri: "bdev:///replica1".into()
+            }
+        );
+
+        let uri = ShareReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+            protocol: Protocol::Nvmf,
+        }
+        .request()
+        .await
+        .unwrap();
+
+        let mut replica_updated = replica;
+        replica_updated.uri = uri;
+        replica_updated.share = Protocol::Nvmf;
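+        // the share is also propagated to the registry cache (via
+        // reg_update_replica), so a fresh GetReplicas below already
+        // reports the nvmf URI and share protocol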
+        let replica = GetReplicas::default().request().await.unwrap();
+        let replica = replica.0.first().unwrap();
+        assert_eq!(replica, &replica_updated);
+
+        DestroyReplica {
+            node: mayastor.into(),
+            uuid: "replica1".into(),
+            pool: "pooloop".into(),
+        }
+        .request()
+        .await
+        .unwrap();
+
+        assert!(GetReplicas::default().request().await.unwrap().0.is_empty());
+
+        DestroyPool {
+            node: mayastor.into(),
+            name: "pooloop".into(),
+        }
+        .request()
+        .await
+        .unwrap();
+
+        assert!(GetPools::default().request().await.unwrap().0.is_empty());
+    }
+}
diff --git a/services/pool/src/service.rs b/services/pool/src/service.rs
new file mode 100644
index 000000000..9e15b5691
--- /dev/null
+++ b/services/pool/src/service.rs
@@ -0,0 +1,204 @@
+// clippy warning caused by the instrument macro
+#![allow(clippy::unit_arg)]
+
+use super::*;
+use common::wrapper::v0::*;
+
+/// Pool service implementation methods
+#[derive(Clone, Debug, Default)]
+pub(super) struct PoolSvc {
+    registry: Registry<NodeWrapperPool>,
+}
+
+impl PoolSvc {
+    /// New Service with the update `period`
+    pub fn new(period: std::time::Duration) -> Self {
+        let obj = Self {
+            registry: Registry::new(period),
+        };
+        obj.start();
+        obj
+    }
+    /// Start registry poller
+    fn start(&self) {
+        self.registry.start();
+    }
+
+    /// Get all pools from node or from all nodes
+    async fn get_node_pools(
+        &self,
+        node_id: Option<String>,
+    ) -> Result<Vec<Pool>, SvcError> {
+        Ok(match node_id {
+            None => self.registry.list_pools().await,
+            Some(node_id) => self.registry.list_node_pools(&node_id).await,
+        })
+    }
+
+    /// Get all replicas from node or from all nodes
+    async fn get_node_replicas(
+        &self,
+        node_id: Option<String>,
+    ) -> Result<Vec<Replica>, SvcError> {
+        Ok(match node_id {
+            None => self.registry.list_replicas().await,
+            Some(node_id) => self.registry.list_node_replicas(&node_id).await,
+        })
+    }
+
+    /// Get pools according to the filter
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_pools(
+        &self,
+        request: &GetPools,
+    ) -> Result<Pools, SvcError> {
+        let filter = request.filter.clone();
+        Ok(Pools(match filter {
+            Filter::None => self.get_node_pools(None).await?,
+            Filter::Node(node_id) => self.get_node_pools(Some(node_id)).await?,
+            Filter::NodePool(node_id, pool_id) => {
+                let pools = self.get_node_pools(Some(node_id)).await?;
+                pools
+                    .iter()
+                    .filter(|&p| p.name == pool_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::Pool(pool_id) => {
+                let pools = self.get_node_pools(None).await?;
+                pools
+                    .iter()
+                    .filter(|&p| p.name == pool_id)
+                    .cloned()
+                    .collect()
+            }
+            _ => {
+                return Err(SvcError::InvalidFilter {
+                    filter,
+                })
+            }
+        }))
+    }
+
+    /// Get replicas according to the filter
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_replicas(
+        &self,
+        request: &GetReplicas,
+    ) -> Result<Replicas, SvcError> {
+        let filter = request.filter.clone();
+        Ok(Replicas(match filter {
+            Filter::None => self.get_node_replicas(None).await?,
+            Filter::Node(node_id) => {
+                self.get_node_replicas(Some(node_id)).await?
+            }
+            Filter::NodePool(node_id, pool_id) => {
+                let replicas = self.get_node_replicas(Some(node_id)).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.pool == pool_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::Pool(pool_id) => {
+                let replicas = self.get_node_replicas(None).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.pool == pool_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::NodePoolReplica(node_id, pool_id, replica_id) => {
+                let replicas = self.get_node_replicas(Some(node_id)).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.pool == pool_id && p.uuid == replica_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::NodeReplica(node_id, replica_id) => {
+                let replicas = self.get_node_replicas(Some(node_id)).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.uuid == replica_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::PoolReplica(pool_id, replica_id) => {
+                let replicas = self.get_node_replicas(None).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.pool == pool_id && p.uuid == replica_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::Replica(replica_id) => {
+                let replicas = self.get_node_replicas(None).await?;
+                replicas
+                    .iter()
+                    .filter(|&p| p.uuid == replica_id)
+                    .cloned()
+                    .collect()
+            }
+            _ => {
+                return Err(SvcError::InvalidFilter {
+                    filter,
+                })
+            }
+        }))
+    }
+
+    /// Create replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_replica(
+        &self,
+        request: &CreateReplica,
+    ) -> Result<Replica, SvcError> {
+        self.registry.create_replica(&request).await
+    }
+
+    /// Destroy replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_replica(
+        &self,
+        request: &DestroyReplica,
+    ) -> Result<(), SvcError> {
+        self.registry.destroy_replica(&request).await
+    }
+
+    /// Share replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn share_replica(
+        &self,
+        request: &ShareReplica,
+    ) -> Result<String, SvcError> {
+        self.registry.share_replica(&request).await
+    }
+
+    /// Unshare replica
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn unshare_replica(
+        &self,
+        request: &UnshareReplica,
+    ) -> Result<(), SvcError> {
+        self.registry.unshare_replica(&request).await
+    }
+
+    /// Create pool
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_pool(
+        &self,
+        request: &CreatePool,
+    ) -> Result<Pool, SvcError> {
+        self.registry.create_pool(request).await
+    }
+
+    /// Destroy pool
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_pool(
+        &self,
+        request: &DestroyPool,
+    ) -> Result<(), SvcError> {
+        self.registry.destroy_pool(request).await
+    }
+}

From da5282baa9350c5d7fdbf9f485179013ccd9b55c Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Fri, 11 Dec 2020 22:12:00 +0000
Subject: [PATCH 68/85] feat: add initial volume service and the rest ...

server and client bindings.

The volume service comprises the nexus and volume operations: the nexus
is the "dumb" part of the service, which creates nexuses where it's
told, and the volume is the slightly smarter part, which attempts to
decide where to create replicas, place nexuses, etc... The volume will
effectively be the brain of the composer testing and act as the
equivalent of moac as the CSI controller plugin for mozart.

todo: mozart will need a clean and simple way of starting all the
required control plane services.
Also, in the tests, do we want to "talk" to the services through REST
or the message bus?
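As a sketch of the message-bus option (illustrative only; the uuid and
size below are made-up values), a test could drive the volume service
directly with the request types introduced by this patch:

    // create a volume with one nexus and two replicas per nexus
    let volume = CreateVolume {
        uuid: "volume1".into(),
        size: 10 * 1024 * 1024,
        nexuses: 1,
        replicas: 2,
        allowed_nodes: vec![],
        preferred_nodes: vec![],
        preferred_nexus_nodes: vec![],
    }
    .request()
    .await?;

The REST alternative exercises the same path through
PUT /v0/volumes/{volume_id} with a CreateVolumeBody payload.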
--- Cargo.lock | 12 +- mayastor/Cargo.toml | 1 - mbus-api/src/mbus_nats.rs | 2 +- mbus-api/src/message_bus/v0.rs | 137 ++++++ mbus-api/src/v0.rs | 341 +++++++++++++- nix/pkgs/mayastor/default.nix | 2 +- rest/Cargo.toml | 1 + rest/service/src/main.rs | 3 + rest/service/src/v0/children.rs | 174 +++++++ rest/service/src/v0/mod.rs | 16 +- rest/service/src/v0/nexuses.rs | 113 +++++ rest/service/src/v0/volumes.rs | 58 +++ rest/src/lib.rs | 7 +- rest/src/versions/v0.rs | 372 +++++++++++++-- rest/tests/test.rs | 31 -- rest/tests/v0_test.rs | 199 +++++--- services/Cargo.toml | 7 +- services/common/src/wrapper/v0/mod.rs | 55 ++- services/common/src/wrapper/v0/node_traits.rs | 142 +++++- services/common/src/wrapper/v0/pool.rs | 3 + services/common/src/wrapper/v0/registry.rs | 125 +++++ services/common/src/wrapper/v0/volume.rs | 444 ++++++++++++++++++ services/volume/src/server.rs | 298 ++++++++++++ services/volume/src/service.rs | 333 +++++++++++++ 24 files changed, 2721 insertions(+), 155 deletions(-) create mode 100644 rest/service/src/v0/children.rs create mode 100644 rest/service/src/v0/nexuses.rs create mode 100644 rest/service/src/v0/volumes.rs delete mode 100644 rest/tests/test.rs create mode 100644 services/common/src/wrapper/v0/volume.rs create mode 100644 services/volume/src/server.rs create mode 100644 services/volume/src/service.rs diff --git a/Cargo.lock b/Cargo.lock index ed97bcbea..6807b3f5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,15 +439,6 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-mutex" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" -dependencies = [ - "event-listener", -] - [[package]] name = "async-net" version = "1.5.0" @@ -2489,7 +2480,6 @@ version = "0.1.0" dependencies = [ "ansi_term 0.12.1", "assert_matches", - "async-mutex", "async-task", "async-trait", "atty", @@ -3556,6 +3546,7 @@ dependencies = [ "tracing", "tracing-futures", "tracing-subscriber", + "url", ] [[package]] @@ -3885,6 +3876,7 @@ dependencies = [ "tracing", "tracing-futures", "tracing-subscriber", + "url", ] [[package]] diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index e36e9af6a..4b6c0fc0b 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -34,7 +34,6 @@ path = "src/bin/casperf.rs" [dependencies] ansi_term = "0.12" -async-mutex = "1.4.0" async-task = "4.0.2" async-trait = "0.1.36" atty = "0.2" diff --git a/mbus-api/src/mbus_nats.rs b/mbus-api/src/mbus_nats.rs index af4eacc9b..8c2d79668 100644 --- a/mbus-api/src/mbus_nats.rs +++ b/mbus-api/src/mbus_nats.rs @@ -47,7 +47,7 @@ pub fn bus() -> DynBus { Box::new( NATS_MSG_BUS .get() - .expect("Should be initialised before use") + .expect("Shared message bus should be initialised before use.") .clone(), ) } diff --git a/mbus-api/src/message_bus/v0.rs b/mbus-api/src/message_bus/v0.rs index f6362ff51..02bc40b0b 100644 --- a/mbus-api/src/message_bus/v0.rs +++ b/mbus-api/src/message_bus/v0.rs @@ -56,6 +56,40 @@ pub type ShareReplica = crate::v0::ShareReplica; pub type UnshareReplica = crate::v0::UnshareReplica; /// Query Filter pub type Filter = crate::v0::Filter; +/// Nexus from the volume service +pub type Nexus = crate::v0::Nexus; +/// Vector of Nexuses from the volume service +pub type Nexuses = crate::v0::Nexuses; +/// State of the nexus +pub type NexusState = crate::v0::NexusState; +/// Child of the nexus +pub type Child = crate::v0::Child; +/// State of the child +pub type ChildState = 
crate::v0::ChildState;
+/// Nexus Create
+pub type CreateNexus = crate::v0::CreateNexus;
+/// Nexus Destroy
+pub type DestroyNexus = crate::v0::DestroyNexus;
+/// Nexus Share
+pub type ShareNexus = crate::v0::ShareNexus;
+/// Nexus Unshare
+pub type UnshareNexus = crate::v0::UnshareNexus;
+/// Remove Nexus Child
+pub type RemoveNexusChild = crate::v0::RemoveNexusChild;
+/// Add Nexus Child
+pub type AddNexusChild = crate::v0::AddNexusChild;
+/// Volume
+pub type Volume = crate::v0::Volume;
+/// Volumes
+pub type Volumes = crate::v0::Volumes;
+/// Add Volume
+pub type CreateVolume = crate::v0::CreateVolume;
+/// Delete Volume
+pub type DestroyVolume = crate::v0::DestroyVolume;
+/// Add Volume Nexus
+pub type AddVolumeNexus = crate::v0::AddVolumeNexus;
+/// Remove Volume Nexus
+pub type RemoveVolumeNexus = crate::v0::RemoveVolumeNexus;
 
 macro_rules! only_one {
     ($list:ident) => {
@@ -164,6 +198,109 @@ pub trait MessageBusTrait: Sized {
         let _ = request.request().await?;
         Ok(())
     }
+
+    /// Get nexuses with filter
+    #[tracing::instrument(level = "debug", err)]
+    async fn get_nexuses(filter: Filter) -> BusResult<Vec<Nexus>> {
+        let nexuses = GetNexuses {
+            filter,
+        }
+        .request()
+        .await?;
+        Ok(nexuses.into_inner())
+    }
+
+    /// Get nexus with filter
+    #[tracing::instrument(level = "debug", err)]
+    async fn get_nexus(filter: Filter) -> BusResult<Nexus> {
+        let nexuses = Self::get_nexuses(filter).await?;
+        only_one!(nexuses)
+    }
+
+    /// create nexus
+    #[tracing::instrument(level = "debug", err)]
+    async fn create_nexus(request: CreateNexus) -> BusResult<Nexus> {
+        Ok(request.request().await?)
+    }
+
+    /// destroy nexus
+    #[tracing::instrument(level = "debug", err)]
+    async fn destroy_nexus(request: DestroyNexus) -> BusResult<()> {
+        request.request().await?;
+        Ok(())
+    }
+
+    /// share nexus
+    #[tracing::instrument(level = "debug", err)]
+    async fn share_nexus(request: ShareNexus) -> BusResult<String> {
+        Ok(request.request().await?)
+    }
+
+    /// unshare nexus
+    #[tracing::instrument(level = "debug", err)]
+    async fn unshare_nexus(request: UnshareNexus) -> BusResult<()> {
+        request.request().await?;
+        Ok(())
+    }
+
+    /// add nexus child
+    #[tracing::instrument(level = "debug", err)]
+    #[allow(clippy::unit_arg)]
+    async fn add_nexus_child(request: AddNexusChild) -> BusResult<Child> {
+        Ok(request.request().await?)
+    }
+
+    /// remove nexus child
+    #[tracing::instrument(level = "debug", err)]
+    #[allow(clippy::unit_arg)]
+    async fn remove_nexus_child(request: RemoveNexusChild) -> BusResult<()> {
+        request.request().await?;
+        Ok(())
+    }
+
+    /// Get volumes with filter
+    #[tracing::instrument(level = "debug", err)]
+    async fn get_volumes(filter: Filter) -> BusResult<Vec<Volume>> {
+        let volumes = GetVolumes {
+            filter,
+        }
+        .request()
+        .await?;
+        Ok(volumes.into_inner())
+    }
+
+    /// Get volume with filter
+    #[tracing::instrument(level = "debug", err)]
+    async fn get_volume(filter: Filter) -> BusResult<Volume> {
+        let volumes = Self::get_volumes(filter).await?;
+        only_one!(volumes)
+    }
+
+    /// create volume
+    #[tracing::instrument(level = "debug", err)]
+    async fn create_volume(request: CreateVolume) -> BusResult<Volume> {
+        Ok(request.request().await?)
+    }
+
+    /// delete volume
+    #[tracing::instrument(level = "debug", err)]
+    async fn delete_volume(request: DestroyVolume) -> BusResult<()> {
+        request.request().await?;
+        Ok(())
+    }
+
+    /// add volume nexus
+    #[tracing::instrument(level = "debug", err)]
+    async fn add_volume_nexus(request: AddVolumeNexus) -> BusResult<Nexus> {
+        Ok(request.request().await?)
+ } + + /// remove volume nexus + #[tracing::instrument(level = "debug", err)] + async fn remove_volume_nexus(request: RemoveVolumeNexus) -> BusResult<()> { + request.request().await?; + Ok(()) + } } /// Implementation of the bus interface trait diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs index 67753445b..9abd1ab35 100644 --- a/mbus-api/src/v0.rs +++ b/mbus-api/src/v0.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; +use std::{cmp::Ordering, fmt::Debug}; use strum_macros::{EnumString, ToString}; /// Versioned Channels @@ -15,6 +15,10 @@ pub enum ChannelVs { Node, /// Pool Service which manages mayastor pools and replicas Pool, + /// Volume Service which manages mayastor volumes + Volume, + /// Nexus Service which manages mayastor nexuses + Nexus, /// Keep it In Sync Service Kiiss, } @@ -67,6 +71,32 @@ pub enum MessageIdVs { ShareReplica, /// Unshare Replica, UnshareReplica, + /// Volume Service + /// + /// Get nexuses with filter + GetNexuses, + /// Create nexus + CreateNexus, + /// Destroy Nexus + DestroyNexus, + /// Share Nexus + ShareNexus, + /// Unshare Nexus + UnshareNexus, + /// Remove a child from its parent nexus + RemoveNexusChild, + /// Add a child to a nexus + AddNexusChild, + /// Get all volumes + GetVolumes, + /// Create Volume, + CreateVolume, + /// Delete Volume + DestroyVolume, + /// Add nexus to volume + AddVolumeNexus, + /// Remove nexus from volume + RemoveVolumeNexus, } // Only V0 should export this macro @@ -226,6 +256,16 @@ pub enum Filter { PoolReplica(String, String), /// Filter by Replica id Replica(String), + /// Volume filters + /// + /// Filter by Node and Nexus + NodeNexus(String, String), + /// Filter by Nexus + Nexus(String), + /// Filter by Node and Volume + NodeVolume(String, String), + /// Filter by Volume + Volume(String), } impl Default for Filter { fn default() -> Self { @@ -290,6 +330,38 @@ pub struct Pool { pub used: u64, } +// online > degraded > unknown/faulted +impl PartialOrd for PoolState { + fn partial_cmp(&self, other: &Self) -> Option { + match self { + PoolState::Unknown => match other { + PoolState::Unknown => None, + PoolState::Online => Some(Ordering::Less), + PoolState::Degraded => Some(Ordering::Less), + PoolState::Faulted => None, + }, + PoolState::Online => match other { + PoolState::Unknown => Some(Ordering::Greater), + PoolState::Online => Some(Ordering::Equal), + PoolState::Degraded => Some(Ordering::Greater), + PoolState::Faulted => Some(Ordering::Greater), + }, + PoolState::Degraded => match other { + PoolState::Unknown => Some(Ordering::Greater), + PoolState::Online => Some(Ordering::Less), + PoolState::Degraded => Some(Ordering::Equal), + PoolState::Faulted => Some(Ordering::Greater), + }, + PoolState::Faulted => match other { + PoolState::Unknown => None, + PoolState::Online => Some(Ordering::Less), + PoolState::Degraded => Some(Ordering::Less), + PoolState::Faulted => Some(Ordering::Equal), + }, + } + } +} + /// Create Pool Request #[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] #[serde(rename_all = "camelCase")] @@ -473,3 +545,270 @@ impl From for ReplicaState { } } } + +/// Volume Nexuses +/// +/// Get all the nexuses with a filter selection +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct GetNexuses { + /// Filter request + pub filter: Filter, +} + +/// Nexus information +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Nexus { + /// id of the mayastor 
instance + pub node: String, + /// uuid of the nexus + pub uuid: String, + /// size of the volume in bytes + pub size: u64, + /// current state of the nexus + pub state: NexusState, + /// array of children + pub children: Vec, + /// URI of the device for the volume (missing if not published). + /// Missing property and empty string are treated the same. + pub device_uri: String, + /// total number of rebuild tasks + pub rebuilds: u32, +} + +/// Child information +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Child { + /// uri of the child device + pub uri: String, + /// state of the child + pub state: ChildState, + /// current rebuild progress (%) + pub rebuild_progress: Option, +} + +/// Child State information +#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)] +pub enum ChildState { + /// Default Unknown state + Unknown = 0, + /// healthy and contains the latest bits + Online = 1, + /// rebuild is in progress (or other recoverable error) + Degraded = 2, + /// unrecoverable error (control plane must act) + Faulted = 3, +} +impl Default for ChildState { + fn default() -> Self { + Self::Unknown + } +} +impl From for ChildState { + fn from(src: i32) -> Self { + match src { + 1 => Self::Online, + 2 => Self::Degraded, + 3 => Self::Faulted, + _ => Self::Unknown, + } + } +} + +/// Nexus State information +#[derive( + Serialize, Deserialize, Debug, Clone, EnumString, ToString, Eq, PartialEq, +)] +pub enum NexusState { + /// Default Unknown state + Unknown = 0, + /// healthy and working + Online = 1, + /// not healthy but is able to serve IO (i.e. rebuild is in progress) + Degraded = 2, + /// broken and unable to serve IO + Faulted = 3, +} +impl Default for NexusState { + fn default() -> Self { + Self::Unknown + } +} +impl From for NexusState { + fn from(src: i32) -> Self { + match src { + 1 => Self::Online, + 2 => Self::Degraded, + 3 => Self::Faulted, + _ => Self::Unknown, + } + } +} + +bus_impl_vector_request!(Nexuses, Nexus); +bus_impl_message_all!(GetNexuses, GetNexuses, Nexuses, Nexus); + +/// Create Nexus Request +#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct CreateNexus { + /// id of the mayastor instance + pub node: String, + /// this UUID will be set in as the UUID + pub uuid: String, + /// size of the device in bytes + pub size: u64, + /// replica can be iscsi and nvmf remote targets or a local spdk bdev + /// (i.e. bdev:///name-of-the-bdev). 
+    ///
+    /// uris to the targets we connect to
+    pub children: Vec<String>,
+}
+bus_impl_message_all!(CreateNexus, CreateNexus, Nexus, Nexus);
+
+/// Destroy Nexus Request
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct DestroyNexus {
+    /// id of the mayastor instance
+    pub node: String,
+    /// uuid of the nexus
+    pub uuid: String,
+}
+bus_impl_message_all!(DestroyNexus, DestroyNexus, (), Nexus);
+
+/// Share Nexus Request
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct ShareNexus {
+    /// id of the mayastor instance
+    pub node: String,
+    /// uuid of the nexus
+    pub uuid: String,
+    /// encryption key
+    pub key: Option<String>,
+    /// share protocol
+    pub protocol: Protocol,
+}
+bus_impl_message_all!(ShareNexus, ShareNexus, String, Nexus);
+
+/// Unshare Nexus Request
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct UnshareNexus {
+    /// id of the mayastor instance
+    pub node: String,
+    /// uuid of the nexus
+    pub uuid: String,
+}
+bus_impl_message_all!(UnshareNexus, UnshareNexus, (), Nexus);
+
+/// Remove Child from Nexus Request
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct RemoveNexusChild {
+    /// id of the mayastor instance
+    pub node: String,
+    /// uuid of the nexus
+    pub nexus: String,
+    /// URI of the child device to be removed
+    pub uri: String,
+}
+bus_impl_message_all!(RemoveNexusChild, RemoveNexusChild, (), Nexus);
+
+/// Add child to Nexus Request
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct AddNexusChild {
+    /// id of the mayastor instance
+    pub node: String,
+    /// uuid of the nexus
+    pub nexus: String,
+    /// URI of the child device to be added
+    pub uri: String,
+    /// auto start rebuilding
+    pub auto_rebuild: bool,
+}
+bus_impl_message_all!(AddNexusChild, AddNexusChild, Child, Nexus);
+
+/// Volumes
+///
+/// Volume information
+#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct Volume {
+    /// uuid of the volume
+    pub uuid: String,
+    /// size of the volume in bytes
+    pub size: u64,
+    /// current state of the volume
+    pub state: NexusState,
+    /// array of children nexuses
+    pub children: Vec<Nexus>,
+}
+
+/// Get volumes
+#[derive(Serialize, Deserialize, Default, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct GetVolumes {
+    /// filter volumes
+    pub filter: Filter,
+}
+bus_impl_vector_request!(Volumes, Volume);
+bus_impl_message_all!(GetVolumes, GetVolumes, Volumes, Volume);
+
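+// note (assumed semantics from the fields below): nexuses/replicas express
+// the desired redundancy of a volume, while the allowed/preferred node lists
+// are placement hints the volume service uses when deciding where to create
+// replicas and nexuses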
"camelCase")] +pub struct DestroyVolume { + /// uuid of the volume + pub uuid: String, +} +bus_impl_message_all!(DestroyVolume, DestroyVolume, (), Volume); + +/// Add ANA Nexus to volume +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct AddVolumeNexus { + /// uuid of the volume + pub uuid: String, + /// preferred node id for the nexus + pub preferred_node: Option, +} +bus_impl_message_all!(AddVolumeNexus, AddVolumeNexus, Nexus, Volume); + +/// Add ANA Nexus to volume +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct RemoveVolumeNexus { + /// uuid of the volume + pub uuid: String, + /// id of the node where the nexus lives + pub node: Option, +} +bus_impl_message_all!(RemoveVolumeNexus, RemoveVolumeNexus, (), Volume); diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index b69c1988e..3cac55000 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "1jpp98vnshymzfm1rhm7hpkgkiah47k0xgpa8ywji1znsvp8wqsc"; + cargoSha256 = "127jpjmsqdhpbgkvp4q1j7xzmbp5d3allcnpcbwxmk701f2z3bmh"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" diff --git a/rest/Cargo.toml b/rest/Cargo.toml index e4d556482..08531806d 100644 --- a/rest/Cargo.toml +++ b/rest/Cargo.toml @@ -29,6 +29,7 @@ strum = "0.19" strum_macros = "0.19" anyhow = "1.0.32" snafu = "0.6" +url = "2.2.0" [dev-dependencies] composer = { path = "../composer" } diff --git a/rest/service/src/main.rs b/rest/service/src/main.rs index 7cb5bee7f..599a99f45 100644 --- a/rest/service/src/main.rs +++ b/rest/service/src/main.rs @@ -52,6 +52,9 @@ async fn main() -> std::io::Result<()> { .service(v0::nodes::factory()) .service(v0::pools::factory()) .service(v0::replicas::factory()) + .service(v0::nexuses::factory()) + .service(v0::children::factory()) + .service(v0::volumes::factory()) }) .bind_rustls(CliArgs::from_args().rest, config)? 
.run() diff --git a/rest/service/src/v0/children.rs b/rest/service/src/v0/children.rs new file mode 100644 index 000000000..d9b97e652 --- /dev/null +++ b/rest/service/src/v0/children.rs @@ -0,0 +1,174 @@ +use super::*; + +struct Factory {} +impl HttpServiceFactory for Factory { + fn register(self, config: &mut AppService) { + get_nexus_children.register(config); + get_nexus_child.register(config); + get_node_nexus_children.register(config); + get_node_nexus_child.register(config); + add_nexus_child.register(config); + add_node_nexus_child.register(config); + delete_nexus_child.register(config); + delete_node_nexus_child.register(config); + } +} +pub(crate) fn factory() -> impl HttpServiceFactory { + Factory {} +} + +#[get("/v0/nexuses/{nexus_id}/children")] +async fn get_nexus_children( + web::Path(nexus_id): web::Path, +) -> impl Responder { + get_children_response(Filter::Nexus(nexus_id)).await +} +#[get("/v0/nodes/{node_id}/nexuses/{nexus_id}/children")] +async fn get_node_nexus_children( + web::Path((node_id, nexus_id)): web::Path<(String, String)>, +) -> impl Responder { + get_children_response(Filter::NodeNexus(node_id, nexus_id)).await +} + +#[get("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn get_nexus_child( + web::Path((nexus_id, child_id)): web::Path<(String, String)>, + req: HttpRequest, +) -> impl Responder { + get_child_response(child_id, req, Filter::Nexus(nexus_id)).await +} +#[get("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn get_node_nexus_child( + web::Path((node_id, nexus_id, child_id)): web::Path<( + String, + String, + String, + )>, + req: HttpRequest, +) -> impl Responder { + get_child_response(child_id, req, Filter::NodeNexus(node_id, nexus_id)) + .await +} + +#[put("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn add_nexus_child( + web::Path((nexus_id, child_id)): web::Path<(String, String)>, + req: HttpRequest, +) -> impl Responder { + add_child_filtered(child_id, req, Filter::Nexus(nexus_id)).await +} +#[put("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn add_node_nexus_child( + web::Path((node_id, nexus_id, child_id)): web::Path<( + String, + String, + String, + )>, + req: HttpRequest, +) -> impl Responder { + add_child_filtered(child_id, req, Filter::NodeNexus(node_id, nexus_id)) + .await +} + +#[delete("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn delete_nexus_child( + web::Path((nexus_id, child_id)): web::Path<(String, String)>, + req: HttpRequest, +) -> impl Responder { + delete_child_filtered(child_id, req, Filter::Nexus(nexus_id)).await +} +#[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] +async fn delete_node_nexus_child( + web::Path((node_id, nexus_id, child_id)): web::Path<( + String, + String, + String, + )>, + req: HttpRequest, +) -> impl Responder { + delete_child_filtered(child_id, req, Filter::NodeNexus(node_id, nexus_id)) + .await +} + +async fn get_children_response( + filter: Filter, +) -> Result { + let nexus = MessageBus::get_nexus(filter).await?; + RestRespond::ok(nexus.children) +} + +async fn get_child_response( + child_id: String, + req: HttpRequest, + filter: Filter, +) -> Result { + let child_id = build_child_uri(child_id, req); + let nexus = MessageBus::get_nexus(filter).await?; + let child = find_nexus_child(&nexus, &child_id)?; + RestRespond::ok(child) +} + +fn find_nexus_child(nexus: &Nexus, child_uri: &str) -> Result { + if let Some(child) = nexus.children.iter().find(|&c| c.uri == child_uri) { + 
Ok(child.clone()) + } else { + Err(BusError::NotFound) + } +} + +async fn add_child_filtered( + child_id: String, + req: HttpRequest, + filter: Filter, +) -> impl Responder { + let child_uri = build_child_uri(child_id, req); + + let nexus = match MessageBus::get_nexus(filter).await { + Ok(nexus) => nexus, + Err(error) => return (RestError::from(error)).into(), + }; + + let create = AddNexusChild { + node: nexus.node, + nexus: nexus.uuid, + uri: child_uri, + auto_rebuild: true, + }; + RestRespond::result(MessageBus::add_nexus_child(create).await) +} + +async fn delete_child_filtered( + child_id: String, + req: HttpRequest, + filter: Filter, +) -> impl Responder { + let child_uri = build_child_uri(child_id, req); + + let nexus = match MessageBus::get_nexus(filter).await { + Ok(nexus) => nexus, + Err(error) => return (RestError::from(error)).into(), + }; + + let destroy = RemoveNexusChild { + node: nexus.node, + nexus: nexus.uuid, + uri: child_uri, + }; + RestRespond::result(MessageBus::remove_nexus_child(destroy).await) +} + +fn build_child_uri(child_id: String, req: HttpRequest) -> String { + match url::Url::parse(&child_id) { + Ok(_) => { + if req.query_string().is_empty() { + child_id + } else { + format!("{}?{}", child_id, req.query_string()) + } + } + _ => { + // not a URL, it's probably legacy, default to AIO + format!("aio://{}", child_id) + } + } +} diff --git a/rest/service/src/v0/mod.rs b/rest/service/src/v0/mod.rs index 44b4ab46a..acf3f4569 100644 --- a/rest/service/src/v0/mod.rs +++ b/rest/service/src/v0/mod.rs @@ -1,9 +1,12 @@ //! Version 0 of the URI's //! Ex: /v0/nodes +pub mod children; +pub mod nexuses; pub mod nodes; pub mod pools; pub mod replicas; +pub mod volumes; use mbus_api::{ message_bus::v0::{MessageBus, *}, @@ -11,6 +14,13 @@ use mbus_api::{ }; use rest_client::versions::v0::*; -use actix_web::{delete, get, put, web, Responder}; - -use actix_web::dev::{AppService, HttpServiceFactory}; +use actix_web::{ + delete, + dev::{AppService, HttpServiceFactory}, + get, + put, + web, + HttpRequest, + HttpResponse, + Responder, +}; diff --git a/rest/service/src/v0/nexuses.rs b/rest/service/src/v0/nexuses.rs new file mode 100644 index 000000000..4e333ad53 --- /dev/null +++ b/rest/service/src/v0/nexuses.rs @@ -0,0 +1,113 @@ +use super::*; + +struct Factory {} +impl HttpServiceFactory for Factory { + fn register(self, config: &mut AppService) { + get_nexuses.register(config); + get_nexus.register(config); + get_node_nexuses.register(config); + get_node_nexus.register(config); + put_node_nexus.register(config); + del_node_nexus.register(config); + del_nexus.register(config); + put_node_nexus_share.register(config); + del_node_nexus_share.register(config); + } +} +pub(crate) fn factory() -> impl HttpServiceFactory { + Factory {} +} + +#[get("/v0/nexuses")] +async fn get_nexuses() -> impl Responder { + RestRespond::result(MessageBus::get_nexuses(Filter::None).await) +} +#[get("/v0/nexuses/{nexus_id}")] +async fn get_nexus(web::Path(nexus_id): web::Path) -> impl Responder { + RestRespond::result(MessageBus::get_nexuses(Filter::Nexus(nexus_id)).await) +} + +#[get("/v0/nodes/{id}/nexuses")] +async fn get_node_nexuses( + web::Path(node_id): web::Path, +) -> impl Responder { + RestRespond::result(MessageBus::get_nexuses(Filter::Node(node_id)).await) +} +#[get("/v0/nodes/{node_id}/nexuses/{nexus_id}")] +async fn get_node_nexus( + web::Path((node_id, nexus_id)): web::Path<(String, String)>, +) -> impl Responder { + RestRespond::result( + MessageBus::get_nexus(Filter::NodeNexus(node_id, 
nexus_id)).await, + ) +} + +#[put("/v0/nodes/{node_id}/nexuses/{nexus_id}")] +async fn put_node_nexus( + web::Path((node_id, nexus_id)): web::Path<(String, String)>, + create: web::Json, +) -> impl Responder { + let create = create.into_inner().bus_request(node_id, nexus_id); + RestRespond::result(MessageBus::create_nexus(create).await) +} + +#[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}")] +async fn del_node_nexus( + web::Path((node_id, nexus_id)): web::Path<(String, String)>, +) -> impl Responder { + destroy_nexus(Filter::NodeNexus(node_id, nexus_id)).await +} +#[delete("/v0/nexuses/{nexus_id}")] +async fn del_nexus(web::Path(nexus_id): web::Path) -> impl Responder { + destroy_nexus(Filter::Nexus(nexus_id)).await +} + +#[put("/v0/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}")] +async fn put_node_nexus_share( + web::Path((node_id, nexus_id, protocol)): web::Path<( + String, + String, + Protocol, + )>, +) -> impl Responder { + let share = ShareNexus { + node: node_id, + uuid: nexus_id, + key: None, + protocol, + }; + RestRespond::result(MessageBus::share_nexus(share).await) +} + +#[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}/share")] +async fn del_node_nexus_share( + web::Path((node_id, nexus_id)): web::Path<(String, String)>, +) -> impl Responder { + let unshare = UnshareNexus { + node: node_id, + uuid: nexus_id, + }; + RestRespond::result(MessageBus::unshare_nexus(unshare).await) +} + +async fn destroy_nexus(filter: Filter) -> impl Responder { + let destroy = match filter.clone() { + Filter::NodeNexus(node_id, nexus_id) => DestroyNexus { + node: node_id, + uuid: nexus_id, + }, + Filter::Nexus(nexus_id) => { + let node_id = match MessageBus::get_nexus(filter).await { + Ok(nexus) => nexus.node, + Err(error) => return (RestError::from(error)).into(), + }; + DestroyNexus { + node: node_id, + uuid: nexus_id, + } + } + _ => return (RestError::from(BusError::NotFound)).into(), + }; + + RestRespond::result(MessageBus::destroy_nexus(destroy).await) +} diff --git a/rest/service/src/v0/volumes.rs b/rest/service/src/v0/volumes.rs new file mode 100644 index 000000000..0b32187ce --- /dev/null +++ b/rest/service/src/v0/volumes.rs @@ -0,0 +1,58 @@ +use super::*; + +struct Factory {} +impl HttpServiceFactory for Factory { + fn register(self, config: &mut AppService) { + get_volumes.register(config); + get_volume.register(config); + get_node_volumes.register(config); + get_node_volume.register(config); + put_volume.register(config); + del_nexus.register(config); + } +} +pub(crate) fn factory() -> impl HttpServiceFactory { + Factory {} +} + +#[get("/v0/volumes")] +async fn get_volumes() -> impl Responder { + RestRespond::result(MessageBus::get_volumes(Filter::None).await) +} + +#[get("/v0/volumes/{volume_id}")] +async fn get_volume(web::Path(volume_id): web::Path) -> impl Responder { + RestRespond::result(MessageBus::get_volume(Filter::Volume(volume_id)).await) +} + +#[get("/v0/nodes/{node_id}/volumes")] +async fn get_node_volumes( + web::Path(node_id): web::Path, +) -> impl Responder { + RestRespond::result(MessageBus::get_volumes(Filter::Node(node_id)).await) +} +#[get("/v0/nodes/{node_id}/volumes/{volume_id}")] +async fn get_node_volume( + web::Path((node_id, volume_id)): web::Path<(String, String)>, +) -> impl Responder { + RestRespond::result( + MessageBus::get_volume(Filter::NodeVolume(node_id, volume_id)).await, + ) +} + +#[put("/v0/volumes/{volume_id}")] +async fn put_volume( + web::Path(volume_id): web::Path, + create: web::Json, +) -> impl Responder { + let create = 
create.into_inner().bus_request(volume_id); + RestRespond::result(MessageBus::create_volume(create).await) +} + +#[delete("/v0/volumes/{volume_id}")] +async fn del_nexus(web::Path(volume_id): web::Path) -> impl Responder { + let request = DestroyVolume { + uuid: volume_id.to_string(), + }; + RestRespond::result(MessageBus::delete_volume(request).await) +} diff --git a/rest/src/lib.rs b/rest/src/lib.rs index b1d30774f..c8b5c63b6 100644 --- a/rest/src/lib.rs +++ b/rest/src/lib.rs @@ -47,7 +47,7 @@ impl ActixRestClient { url: url.to_string(), }) } - async fn get(&self, urn: String, _: fn(R) -> Y) -> anyhow::Result + async fn get_vec(&self, urn: String) -> anyhow::Result> where for<'de> R: Deserialize<'de>, { @@ -63,7 +63,10 @@ impl ActixRestClient { })?; let rest_body = rest_response.body().await?; - Ok(serde_json::from_slice::(&rest_body)?) + match serde_json::from_slice(&rest_body) { + Ok(result) => Ok(result), + Err(_) => Ok(vec![serde_json::from_slice::(&rest_body)?]), + } } async fn put>( &self, diff --git a/rest/src/versions/v0.rs b/rest/src/versions/v0.rs index a67f0a748..d160f1fd6 100644 --- a/rest/src/versions/v0.rs +++ b/rest/src/versions/v0.rs @@ -32,6 +32,10 @@ pub type CreatePool = v0::CreatePool; pub type CreateReplica = v0::CreateReplica; /// Replica Destroy pub type DestroyReplica = v0::DestroyReplica; +/// Replica Share +pub type ShareReplica = v0::ShareReplica; +/// Replica Unshare +pub type UnshareReplica = v0::UnshareReplica; /// Pool Destroy pub type DestroyPool = v0::DestroyPool; /// Create Replica Body JSON @@ -98,8 +102,119 @@ impl CreateReplicaBody { } } } -/// Filter Nodes, Pools, Replicas +/// Filter Nodes, Pools, Replicas, Nexuses pub type Filter = v0::Filter; +/// Nexus from the volume service +pub type Nexus = v0::Nexus; +/// Vector of Nexuses from the volume service +pub type Nexuses = v0::Nexuses; +/// State of the nexus +pub type NexusState = v0::NexusState; +/// Child of the nexus +pub type Child = v0::Child; +/// State of the child +pub type ChildState = v0::ChildState; +/// Nexus Create +pub type CreateNexus = v0::CreateNexus; +/// Nexus Destroy +pub type DestroyNexus = v0::DestroyNexus; +/// Nexus Share +pub type ShareNexus = v0::ShareNexus; +/// Nexus Unshare +pub type UnshareNexus = v0::UnshareNexus; + +/// Create Nexus Body JSON +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct CreateNexusBody { + /// size of the device in bytes + pub size: u64, + /// replica can be iscsi and nvmf remote targets or a local spdk bdev + /// (i.e. bdev:///name-of-the-bdev). 
+    ///
+    /// uris to the targets we connect to
+    pub children: Vec<String>,
+}
+impl From<CreateNexus> for CreateNexusBody {
+    fn from(create: CreateNexus) -> Self {
+        CreateNexusBody {
+            size: create.size,
+            children: create.children,
+        }
+    }
+}
+impl CreateNexusBody {
+    /// convert into message bus type
+    pub fn bus_request(
+        &self,
+        node_id: String,
+        nexus_id: String,
+    ) -> v0::CreateNexus {
+        v0::CreateNexus {
+            node: node_id,
+            uuid: nexus_id,
+            size: self.size,
+            children: self.children.clone(),
+        }
+    }
+}
+/// Remove Nexus Child
+pub type RemoveNexusChild = v0::RemoveNexusChild;
+/// Add Nexus Child
+pub type AddNexusChild = v0::AddNexusChild;
+/// Volume
+pub type Volume = v0::Volume;
+/// Volumes
+pub type Volumes = v0::Volumes;
+/// Add Volume
+pub type CreateVolume = v0::CreateVolume;
+/// Destroy Volume
+pub type DestroyVolume = v0::DestroyVolume;
+
+/// Create Volume Body JSON
+#[derive(Serialize, Deserialize, Default, Debug, Clone)]
+pub struct CreateVolumeBody {
+    /// size of the volume in bytes
+    pub size: u64,
+    /// number of children nexuses (ANA)
+    pub nexuses: u64,
+    /// number of replicas per nexus
+    pub replicas: u64,
+    /// only these nodes can be used for the replicas
+    #[serde(default)]
+    pub allowed_nodes: Vec<String>,
+    /// preferred nodes for the replicas
+    #[serde(default)]
+    pub preferred_nodes: Vec<String>,
+    /// preferred nodes for the nexuses
+    #[serde(default)]
+    pub preferred_nexus_nodes: Vec<String>,
+}
+impl From<CreateVolume> for CreateVolumeBody {
+    fn from(create: CreateVolume) -> Self {
+        CreateVolumeBody {
+            size: create.size,
+            nexuses: create.nexuses,
+            replicas: create.replicas,
+            preferred_nodes: create.preferred_nodes,
+            allowed_nodes: create.allowed_nodes,
+            preferred_nexus_nodes: create.preferred_nexus_nodes,
+        }
+    }
+}
+impl CreateVolumeBody {
+    /// convert into message bus type
+    pub fn bus_request(&self, volume_id: String) -> CreateVolume {
+        CreateVolume {
+            uuid: volume_id,
+            size: self.size,
+            nexuses: self.nexuses,
+            replicas: self.replicas,
+            allowed_nodes: self.allowed_nodes.clone(),
+            preferred_nodes: self.preferred_nodes.clone(),
+            preferred_nexus_nodes: self.preferred_nexus_nodes.clone(),
+        }
+    }
+}
 
 /// RestClient interface
 #[async_trait(?Send)]
@@ -108,34 +223,78 @@ pub trait RestClient {
     async fn get_nodes(&self) -> anyhow::Result<Vec<Node>>;
     /// Get all the known pools
     async fn get_pools(&self, filter: Filter) -> anyhow::Result<Vec<Pool>>;
+    /// Create new pool with arguments
+    async fn create_pool(&self, args: CreatePool) -> anyhow::Result<Pool>;
+    /// Destroy pool with arguments
+    async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()>;
     /// Get all the known replicas
     async fn get_replicas(
         &self,
         filter: Filter,
     ) -> anyhow::Result<Vec<Replica>>;
-    /// Create new pool with arguments
-    async fn create_pool(&self, args: CreatePool) -> anyhow::Result<Pool>;
     /// Create new replica with arguments
     async fn create_replica(
         &self,
         args: CreateReplica,
     ) -> anyhow::Result<Replica>;
-    /// Destroy pool with arguments
-    async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()>;
     /// Destroy replica with arguments
     async fn destroy_replica(&self, args: DestroyReplica) -> anyhow::Result<()>;
+    /// Share replica with arguments
+    async fn share_replica(&self, args: ShareReplica)
+        -> anyhow::Result<String>;
+    /// Unshare replica with arguments
+    async fn unshare_replica(&self, args: UnshareReplica)
+        -> anyhow::Result<()>;
+    /// Get all the known nexuses
+    async fn get_nexuses(&self, filter: Filter) -> anyhow::Result<Vec<Nexus>>;
+    /// Create new nexus with arguments
+    async fn create_nexus(&self, args: CreateNexus) -> anyhow::Result<Nexus>;
+    /// 
Destroy nexus with arguments + async fn destroy_nexus(&self, args: DestroyNexus) -> anyhow::Result<()>; + /// Share nexus + async fn share_nexus(&self, args: ShareNexus) -> anyhow::Result; + /// Unshare nexus + async fn unshare_nexus(&self, args: UnshareNexus) -> anyhow::Result<()>; + /// Remove nexus child + async fn remove_nexus_child( + &self, + args: RemoveNexusChild, + ) -> anyhow::Result<()>; + /// Add nexus child + async fn add_nexus_child( + &self, + args: AddNexusChild, + ) -> anyhow::Result; + /// Get all children by filter + async fn get_nexus_children( + &self, + filter: Filter, + ) -> anyhow::Result>; + /// Get all volumes by filter + async fn get_volumes(&self, filter: Filter) -> anyhow::Result>; + /// Create volume + async fn create_volume(&self, args: CreateVolume) + -> anyhow::Result; + /// Destroy volume + async fn destroy_volume(&self, args: DestroyVolume) -> anyhow::Result<()>; } #[derive(Display, Debug)] #[allow(clippy::enum_variant_names)] enum RestURNs { #[strum(serialize = "nodes")] - GetNodes(Nodes), + GetNodes(Node), #[strum(serialize = "pools")] - GetPools(Pools), + GetPools(Pool), #[strum(serialize = "replicas")] - GetReplicas(Replicas), + GetReplicas(Replica), + #[strum(serialize = "nexuses")] + GetNexuses(Nexus), + #[strum(serialize = "children")] + GetChildren(Child), + #[strum(serialize = "volumes")] + GetVolumes(Volume), /* does not work as expect as format! only takes literals... * #[strum(serialize = "nodes/{}/pools/{}")] * PutPool(Pool), */ @@ -143,21 +302,18 @@ enum RestURNs { macro_rules! get_all { ($S:ident, $T:ident) => { - $S.get( - format!("/v0/{}", RestURNs::$T(Default::default()).to_string()), - RestURNs::$T, - ) + $S.get_vec(format!( + "/v0/{}", + RestURNs::$T(Default::default()).to_string() + )) }; } macro_rules! get_filter { ($S:ident, $F:ident, $T:ident) => { - $S.get( - format!( - "/v0/{}", - get_filtered_urn($F, &RestURNs::$T(Default::default()))? - ), - RestURNs::$T, - ) + $S.get_vec(format!( + "/v0/{}", + get_filtered_urn($F, &RestURNs::$T(Default::default()))? 
+ )) }; } @@ -188,6 +344,27 @@ fn get_filtered_urn(filter: Filter, r: &RestURNs) -> anyhow::Result { format!("nodes/{}/pools/{}/replicas/{}", n, p, r) } Filter::PoolReplica(p, r) => format!("pools/{}/replicas/{}", p, r), + _ => return Err(anyhow::Error::msg("Invalid filter for replicas")), + }, + RestURNs::GetNexuses(_) => match filter { + Filter::None => "nexuses".to_string(), + Filter::Node(n) => format!("nodes/{}/nexuses", n), + Filter::NodeNexus(n, x) => format!("nodes/{}/nexuses/{}", n, x), + Filter::Nexus(x) => format!("nexuses/{}", x), + _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), + }, + RestURNs::GetChildren(_) => match filter { + Filter::NodeNexus(n, x) => { + format!("nodes/{}/nexuses/{}/children", n, x) + } + Filter::Nexus(x) => format!("nexuses/{}/children", x), + _ => return Err(anyhow::Error::msg("Invalid filter for nexuses")), + }, + RestURNs::GetVolumes(_) => match filter { + Filter::None => "volumes".to_string(), + Filter::Node(n) => format!("nodes/{}/volumes", n), + Filter::Volume(x) => format!("volumes/{}", x), + _ => return Err(anyhow::Error::msg("Invalid filter for volumes")), }, }; @@ -198,12 +375,24 @@ fn get_filtered_urn(filter: Filter, r: &RestURNs) -> anyhow::Result { impl RestClient for ActixRestClient { async fn get_nodes(&self) -> anyhow::Result> { let nodes = get_all!(self, GetNodes).await?; - Ok(nodes.into_inner()) + Ok(nodes) } async fn get_pools(&self, filter: Filter) -> anyhow::Result> { let pools = get_filter!(self, filter, GetPools).await?; - Ok(pools.into_inner()) + Ok(pools) + } + + async fn create_pool(&self, args: CreatePool) -> anyhow::Result { + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + let pool = self.put(urn, CreatePoolBody::from(args)).await?; + Ok(pool) + } + + async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()> { + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + self.del(urn).await?; + Ok(()) } async fn get_replicas( @@ -211,13 +400,7 @@ impl RestClient for ActixRestClient { filter: Filter, ) -> anyhow::Result> { let replicas = get_filter!(self, filter, GetReplicas).await?; - Ok(replicas.into_inner()) - } - - async fn create_pool(&self, args: CreatePool) -> anyhow::Result { - let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); - let pool = self.put(urn, CreatePoolBody::from(args)).await?; - Ok(pool) + Ok(replicas) } async fn create_replica( @@ -232,23 +415,136 @@ impl RestClient for ActixRestClient { Ok(replica) } - async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()> { - let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + async fn destroy_replica( + &self, + args: DestroyReplica, + ) -> anyhow::Result<()> { + let urn = format!( + "/v0/nodes/{}/pools/{}/replicas/{}", + &args.node, &args.pool, &args.uuid + ); self.del(urn).await?; Ok(()) } - async fn destroy_replica( + /// Share replica with arguments + async fn share_replica( &self, - args: DestroyReplica, + args: ShareReplica, + ) -> anyhow::Result { + let urn = format!( + "/v0/nodes/{}/pools/{}/replicas/{}/share/{}", + &args.node, + &args.pool, + &args.uuid, + args.protocol.to_string() + ); + let share = self.put(urn, Body::Empty).await?; + Ok(share) + } + /// Unshare replica with arguments + async fn unshare_replica( + &self, + args: UnshareReplica, ) -> anyhow::Result<()> { let urn = format!( - "/v0/nodes/{}/pools/{}/replicas/{}", + "/v0/nodes/{}/pools/{}/replicas/{}/share", &args.node, &args.pool, &args.uuid ); self.del(urn).await?; Ok(()) } + + 
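+    // The replica share calls above compose URNs in a fixed layout; as an
+    // illustrative sketch (values are examples only):
+    //
+    //   PUT /v0/nodes/{node}/pools/{pool}/replicas/{uuid}/share/nvmf
+    //   DEL /v0/nodes/{node}/pools/{pool}/replicas/{uuid}/share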
async fn get_nexuses(&self, filter: Filter) -> anyhow::Result> { + let nexuses = get_filter!(self, filter, GetNexuses).await?; + Ok(nexuses) + } + + async fn get_nexus_children( + &self, + filter: Filter, + ) -> anyhow::Result> { + let children = get_filter!(self, filter, GetChildren).await?; + Ok(children) + } + + async fn create_nexus(&self, args: CreateNexus) -> anyhow::Result { + let urn = format!("/v0/nodes/{}/nexuses/{}", &args.node, &args.uuid); + let replica = self.put(urn, CreateNexusBody::from(args)).await?; + Ok(replica) + } + + async fn destroy_nexus(&self, args: DestroyNexus) -> anyhow::Result<()> { + let urn = format!("/v0/nodes/{}/nexuses/{}", &args.node, &args.uuid); + self.del(urn).await?; + Ok(()) + } + + /// Share nexus + async fn share_nexus(&self, args: ShareNexus) -> anyhow::Result { + let urn = format!( + "/v0/nodes/{}/nexuses/{}/share/{}", + &args.node, + &args.uuid, + args.protocol.to_string() + ); + let nexus = self.put(urn, Body::Empty).await?; + Ok(nexus) + } + + /// Unshare nexus + async fn unshare_nexus(&self, args: UnshareNexus) -> anyhow::Result<()> { + let urn = + format!("/v0/nodes/{}/nexuses/{}/share", &args.node, &args.uuid); + self.del(urn).await?; + Ok(()) + } + + async fn remove_nexus_child( + &self, + args: RemoveNexusChild, + ) -> anyhow::Result<()> { + let urn = match url::Url::parse(&args.uri) { + Ok(uri) => { + // remove initial '/' + uri.path()[1 ..].to_string() + } + _ => args.uri.clone(), + }; + self.del(urn).await?; + Ok(()) + } + async fn add_nexus_child( + &self, + args: AddNexusChild, + ) -> anyhow::Result { + let urn = format!( + "/v0/nodes/{}/nexuses/{}/children/{}", + &args.node, &args.nexus, &args.uri + ); + let replica = self.put(urn, Body::Empty).await?; + Ok(replica) + } + + async fn get_volumes(&self, filter: Filter) -> anyhow::Result> { + let volumes = get_filter!(self, filter, GetVolumes).await?; + Ok(volumes) + } + + async fn create_volume( + &self, + args: CreateVolume, + ) -> anyhow::Result { + let urn = format!("/v0/volumes/{}", &args.uuid); + let volume = self.put(urn, CreateVolumeBody::from(args)).await?; + Ok(volume) + } + + async fn destroy_volume(&self, args: DestroyVolume) -> anyhow::Result<()> { + let urn = format!("/v0/volumes/{}", &args.uuid); + self.del(urn).await?; + Ok(()) + } } impl Into for CreatePoolBody { @@ -261,6 +557,16 @@ impl Into for CreateReplicaBody { Body::from(serde_json::to_value(self).unwrap()) } } +impl Into for CreateNexusBody { + fn into(self) -> Body { + Body::from(serde_json::to_value(self).unwrap()) + } +} +impl Into for CreateVolumeBody { + fn into(self) -> Body { + Body::from(serde_json::to_value(self).unwrap()) + } +} impl ActixRestClient { /// Get RestClient v0 diff --git a/rest/tests/test.rs b/rest/tests/test.rs deleted file mode 100644 index 4204929b3..000000000 --- a/rest/tests/test.rs +++ /dev/null @@ -1,31 +0,0 @@ -pub use composer::*; -use mbus_api::{message_bus_init_options, TimeoutOptions}; -use std::time::Duration; -pub use tracing::info; - -fn init_tracing() { - if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { - tracing_subscriber::fmt().with_env_filter(filter).init(); - } else { - tracing_subscriber::fmt().with_env_filter("debug,h2=info,bollard=info,hyper=info,trust_dns_resolver=info,rustls=info,tower_buffer=info").init(); - } -} - -pub fn init() { - init_tracing(); -} - -pub async fn bus_init(nats: &str) -> Result<(), Box> { - tokio::time::timeout(Duration::from_secs(2), async { - message_bus_init_options( - nats.into(), - TimeoutOptions::new() - 
.with_timeout(Duration::from_millis(150)) - .with_max_retries(10) - .with_timeout_backoff(Duration::from_millis(100)), - ) - .await - }) - .await?; - Ok(()) -} diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs index a6cb984d9..ed6c3e690 100644 --- a/rest/tests/v0_test.rs +++ b/rest/tests/v0_test.rs @@ -1,60 +1,45 @@ -mod test; +use composer::{Binary, Builder, ComposeTest, ContainerSpec}; use mbus_api::{ - v0::{GetNodes, GetPools, NodeState, PoolState}, + v0::{ChannelVs, Liveness, NodeState, PoolState}, Message, }; use rest_client::{versions::v0::*, ActixRestClient}; use rpc::mayastor::Null; -use test::{Binary, Builder, ComposeTest, ContainerSpec}; use tracing::info; -async fn wait_for_node() -> Result<(), Box> { - let _ = GetNodes {}.request().await?; - Ok(()) -} -async fn wait_for_pool() -> Result<(), Box> { - let _ = GetPools { - filter: Default::default(), - } - .request() - .await?; - Ok(()) +async fn wait_for_services() { + Liveness {}.request_on(ChannelVs::Node).await.unwrap(); + Liveness {}.request_on(ChannelVs::Pool).await.unwrap(); + Liveness {}.request_on(ChannelVs::Volume).await.unwrap(); } // to avoid waiting for timeouts -async fn orderly_start( - test: &ComposeTest, -) -> Result<(), Box> { - test.start_containers(vec!["nats", "node", "pool", "rest"]) - .await?; +async fn orderly_start(test: &ComposeTest) { + test.start_containers(vec!["nats", "node", "pool", "volume", "rest"]) + .await + .unwrap(); - test::bus_init("localhost").await?; - wait_for_node().await?; - wait_for_pool().await?; + test.connect_to_bus("nats").await; + wait_for_services().await; - test.start("mayastor").await?; + test.start("mayastor").await.unwrap(); - let mut hdl = test.grpc_handle("mayastor").await?; - hdl.mayastor.list_nexus(Null {}).await?; - Ok(()) + let mut hdl = test.grpc_handle("mayastor").await.unwrap(); + hdl.mayastor.list_nexus(Null {}).await.unwrap(); } #[actix_rt::test] -async fn client() -> Result<(), Box> { - test::init(); - +async fn client() { let mayastor = "node-test-name"; let test = Builder::new() .name("rest") - .add_container_spec( - ContainerSpec::from_binary( - "nats", - Binary::from_nix("nats-server").with_arg("-DV"), - ) - .with_portmap("4222", "4222"), - ) + .add_container_spec(ContainerSpec::from_binary( + "nats", + Binary::from_nix("nats-server").with_arg("-DV"), + )) .add_container_bin("node", Binary::from_dbg("node").with_nats("-n")) .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n")) + .add_container_bin("volume", Binary::from_dbg("volume").with_nats("-n")) .add_container_spec( ContainerSpec::from_binary( "rest", @@ -67,46 +52,38 @@ async fn client() -> Result<(), Box> { Binary::from_dbg("mayastor") .with_nats("-n") .with_args(vec!["-N", mayastor]) - .with_args(vec!["-g", "10.1.0.6:10124"]), + .with_args(vec!["-g", "10.1.0.7:10124"]), ) + .with_default_tracing() .autorun(false) .build() - .await?; - - let result = client_test(mayastor, &test).await; - - // run with --nocapture to see all the logs - test.logs_all().await?; - - result?; + .await + .unwrap(); - Ok(()) + client_test(mayastor, &test).await; } -async fn client_test( - mayastor: &str, - test: &ComposeTest, -) -> Result<(), Box> { - orderly_start(&test).await?; +async fn client_test(mayastor: &str, test: &ComposeTest) { + orderly_start(&test).await; - let client = ActixRestClient::new("https://localhost:8080")?.v0(); - let nodes = client.get_nodes().await?; + let client = ActixRestClient::new("https://localhost:8080").unwrap().v0(); + let nodes = client.get_nodes().await.unwrap(); 
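+    // The compose cluster above runs a single mayastor container, so exactly
+    // one node should have registered itself by now. A hedged retry sketch
+    // for slower hosts (not part of the test as written):
+    //
+    //   for _ in 0..10u8 {
+    //       if !client.get_nodes().await.unwrap().is_empty() { break; }
+    //       tokio::time::delay_for(std::time::Duration::from_millis(250)).await;
+    //   }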
assert_eq!(nodes.len(), 1); assert_eq!( nodes.first().unwrap(), &Node { id: mayastor.to_string(), - grpc_endpoint: "10.1.0.6:10124".to_string(), + grpc_endpoint: "10.1.0.7:10124".to_string(), state: NodeState::Online, } ); info!("Nodes: {:#?}", nodes); - let _ = client.get_pools(Filter::None).await?; + let _ = client.get_pools(Filter::None).await.unwrap(); let pool = client.create_pool(CreatePool { node: mayastor.to_string(), name: "pooloop".to_string(), - disks: vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()] - }).await?; + disks: + vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()] }).await.unwrap(); info!("Pools: {:#?}", pool); assert_eq!( pool, @@ -119,8 +96,11 @@ async fn client_test( used: 0, } ); - assert_eq!(Some(&pool), client.get_pools(Filter::None).await?.first()); - let _ = client.get_replicas(Filter::None).await?; + assert_eq!( + Some(&pool), + client.get_pools(Filter::None).await.unwrap().first() + ); + let _ = client.get_replicas(Filter::None).await.unwrap(); let replica = client .create_replica(CreateReplica { node: pool.node.clone(), @@ -131,7 +111,8 @@ async fn client_test( thin: true, share: Protocol::Nvmf, }) - .await?; + .await + .unwrap(); info!("Replica: {:#?}", replica); assert_eq!( replica, @@ -142,13 +123,13 @@ async fn client_test( thin: false, size: 12582912, share: Protocol::Nvmf, - uri: "nvmf://10.1.0.6:8420/nqn.2019-05.io.openebs:replica1" + uri: "nvmf://10.1.0.7:8420/nqn.2019-05.io.openebs:replica1" .to_string(), } ); assert_eq!( Some(&replica), - client.get_replicas(Filter::None).await?.first() + client.get_replicas(Filter::None).await.unwrap().first() ); client .destroy_replica(DestroyReplica { @@ -156,18 +137,98 @@ async fn client_test( pool: replica.pool.clone(), uuid: replica.uuid, }) - .await?; - assert_eq!(client.get_replicas(Filter::None).await?.is_empty(), true); + .await + .unwrap(); + assert!(client.get_replicas(Filter::None).await.unwrap().is_empty()); + + let nexuses = client.get_nexuses(Filter::None).await.unwrap(); + assert_eq!(nexuses.len(), 0); + let nexus = client + .create_nexus(CreateNexus { + node: "node-test-name".to_string(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + size: 12582912, + children: vec!["malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()]}) + .await.unwrap(); + info!("Nexus: {:#?}", nexus); + + assert_eq!( + nexus, + Nexus { + node: "node-test-name".to_string(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + size: 12582912, + state: NexusState::Online, + children: vec![Child { + uri: "malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string(), + state: ChildState::Online, + rebuild_progress: None + }], + device_uri: "".to_string(), + rebuilds: 0, + } + ); + + let _ = client.add_nexus_child(AddNexusChild { + node: nexus.node.clone(), + nexus: nexus.uuid.clone(), + uri: "malloc:///malloc2?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b1".to_string(), + auto_rebuild: true, + }).await.unwrap(); + + client + .destroy_nexus(DestroyNexus { + node: nexus.node.clone(), + uuid: nexus.uuid.clone(), + }) + .await + .unwrap(); + assert!(client.get_nexuses(Filter::None).await.unwrap().is_empty()); + + let volume = client + .create_volume(CreateVolume { + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + size: 12582912, + nexuses: 1, + replicas: 1, + allowed_nodes: vec![], + 
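+            // empty placement hints: the volume service is free to pick the
+            // nodes for the replicas and the nexus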
preferred_nodes: vec![], + preferred_nexus_nodes: vec![], + }) + .await + .unwrap(); + + tracing::info!("Volume: {:#?}", volume); + assert_eq!( + Some(&volume), + client + .get_volumes(Filter::Volume( + "058a95e5-cee6-4e81-b682-fe864ca99b9c".into() + )) + .await + .unwrap() + .first() + ); + + client + .destroy_volume(DestroyVolume { + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + }) + .await + .unwrap(); + + assert!(client.get_volumes(Filter::None).await.unwrap().is_empty()); + client .destroy_pool(DestroyPool { node: pool.node.clone(), name: pool.name, }) - .await?; - assert_eq!(client.get_pools(Filter::None).await?.is_empty(), true); + .await + .unwrap(); + assert!(client.get_pools(Filter::None).await.unwrap().is_empty()); - test.stop("mayastor").await?; + test.stop("mayastor").await.unwrap(); tokio::time::delay_for(std::time::Duration::from_millis(250)).await; - assert!(client.get_nodes().await?.is_empty()); - Ok(()) + assert!(client.get_nodes().await.unwrap().is_empty()); } diff --git a/services/Cargo.toml b/services/Cargo.toml index 71edb0f70..9d919a50d 100644 --- a/services/Cargo.toml +++ b/services/Cargo.toml @@ -16,6 +16,10 @@ path = "node/src/server.rs" name = "pool" path = "pool/src/server.rs" +[[bin]] +name = "volume" +path = "volume/src/server.rs" + [lib] name = "common" path = "common/src/lib.rs" @@ -39,7 +43,8 @@ tracing = "0.1" tracing-subscriber = "0.2" tracing-futures = "0.2.4" rpc = { path = "../rpc" } -http = "0.2.2" +url = "2.2.0" +http = "0.2.1" [dev-dependencies] composer = { path = "../composer" } diff --git a/services/common/src/wrapper/v0/mod.rs b/services/common/src/wrapper/v0/mod.rs index 8bde247f1..c929ad668 100644 --- a/services/common/src/wrapper/v0/mod.rs +++ b/services/common/src/wrapper/v0/mod.rs @@ -6,6 +6,7 @@ mod registry; pub use pool::NodeWrapperPool; pub use registry::Registry; +pub use volume::NodeWrapperVolume; use async_trait::async_trait; use dyn_clonable::clonable; @@ -16,6 +17,7 @@ use mbus_api::{ use rpc::mayastor::{mayastor_client::MayastorClient, Null}; use snafu::{ResultExt, Snafu}; use std::{ + cmp::Ordering, collections::HashMap, fmt::Debug, marker::PhantomData, @@ -31,6 +33,14 @@ use tonic::transport::Channel; pub enum SvcError { #[snafu(display("Failed to get nodes from the node service"))] BusGetNodes { source: BusError }, + #[snafu(display("Failed to get pools from the pool service"))] + BusGetPools { source: mbus_api::Error }, + #[snafu(display("Failed to create pool from the pool service"))] + BusCreatePool { source: mbus_api::Error }, + #[snafu(display("Failed to destroy pool from the pool service"))] + BusDestroyPool { source: mbus_api::Error }, + #[snafu(display("Failed to destroy pool from the pool service"))] + BusGetReplicas { source: mbus_api::Error }, #[snafu(display("Failed to get node '{}' from the node service", node))] BusGetNode { source: BusError, node: String }, #[snafu(display("Node '{}' is not online", node))] @@ -70,12 +80,55 @@ pub enum SvcError { #[snafu(display("Failed to unshare nexus via gRPC"))] GrpcUnshareNexus { source: tonic::Status }, #[snafu(display("Failed to volume due to insufficient resources"))] - NotEnoughResources {}, + NotEnoughResources { source: NotEnough }, #[snafu(display("Invalid arguments"))] InvalidArguments {}, #[snafu(display("Not implemented"))] NotImplemented {}, } +impl From for SvcError { + fn from(source: NotEnough) -> Self { + Self::NotEnoughResources { + source, + } + } +} + +/// Not enough resources available +#[derive(Debug, Snafu)] +#[allow(missing_docs)] +pub 
enum NotEnough { + #[snafu(display( + "Not enough suitable pools available, {}/{}", + have, + need + ))] + OfPools { have: u64, need: u64 }, + #[snafu(display("Not enough replicas available, {}/{}", have, need))] + OfReplicas { have: u64, need: u64 }, + #[snafu(display("Not enough nexuses available, {}/{}", have, need))] + OfNexuses { have: u64, need: u64 }, +} + +/// Implement default fake NodeNexusChildTrait for a type +#[macro_export] +macro_rules! impl_no_nexus_child { + ($F:ident) => { + #[async_trait] + impl NodeNexusChildTrait for $F {} + }; +} + +/// Implement default fake NodeNexusTrait for a type +#[macro_export] +macro_rules! impl_no_nexus { + ($F:ident) => { + #[async_trait] + impl NodeNexusTrait for $F {} + }; +} + mod node_traits; mod pool; +mod volume; diff --git a/services/common/src/wrapper/v0/node_traits.rs b/services/common/src/wrapper/v0/node_traits.rs index d50d4c2cf..6925fba14 100644 --- a/services/common/src/wrapper/v0/node_traits.rs +++ b/services/common/src/wrapper/v0/node_traits.rs @@ -89,12 +89,109 @@ pub trait NodePoolTrait: Send + Sync + Debug + Clone { fn on_destroy_pool(&mut self, pool: &str); } +/// Trait for a Node Nexus which can be implemented to interact with mayastor +/// node nexuses either via gRPC or MBUS or with a service via MBUS +#[async_trait] +#[clonable] +#[allow(unused_variables)] +pub trait NodeNexusTrait: Send + Sync + Debug + Clone { + /// Get the internal nexuses + fn nexuses(&self) -> Vec { + vec![] + } + + /// Fetch all nexuses via gRPC or MBUS + async fn fetch_nexuses(&self) -> Result, SvcError> { + Err(SvcError::NotImplemented {}) + } + + /// Create a nexus on a node via gRPC or MBUS + async fn create_nexus( + &self, + request: &CreateNexus, + ) -> Result { + Err(SvcError::NotImplemented {}) + } + + /// Destroy a nexus on a node via gRPC or MBUS + async fn destroy_nexus( + &self, + request: &DestroyNexus, + ) -> Result<(), SvcError> { + Err(SvcError::NotImplemented {}) + } + + /// Share a nexus on the node via gRPC + async fn share_nexus( + &self, + request: &ShareNexus, + ) -> Result { + Err(SvcError::NotImplemented {}) + } + + /// Unshare a nexus on the node via gRPC + async fn unshare_nexus( + &self, + request: &UnshareNexus, + ) -> Result<(), SvcError> { + Err(SvcError::NotImplemented {}) + } + + /// Update internal nexus list following a create + fn on_create_nexus(&mut self, nexus: &Nexus) {} + /// Update internal nexus following a share/unshare + fn on_update_nexus(&mut self, nexus: &str, uri: &str) {} + /// Update internal nexus list following a destroy + fn on_destroy_nexus(&mut self, nexus: &str) {} +} + +/// Trait for a Node Nexus Children which can be implemented to interact with +/// mayastor node nexus children either via gRPC or MBUS or with a service via +/// MBUS +#[async_trait] +#[clonable] +#[allow(unused_variables)] +pub trait NodeNexusChildTrait: Send + Sync + Debug + Clone { + /// Fetch all children via gRPC or MBUS + async fn fetch_children(&self) -> Result, SvcError> { + Err(SvcError::NotImplemented {}) + } + + /// Add a child to a nexus via gRPC or MBUS + async fn add_child( + &self, + request: &AddNexusChild, + ) -> Result { + Err(SvcError::NotImplemented {}) + } + + /// Remove a child from a nexus via gRPC or MBUS + async fn remove_child( + &self, + request: &RemoveNexusChild, + ) -> Result<(), SvcError> { + Err(SvcError::NotImplemented {}) + } + + /// Update internal nexus children following a create + fn on_add_child(&mut self, nexus: &str, child: &Child) {} + /// Update internal nexus children following a 
remove + fn on_remove_child(&mut self, request: &RemoveNexusChild) {} +} + /// Trait for a Node which can be implemented to interact with mayastor /// node replicas either via gRPC or MBUS or with a service via MBUS #[async_trait] #[clonable] pub trait NodeWrapperTrait: - Send + Sync + Debug + Clone + NodeReplicaTrait + NodePoolTrait + Send + + Sync + + Debug + + Clone + + NodeReplicaTrait + + NodePoolTrait + + NodeNexusTrait + + NodeNexusChildTrait { /// New NodeWrapper for the node #[allow(clippy::new_ret_no_self)] @@ -223,3 +320,46 @@ impl PoolWrapper { } } } + +// 1. state ( online > degraded ) +// 2. smaller n replicas +// (here we should have pool IO stats over time so we can pick less active +// pools rather than the number of replicas which is useless if the volumes +// are not active) +impl PartialOrd for PoolWrapper { + fn partial_cmp(&self, other: &Self) -> Option { + match self.pool.state.partial_cmp(&other.pool.state) { + Some(Ordering::Greater) => Some(Ordering::Greater), + Some(Ordering::Less) => Some(Ordering::Less), + Some(Ordering::Equal) => { + match self.replicas.len().cmp(&other.replicas.len()) { + Ordering::Greater => Some(Ordering::Greater), + Ordering::Less => Some(Ordering::Less), + Ordering::Equal => { + Some(self.free_space().cmp(&other.free_space())) + } + } + } + None => None, + } + } +} + +impl Ord for PoolWrapper { + fn cmp(&self, other: &Self) -> Ordering { + match self.pool.state.partial_cmp(&other.pool.state) { + Some(Ordering::Greater) => Ordering::Greater, + Some(Ordering::Less) => Ordering::Less, + Some(Ordering::Equal) => { + match self.replicas.len().cmp(&other.replicas.len()) { + Ordering::Greater => Ordering::Greater, + Ordering::Less => Ordering::Less, + Ordering::Equal => { + self.free_space().cmp(&other.free_space()) + } + } + } + None => Ordering::Equal, + } + } +} diff --git a/services/common/src/wrapper/v0/pool.rs b/services/common/src/wrapper/v0/pool.rs index 79a269df0..a330ba461 100644 --- a/services/common/src/wrapper/v0/pool.rs +++ b/services/common/src/wrapper/v0/pool.rs @@ -273,6 +273,9 @@ impl NodeWrapperPool { } } +impl_no_nexus_child!(NodeWrapperPool); +impl_no_nexus!(NodeWrapperPool); + /// Helper methods to convert between the message bus types and the /// mayastor gRPC types diff --git a/services/common/src/wrapper/v0/registry.rs b/services/common/src/wrapper/v0/registry.rs index 847658c01..ecdde6471 100644 --- a/services/common/src/wrapper/v0/registry.rs +++ b/services/common/src/wrapper/v0/registry.rs @@ -295,6 +295,131 @@ impl Registry { Ok(()) } + async fn on_create_nexus(&self, nexus: &Nexus) { + let mut nodes = self.nodes.lock().await; + let node = nodes.get_mut(&nexus.node); + if let Some(node) = node { + node.on_create_nexus(nexus); + } + } + async fn on_destroy_nexus(&self, request: &DestroyNexus) { + let mut nodes = self.nodes.lock().await; + let node = nodes.get_mut(&request.node); + if let Some(node) = node { + node.on_destroy_nexus(&request.uuid); + } + } + async fn on_add_nexus_child(&self, node: &str, nexus: &str, child: &Child) { + let mut nodes = self.nodes.lock().await; + let node = nodes.get_mut(node); + if let Some(node) = node { + node.on_add_child(nexus, child); + } + } + async fn on_remove_nexus_child(&self, request: &RemoveNexusChild) { + let mut nodes = self.nodes.lock().await; + let node = nodes.get_mut(&request.node); + if let Some(node) = node { + node.on_remove_child(request); + } + } + async fn on_update_nexus(&self, node: &str, nexus: &str, uri: &str) { + let mut nodes = self.nodes.lock().await; + let 
node = nodes.get_mut(node); + if let Some(node) = node { + node.on_update_nexus(nexus, uri); + } + } + + /// List all cached nexuses + pub async fn list_nexuses(&self) -> Vec { + let nodes = self.nodes.lock().await; + nodes + .values() + .map(|node| node.nexuses()) + .flatten() + .collect() + } + + /// List all cached nexuses from node + pub async fn list_node_nexuses(&self, node: &str) -> Vec { + let nodes = self.list_nodes_wrapper().await; + if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + node.nexuses() + } else { + // hmm, or return error, node not found? + vec![] + } + } + + /// Create nexus + pub async fn create_nexus( + &self, + request: &CreateNexus, + ) -> Result { + let node = self.get_node(&request.node).await?; + let nexus = node.create_nexus(request).await?; + self.on_create_nexus(&nexus).await; + Ok(nexus) + } + + /// Destroy nexus + pub async fn destroy_nexus( + &self, + request: &DestroyNexus, + ) -> Result<(), SvcError> { + let node = self.get_node(&request.node).await?; + node.destroy_nexus(request).await?; + self.on_destroy_nexus(request).await; + Ok(()) + } + + /// Create nexus + pub async fn share_nexus( + &self, + request: &ShareNexus, + ) -> Result { + let node = self.get_node(&request.node).await?; + let share = node.share_nexus(request).await?; + self.on_update_nexus(&request.node, &request.uuid, &share) + .await; + Ok(share) + } + + /// Destroy nexus + pub async fn unshare_nexus( + &self, + request: &UnshareNexus, + ) -> Result<(), SvcError> { + let node = self.get_node(&request.node).await?; + node.unshare_nexus(request).await?; + self.on_update_nexus(&request.node, &request.uuid, "").await; + Ok(()) + } + + /// Add nexus child + pub async fn add_nexus_child( + &self, + request: &AddNexusChild, + ) -> Result { + let node = self.get_node(&request.node).await?; + let child = node.add_child(request).await?; + self.on_add_nexus_child(&request.node, &request.nexus, &child) + .await; + Ok(child) + } + + /// Remove nexus child + pub async fn remove_nexus_child( + &self, + request: &RemoveNexusChild, + ) -> Result<(), SvcError> { + let node = self.get_node(&request.node).await?; + node.remove_child(request).await?; + self.on_remove_nexus_child(request).await; + Ok(()) + } + /// Found this node via the node service /// Update its resource list or add it to the registry if not there yet async fn found_node(&self, node: &Node) { diff --git a/services/common/src/wrapper/v0/volume.rs b/services/common/src/wrapper/v0/volume.rs new file mode 100644 index 000000000..eecfa2776 --- /dev/null +++ b/services/common/src/wrapper/v0/volume.rs @@ -0,0 +1,444 @@ +use super::{node_traits::*, *}; +use mbus_api::Message; + +/// Implementation of the trait NodeWrapperVolume for the pool service +#[derive(Debug, Default, Clone)] +pub struct NodeWrapperVolume { + node: Node, + pools: HashMap, + nexuses: HashMap, +} + +#[async_trait] +impl NodePoolTrait for NodeWrapperVolume { + /// Fetch all pools from this node via MBUS + async fn fetch_pools(&self) -> Result, SvcError> { + MessageBus::get_pools(Filter::Node(self.id())) + .await + .context(BusGetNodes {}) + } + + /// Create a pool on the node via gRPC + async fn create_pool( + &self, + request: &CreatePool, + ) -> Result { + request.request().await.context(BusCreatePool {}) + } + + /// Destroy a pool on the node via gRPC + async fn destroy_pool( + &self, + request: &DestroyPool, + ) -> Result<(), SvcError> { + request.request().await.context(BusCreatePool {}) + } + + async fn on_create_pool(&mut self, pool: &Pool, replicas: 
&[Replica]) { + self.pools + .insert(pool.name.clone(), PoolWrapper::new_from(&pool, replicas)); + } + + fn on_destroy_pool(&mut self, pool: &str) { + self.pools.remove(pool); + } +} + +#[async_trait] +impl NodeReplicaTrait for NodeWrapperVolume { + /// Fetch all replicas from this node via gRPC + async fn fetch_replicas(&self) -> Result, SvcError> { + GetReplicas { + filter: Filter::Node(self.id()), + } + .request() + .await + .context(BusGetReplicas {}) + .map(|r| r.0) + } + + /// Create a replica on the pool via gRPC + async fn create_replica( + &self, + request: &CreateReplica, + ) -> Result { + request.request().await.context(BusGetReplicas {}) + } + + /// Share a replica on the pool via gRPC + async fn share_replica( + &self, + request: &ShareReplica, + ) -> Result { + request.request().await.context(BusGetReplicas {}) + } + + /// Unshare a replica on the pool via gRPC + async fn unshare_replica( + &self, + request: &UnshareReplica, + ) -> Result<(), SvcError> { + request.request().await.context(BusGetReplicas {}) + } + + /// Destroy a replica on the pool via gRPC + async fn destroy_replica( + &self, + request: &DestroyReplica, + ) -> Result<(), SvcError> { + request.request().await.context(BusGetReplicas {}) + } + + fn on_create_replica(&mut self, replica: &Replica) { + if let Some(pool) = self.pools.get_mut(&replica.pool) { + pool.added_replica(replica); + } + } + + fn on_destroy_replica(&mut self, pool: &str, replica: &str) { + if let Some(pool) = self.pools.get_mut(pool) { + pool.removed_replica(replica) + } + } + + fn on_update_replica( + &mut self, + pool: &str, + replica: &str, + share: &Protocol, + uri: &str, + ) { + if let Some(pool) = self.pools.get_mut(pool) { + pool.updated_replica(replica, share, uri); + } + } +} + +#[async_trait] +impl NodeNexusTrait for NodeWrapperVolume { + fn nexuses(&self) -> Vec { + self.nexuses.values().cloned().collect() + } + + /// Fetch all nexuses from the node via gRPC + async fn fetch_nexuses(&self) -> Result, SvcError> { + let mut ctx = self.grpc_client().await?; + let rpc_nexuses = ctx + .client + .list_nexus(Null {}) + .await + .context(GrpcListNexuses {})?; + let rpc_nexuses = &rpc_nexuses.get_ref().nexus_list; + let nexuses = rpc_nexuses + .iter() + .map(|n| rpc_nexus_to_bus(n, self.node.id.clone())) + .collect(); + Ok(nexuses) + } + + /// Create a nexus on the node via gRPC + async fn create_nexus( + &self, + request: &CreateNexus, + ) -> Result { + let mut ctx = self.grpc_client().await?; + let rpc_nexus = ctx + .client + .create_nexus(bus_nexus_to_rpc(request)) + .await + .context(GrpcCreateNexus {})?; + Ok(rpc_nexus_to_bus( + &rpc_nexus.into_inner(), + self.node.id.clone(), + )) + } + + /// Destroy a nexus on the node via gRPC + async fn destroy_nexus( + &self, + request: &DestroyNexus, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client().await?; + let _ = ctx + .client + .destroy_nexus(bus_nexus_destroy_to_rpc(request)) + .await + .context(GrpcDestroyNexus {})?; + Ok(()) + } + + /// Share a nexus on the node via gRPC + async fn share_nexus( + &self, + request: &ShareNexus, + ) -> Result { + let mut ctx = self.grpc_client().await?; + let share = ctx + .client + .publish_nexus(bus_nexus_share_to_rpc(request)) + .await + .context(GrpcShareNexus {})?; + Ok(share.into_inner().device_uri) + } + + /// Unshare a nexus on the node via gRPC + async fn unshare_nexus( + &self, + request: &UnshareNexus, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client().await?; + let _ = ctx + .client + 
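+            // UnshareNexus is mapped onto mayastor's UnpublishNexusRequest by
+            // the bus-to-rpc converters at the bottom of this file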
.unpublish_nexus(bus_nexus_unshare_to_rpc(request)) + .await + .context(GrpcUnshareNexus {})?; + Ok(()) + } + + fn on_create_nexus(&mut self, nexus: &Nexus) { + self.nexuses.insert(nexus.uuid.clone(), nexus.clone()); + } + + fn on_update_nexus(&mut self, nexus: &str, uri: &str) { + if let Some(nexus) = self.nexuses.get_mut(nexus) { + nexus.device_uri = uri.to_string(); + } + } + + fn on_destroy_nexus(&mut self, nexus: &str) { + self.nexuses.remove(nexus); + } +} + +#[async_trait] +impl NodeNexusChildTrait for NodeWrapperVolume { + async fn fetch_children(&self) -> Result, SvcError> { + unimplemented!() + } + + /// Add a child to a nexus via gRPC + async fn add_child( + &self, + request: &AddNexusChild, + ) -> Result { + let mut ctx = self.grpc_client().await?; + let rpc_child = ctx + .client + .add_child_nexus(bus_nexus_child_add_to_rpc(request)) + .await + .context(GrpcDestroyNexus {})?; + Ok(rpc_child_to_bus(&rpc_child.into_inner())) + } + + /// Remove a child from its parent nexus via gRPC + async fn remove_child( + &self, + request: &RemoveNexusChild, + ) -> Result<(), SvcError> { + let mut ctx = self.grpc_client().await?; + let _ = ctx + .client + .remove_child_nexus(bus_nexus_child_remove_to_rpc(request)) + .await + .context(GrpcDestroyNexus {})?; + Ok(()) + } + + fn on_add_child(&mut self, nexus: &str, child: &Child) { + if let Some(nexus) = self.nexuses.get_mut(nexus) { + nexus.children.push(child.clone()); + } + } + + fn on_remove_child(&mut self, request: &RemoveNexusChild) { + if let Some(nexus) = self.nexuses.get_mut(&request.nexus) { + nexus.children.retain(|replica| replica.uri != request.uri) + } + } +} + +#[async_trait] +impl NodeWrapperTrait for NodeWrapperVolume { + async fn new(node: &str) -> Result { + Ok(Box::new(Self::new_wrapper(node).await?)) + } + + fn id(&self) -> String { + self.node.id.clone() + } + fn node(&self) -> Node { + self.node.clone() + } + fn pools(&self) -> Vec { + self.pools.values().map(|p| p.pool()).collect() + } + fn pools_wrapper(&self) -> Vec { + self.pools.values().cloned().collect() + } + fn replicas(&self) -> Vec { + self.pools + .values() + .map(|p| p.replicas()) + .flatten() + .collect() + } + fn is_online(&self) -> bool { + self.node.state == NodeState::Online + } + + async fn update(&mut self) { + match Self::new_wrapper(&self.node.id).await { + Ok(node) => { + let old_state = self.node.state.clone(); + *self = node; + if old_state != self.node.state { + tracing::error!( + "Node '{}' changed state from '{}' to '{}'", + self.node.id, + old_state.to_string(), + self.node.state.to_string() + ) + } + } + Err(error) => { + tracing::error!( + "Failed to update the node '{}', error: {}", + self.node.id, + error + ); + self.set_state(NodeState::Unknown); + } + } + } + fn set_state(&mut self, state: NodeState) { + if self.node.state != state { + tracing::info!( + "Node '{}' state is now {}", + self.node.id, + state.to_string() + ); + self.node.state = state; + for (_, pool) in self.pools.iter_mut() { + pool.set_unknown(); + } + } + } +} + +impl NodeWrapperVolume { + /// Fetch node via the message bus + async fn fetch_node(node: &str) -> Result { + MessageBus::get_node(node).await.context(BusGetNode { + node, + }) + } + + /// New node wrapper for the pool service containing + /// a list of pools and replicas + async fn new_wrapper(node: &str) -> Result { + let mut node = Self { + // if we can't even fetch the node, then no point in proceeding + node: NodeWrapperVolume::fetch_node(node).await?, + ..Default::default() + }; + + // if the node is not 
online, don't even bother trying to connect + if node.is_online() { + let pools = node.fetch_pools().await?; + let replicas = node.fetch_replicas().await?; + let nexuses = node.fetch_nexuses().await?; + + for pool in &pools { + let replicas = replicas + .iter() + .filter(|r| r.pool == pool.name) + .cloned() + .collect::>(); + node.on_create_pool(pool, &replicas).await; + } + + for nexus in &nexuses { + node.on_create_nexus(nexus); + } + } + // we've got a node, but we might not have the full picture if it's + // offline + Ok(node) + } +} + +fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: String) -> Nexus { + Nexus { + node: id, + uuid: rpc_nexus.uuid.clone(), + size: rpc_nexus.size, + state: NexusState::from(rpc_nexus.state), + children: rpc_nexus + .children + .iter() + .map(|c| rpc_child_to_bus(&c)) + .collect(), + device_uri: rpc_nexus.device_uri.clone(), + rebuilds: rpc_nexus.rebuilds, + } +} +fn rpc_child_to_bus(rpc_child: &rpc::mayastor::Child) -> Child { + Child { + uri: rpc_child.uri.clone(), + state: ChildState::from(rpc_child.state), + rebuild_progress: if rpc_child.rebuild_progress >= 0 { + Some(rpc_child.rebuild_progress) + } else { + None + }, + } +} +fn bus_nexus_to_rpc( + request: &CreateNexus, +) -> rpc::mayastor::CreateNexusRequest { + rpc::mayastor::CreateNexusRequest { + uuid: request.uuid.clone(), + size: request.size, + children: request.children.clone(), + } +} +fn bus_nexus_share_to_rpc( + request: &ShareNexus, +) -> rpc::mayastor::PublishNexusRequest { + rpc::mayastor::PublishNexusRequest { + uuid: request.uuid.clone(), + key: request.key.clone().unwrap_or_default(), + share: request.protocol.clone() as i32, + } +} +fn bus_nexus_unshare_to_rpc( + request: &UnshareNexus, +) -> rpc::mayastor::UnpublishNexusRequest { + rpc::mayastor::UnpublishNexusRequest { + uuid: request.uuid.clone(), + } +} +fn bus_nexus_destroy_to_rpc( + request: &DestroyNexus, +) -> rpc::mayastor::DestroyNexusRequest { + rpc::mayastor::DestroyNexusRequest { + uuid: request.uuid.clone(), + } +} +fn bus_nexus_child_add_to_rpc( + request: &AddNexusChild, +) -> rpc::mayastor::AddChildNexusRequest { + rpc::mayastor::AddChildNexusRequest { + uuid: request.nexus.clone(), + uri: request.uri.clone(), + norebuild: !request.auto_rebuild, + } +} +fn bus_nexus_child_remove_to_rpc( + request: &RemoveNexusChild, +) -> rpc::mayastor::RemoveChildNexusRequest { + rpc::mayastor::RemoveChildNexusRequest { + uuid: request.nexus.clone(), + uri: request.uri.clone(), + } +} diff --git a/services/volume/src/server.rs b/services/volume/src/server.rs new file mode 100644 index 000000000..d3f8448b0 --- /dev/null +++ b/services/volume/src/server.rs @@ -0,0 +1,298 @@ +pub mod service; + +use async_trait::async_trait; +use common::*; +use mbus_api::{v0::*, *}; +use service::*; +use std::{convert::TryInto, marker::PhantomData}; +use structopt::StructOpt; +use tracing::info; + +#[derive(Debug, StructOpt)] +struct CliArgs { + /// The Nats Server URL to connect to + /// (supports the nats schema) + /// Default: nats://127.0.0.1:4222 + #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] + nats: String, + + /// The period at which the registry updates its cache of all + /// resources from all nodes + #[structopt(long, short, default_value = "20s")] + period: humantime::Duration, +} + +/// Needed so we can implement the ServiceSubscriber trait for +/// the message types external to the crate +#[derive(Clone, Default)] +struct ServiceHandler { + data: PhantomData, +} + +macro_rules! 
impl_service_handler { + // RequestType is the message bus request type + // ServiceFnName is the name of the service function to route the request + // into + ($RequestType:ident, $ServiceFnName:ident) => { + #[async_trait] + impl ServiceSubscriber for ServiceHandler<$RequestType> { + async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { + let request: ReceivedMessage<$RequestType> = + args.request.try_into()?; + + let service: &VolumeSvc = args.context.get_state(); + let reply = service + .$ServiceFnName(&request.inner()) + .await + .map_err(|error| Error::ServiceError { + message: error.full_string(), + })?; + request.reply(reply).await + } + fn filter(&self) -> Vec { + vec![$RequestType::default().id()] + } + } + }; +} + +// todo: +// a service handler can actually specify a vector of message filters so could +// indeed do the filtering at our service specific code and have a single +// entrypoint here nexus +impl_service_handler!(GetNexuses, get_nexuses); +impl_service_handler!(CreateNexus, create_nexus); +impl_service_handler!(DestroyNexus, destroy_nexus); +impl_service_handler!(ShareNexus, share_nexus); +impl_service_handler!(UnshareNexus, unshare_nexus); +impl_service_handler!(AddNexusChild, add_nexus_child); +impl_service_handler!(RemoveNexusChild, remove_nexus_child); +// volumes +impl_service_handler!(GetVolumes, get_volumes); +impl_service_handler!(CreateVolume, create_volume); +impl_service_handler!(DestroyVolume, destroy_volume); + +fn init_tracing() { + if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { + tracing_subscriber::fmt().with_env_filter(filter).init(); + } else { + tracing_subscriber::fmt().with_env_filter("info").init(); + } +} + +#[tokio::main] +async fn main() { + init_tracing(); + + let cli_args = CliArgs::from_args(); + info!("Using options: {:?}", &cli_args); + + server(cli_args).await; +} + +async fn server(cli_args: CliArgs) { + Service::builder(cli_args.nats, ChannelVs::Volume) + .connect() + .await + .with_shared_state(VolumeSvc::new(cli_args.period.into())) + .with_default_liveness() + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_channel(ChannelVs::Nexus) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .with_subscription(ServiceHandler::::default()) + .run() + .await; +} + +#[cfg(test)] +mod tests { + use super::*; + use composer::*; + use rpc::mayastor::Null; + + async fn wait_for_services() { + let _ = GetNodes {}.request().await.unwrap(); + Liveness {}.request_on(ChannelVs::Pool).await.unwrap(); + Liveness {}.request_on(ChannelVs::Volume).await.unwrap(); + } + // to avoid waiting for timeouts + async fn orderly_start(test: &ComposeTest) { + test.start_containers(vec!["nats", "node", "pool", "volume"]) + .await + .unwrap(); + + test.connect_to_bus("nats").await; + wait_for_services().await; + + test.start("mayastor").await.unwrap(); + test.start("mayastor2").await.unwrap(); + + let mut hdl = test.grpc_handle("mayastor").await.unwrap(); + hdl.mayastor.list_nexus(Null {}).await.unwrap(); + let mut hdl = test.grpc_handle("mayastor2").await.unwrap(); + hdl.mayastor.list_nexus(Null {}).await.unwrap(); + } + + #[tokio::test] + async fn volume() { + let mayastor 
= "volume-test-name"; + let mayastor2 = "volume-test-name-replica"; + let test = Builder::new() + .name("volume") + .add_container_bin("nats", Binary::from_nix("nats-server")) + .add_container_bin("node", Binary::from_dbg("node").with_nats("-n")) + .add_container_bin("pool", Binary::from_dbg("pool").with_nats("-n")) + .add_container_bin( + "volume", + Binary::from_dbg("volume").with_nats("-n"), + ) + .add_container_bin( + "mayastor", + Binary::from_dbg("mayastor") + .with_nats("-n") + .with_args(vec!["-N", mayastor]) + .with_args(vec!["-g", "10.1.0.6:10124"]), + ) + .add_container_bin( + "mayastor2", + Binary::from_dbg("mayastor") + .with_nats("-n") + .with_args(vec!["-N", mayastor2]) + .with_args(vec!["-g", "10.1.0.7:10124"]), + ) + .with_default_tracing() + .autorun(false) + .build() + .await + .unwrap(); + + orderly_start(&test).await; + let nodes = GetNodes {}.request().await.unwrap(); + tracing::info!("Nodes: {:?}", nodes); + + prepare_pools(mayastor, mayastor2).await; + test_nexus(mayastor, mayastor2).await; + test_volume().await; + + assert!(GetNexuses::default().request().await.unwrap().0.is_empty()); + } + + async fn prepare_pools(mayastor: &str, mayastor2: &str) { + CreatePool { + node: mayastor.to_string(), + name: "pooloop".to_string(), + disks: vec!["malloc:///disk0?size_mb=100".into()], + } + .request() + .await + .unwrap(); + + CreatePool { + node: mayastor2.to_string(), + name: "pooloop".to_string(), + disks: vec!["malloc:///disk0?size_mb=100".into()], + } + .request() + .await + .unwrap(); + + let pools = GetPools::default().request().await.unwrap(); + tracing::info!("Pools: {:?}", pools); + } + + async fn test_nexus(mayastor: &str, mayastor2: &str) { + let replica = CreateReplica { + node: mayastor2.into(), + uuid: "replica".into(), + pool: "pooloop".into(), + size: 12582912, /* actual size will be a multiple of 4MB so just + * create it like so */ + thin: true, + share: Protocol::Nvmf, + } + .request() + .await + .unwrap(); + + let local = "malloc:///local?size_mb=12".into(); + + let nexus = CreateNexus { + node: mayastor.into(), + uuid: "f086f12c-1728-449e-be32-9415051090d6".into(), + size: 5242880, + children: vec![replica.uri, local], + } + .request() + .await + .unwrap(); + + let nexuses = GetNexuses::default().request().await.unwrap().0; + tracing::info!("Nexuses: {:?}", nexuses); + assert_eq!(Some(&nexus), nexuses.first()); + + ShareNexus { + node: mayastor.into(), + uuid: "f086f12c-1728-449e-be32-9415051090d6".into(), + key: None, + protocol: Protocol::Nvmf, + } + .request() + .await + .unwrap(); + + DestroyNexus { + node: mayastor.into(), + uuid: "f086f12c-1728-449e-be32-9415051090d6".to_string(), + } + .request() + .await + .unwrap(); + + DestroyReplica { + node: replica.node.to_string(), + pool: replica.pool.to_string(), + uuid: replica.uuid.to_string(), + } + .request() + .await + .unwrap(); + + assert!(GetNexuses::default().request().await.unwrap().0.is_empty()); + } + + async fn test_volume() { + let volume = CreateVolume { + uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".to_string(), + size: 5242880, + nexuses: 1, + replicas: 2, + allowed_nodes: vec![], + preferred_nodes: vec![], + preferred_nexus_nodes: vec![], + }; + + let volume = volume.request().await.unwrap(); + let volumes = GetVolumes::default().request().await.unwrap().0; + tracing::info!("Volumes: {:?}", volumes); + + assert_eq!(Some(&volume), volumes.first()); + + DestroyVolume { + uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".to_string(), + } + .request() + .await + .unwrap(); + + 
        assert!(GetVolumes::default().request().await.unwrap().0.is_empty());
+        assert!(GetNexuses::default().request().await.unwrap().0.is_empty());
+        assert!(GetReplicas::default().request().await.unwrap().0.is_empty());
+    }
+}
diff --git a/services/volume/src/service.rs b/services/volume/src/service.rs
new file mode 100644
index 000000000..827b3b881
--- /dev/null
+++ b/services/volume/src/service.rs
@@ -0,0 +1,333 @@
+#![allow(clippy::unit_arg)]
+
+use super::*;
+use common::wrapper::v0::*;
+
+/// Volume service implementation methods
+#[derive(Clone, Debug, Default)]
+pub(super) struct VolumeSvc {
+    registry: Registry<NodeWrapperVolume>,
+}
+
+impl VolumeSvc {
+    /// New Service with the update `period`
+    pub fn new(period: std::time::Duration) -> Self {
+        let obj = Self {
+            registry: Registry::new(period),
+        };
+        obj.start();
+        obj
+    }
+    /// Start registry poller
+    fn start(&self) {
+        self.registry.start();
+    }
+
+    /// Get all nexuses from a specific node or from all nodes
+    async fn get_node_nexuses(
+        &self,
+        node_id: Option<String>,
+    ) -> Result<Vec<Nexus>, SvcError> {
+        Ok(match node_id {
+            None => self.registry.list_nexuses().await,
+            Some(node_id) => self.registry.list_node_nexuses(&node_id).await,
+        })
+    }
+
+    /// Get nexuses according to the filter
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_nexuses(
+        &self,
+        request: &GetNexuses,
+    ) -> Result<Nexuses, SvcError> {
+        let filter = request.filter.clone();
+        let nexuses = match filter {
+            Filter::None => self.get_node_nexuses(None).await?,
+            Filter::Node(node_id) => {
+                self.get_node_nexuses(Some(node_id)).await?
+            }
+            Filter::NodeNexus(node_id, nexus_id) => {
+                let nexuses = self.get_node_nexuses(Some(node_id)).await?;
+                nexuses
+                    .iter()
+                    .filter(|&n| n.uuid == nexus_id)
+                    .cloned()
+                    .collect()
+            }
+            Filter::Nexus(nexus_id) => {
+                let nexuses = self.get_node_nexuses(None).await?;
+                nexuses
+                    .iter()
+                    .filter(|&n| n.uuid == nexus_id)
+                    .cloned()
+                    .collect()
+            }
+            _ => {
+                return Err(SvcError::InvalidFilter {
+                    filter,
+                })
+            }
+        };
+        Ok(Nexuses(nexuses))
+    }
+
+    /// Create nexus
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_nexus(
+        &self,
+        request: &CreateNexus,
+    ) -> Result<Nexus, SvcError> {
+        self.registry.create_nexus(request).await
+    }
+
+    /// Destroy nexus
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_nexus(
+        &self,
+        request: &DestroyNexus,
+    ) -> Result<(), SvcError> {
+        self.registry.destroy_nexus(request).await
+    }
+
+    /// Share nexus
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn share_nexus(
+        &self,
+        request: &ShareNexus,
+    ) -> Result<String, SvcError> {
+        self.registry.share_nexus(request).await
+    }
+
+    /// Unshare nexus
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn unshare_nexus(
+        &self,
+        request: &UnshareNexus,
+    ) -> Result<(), SvcError> {
+        self.registry.unshare_nexus(request).await
+    }
+
+    /// Add nexus child
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn add_nexus_child(
+        &self,
+        request: &AddNexusChild,
+    ) -> Result<Child, SvcError> {
+        self.registry.add_nexus_child(request).await
+    }
+
+    /// Remove nexus child
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn remove_nexus_child(
+        &self,
+        request: &RemoveNexusChild,
+    ) -> Result<(), SvcError> {
+        self.registry.remove_nexus_child(request).await
+    }
+
+    /// Get volumes
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn get_volumes(
+        &self,
+        request: &GetVolumes,
+    ) -> Result<Volumes, SvcError> {
+        let nexus = self.registry.list_nexuses().await;
+        Ok(Volumes(
+            nexus
+                .iter()
+                .map(|n|
+                    Volume {
+                        uuid: n.uuid.clone(),
+                        size: n.size,
+                        state: n.state.clone(),
+                        children: vec![n.clone()],
+                    })
+                .collect(),
+        ))
+    }
+
+    /// Create volume
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn create_volume(
+        &self,
+        request: &CreateVolume,
+    ) -> Result<Volume, SvcError> {
+        // should we just use the cache here?
+        let pools = self.registry.fetch_pools_wrapper().await;
+
+        let size = request.size;
+        let replicas = request.replicas;
+        let allowed_nodes = request.allowed_nodes.clone();
+
+        if !allowed_nodes.is_empty() && replicas > allowed_nodes.len() as u64 {
+            // more replicas than allowed nodes can never be satisfied
+            return Err(SvcError::InvalidArguments {});
+        }
+
+        if request.nexuses > 1 {
+            tracing::warn!(
+                "Multiple nexuses per volume are not currently supported"
+            );
+        }
+
+        // filter pools according to the following criteria (any order):
+        // 1. if allowed_nodes were specified then only pools from those nodes
+        //    can be used.
+        // 2. pools should have enough free space for the volume (do we need
+        //    to take into account metadata?)
+        // 3. ideally use only healthy (online) pools with degraded pools as a
+        //    fallback
+        let mut pools = pools
+            .iter()
+            .filter(|&p| {
+                // required nodes, if any
+                allowed_nodes.is_empty() || allowed_nodes.contains(&p.node())
+            })
+            .filter(|&p| {
+                // enough free space
+                p.free_space() >= size
+            })
+            .filter(|&p| {
+                // but preferably (the sort will sort this out for us)
+                p.state() != PoolState::Faulted
+                    && p.state() != PoolState::Unknown
+            })
+            .collect::<Vec<_>>();
+
+        // we could not satisfy the request, no point in continuing any further
+        if replicas > pools.len() as u64 {
+            return Err(NotEnough::OfPools {
+                have: pools.len() as u64,
+                need: replicas,
+            }
+            .into());
+        }
+
+        // sort pools from least to most suitable:
+        // state, then number of replicas, then free space
+        pools.sort();
+
+        let mut replicas = vec![];
+        while let Some(pool) = pools.pop() {
+            let create_replica = CreateReplica {
+                node: pool.node(),
+                uuid: request.uuid.clone(),
+                pool: pool.uuid(),
+                size: request.size,
+                thin: true,
+                share: if replicas.is_empty() {
+                    // only 1 nexus is supported for the moment, and it will
+                    // use replica 0
+                    Protocol::Off
+                } else {
+                    // the others will fail to create because they can't open
+                    // their local replica via Nvmf
+                    Protocol::Nvmf
+                },
+            };
+            let replica = self.registry.create_replica(&create_replica).await;
+            if let Ok(replica) = replica {
+                replicas.push(replica);
+            } else {
+                tracing::error!(
+                    "Failed to create replica: {:?}. Trying other pools (if any available)...",
+                    create_replica
+                );
+            }
+
+            if replicas.len() == request.replicas as usize {
+                break;
+            }
+        }
+
+        if replicas.len() == request.replicas as usize {
+            // we have enough replicas, now stitch them up into the nexuses.
+            // where are the nexuses allowed to exist?
+            // (at the moment on the same nodes as the most preferred replicas)
+
+            let mut nexuses = vec![];
+            for i in 0 .. request.nexuses {
+                let create_nexus = CreateNexus {
+                    node: replicas[i as usize].node.clone(),
+                    uuid: request.uuid.clone(),
+                    size: request.size,
+                    children: replicas.iter().map(|r| r.uri.clone()).collect(),
+                };
+
+                match self.registry.create_nexus(&create_nexus).await {
+                    Ok(nexus) => {
+                        nexuses.push(nexus);
+                    }
+                    Err(error) => {
+                        // what to do in case of failure?
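+                        // for now we just log and carry on; rolling back the
+                        // replicas created so far would be a possible
+                        // follow-up (not implemented here)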
+                        tracing::error!(
+                            "Failed to create nexus: {:?}, error: {}",
+                            create_nexus,
+                            error.full_string()
+                        );
+                    }
+                }
+            }
+
+            if nexuses.is_empty() {
+                Err(NotEnough::OfNexuses {
+                    have: 0,
+                    need: 1,
+                }
+                .into())
+            } else {
+                let volume = Volume {
+                    uuid: request.uuid.clone(),
+                    size: request.size,
+                    state: NexusState::Online,
+                    children: nexuses,
+                };
+                Ok(volume)
+            }
+        } else {
+            // we can't fulfil the request fully...
+            // carry on to a "degraded" state with "enough" replicas or bail
+            // out?
+            Err(NotEnough::OfReplicas {
+                have: replicas.len() as u64,
+                need: request.replicas,
+            }
+            .into())
+        }
+    }
+
+    /// Destroy volume
+    #[tracing::instrument(level = "debug", err)]
+    pub(super) async fn destroy_volume(
+        &self,
+        request: &DestroyVolume,
+    ) -> Result<(), SvcError> {
+        let nexuses = self.registry.list_nexuses().await;
+        let nexuses = nexuses
+            .iter()
+            .filter(|n| n.uuid == request.uuid)
+            .collect::<Vec<_>>();
+        for nexus in nexuses {
+            self.registry
+                .destroy_nexus(&DestroyNexus {
+                    node: nexus.node.clone(),
+                    uuid: request.uuid.clone(),
+                })
+                .await?;
+            for child in &nexus.children {
+                let replicas = self.registry.list_replicas().await;
+                let replica = replicas.iter().find(|r| r.uri == child.uri);
+                if let Some(replica) = replica {
+                    self.registry
+                        .destroy_replica(&DestroyReplica {
+                            node: replica.node.clone(),
+                            pool: replica.pool.clone(),
+                            uuid: replica.uuid.clone(),
+                        })
+                        .await?;
+                }
+            }
+        }
+        Ok(())
+    }
+}

From 73a0fceb96772d128853ab9b45e4836e2bd511db Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Thu, 7 Jan 2021 11:49:48 +0000
Subject: [PATCH 69/85] feat(jaeger): add tracing for the rest calls

Trace all rest requests and responses via a command line option for the
control plane rest server and its k8s operator counterpart.
The rest client library now has a flag to enable/disable tracing.
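For illustration (the agent address below is only an example), the server
and operator take the new option, and the client is constructed with the
tracing flag enabled:

    --jaeger 10.1.0.8:6831
    let client = ActixRestClient::new(&rest_url, true)?;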
Added jaegertracing/all-in-one:latest container to the rest client test which allows us to see all the rest requests/responses @localhost:16686 --- Cargo.lock | 118 ++++++++++++++++++++++++++++++++++++- operators/Cargo.toml | 2 + operators/node/src/main.rs | 33 ++++++++++- rest/Cargo.toml | 4 ++ rest/service/src/main.rs | 31 +++++++++- rest/src/lib.rs | 86 ++++++++++++++++----------- rest/tests/v0_test.rs | 48 ++++++++++++--- 7 files changed, 274 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6807b3f5f..60c2ed6c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -280,6 +280,20 @@ dependencies = [ "syn 1.0.51", ] +[[package]] +name = "actix-web-opentelemetry" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d10b9d36fd431016fb3ad4be804c7c35f685661a327bdc1a15aaff8eff8bcc4b" +dependencies = [ + "actix-http", + "actix-web", + "futures", + "opentelemetry", + "opentelemetry-semantic-conventions", + "serde", +] + [[package]] name = "addr2line" version = "0.14.0" @@ -1325,6 +1339,16 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "dashmap" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b937cd1fbd1f194ac842196bd2529f21618088ee6d8bff6a46ece611451c96b" +dependencies = [ + "cfg-if 1.0.0", + "num_cpus", +] + [[package]] name = "data-encoding" version = "2.3.1" @@ -2145,6 +2169,12 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "integer-encoding" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f4ebd0bd29be0f11973e9b3e219005661042a019fd757798c36a47c87852625" + [[package]] name = "io-uring" version = "0.4.0" @@ -2341,7 +2371,7 @@ version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9abc7b19889353e501e6bc7b2b9d7062b2e008ec256f11e9428ed8e56d046d2f" dependencies = [ - "dashmap", + "dashmap 3.11.10", "derivative", "futures", "k8s-openapi", @@ -2914,6 +2944,49 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3434e2a9d2aec539d91f4251bf9047cd53b4d3f386f9d336f4c8076c72a5256" +dependencies = [ + "async-trait", + "dashmap 4.0.1", + "fnv", + "futures", + "js-sys", + "lazy_static", + "percent-encoding 2.1.0", + "pin-project 0.4.27", + "rand 0.7.3", + "regex", + "thiserror", + "tokio", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c604a73595f605a852c431ef9c6bbacc7b911f094900905fd2f684b6fc44b4" +dependencies = [ + "async-trait", + "lazy_static", + "opentelemetry", + "thiserror", + "thrift", + "tokio", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3839dd2c931dc1aabcf964623ad74478fa97b3a88ad073d9e107aea36520c21d" +dependencies = [ + "opentelemetry", +] + [[package]] name = "operators" version = "0.1.0" @@ -2927,6 +3000,8 @@ dependencies = [ "kube-derive", "kube-runtime", "mbus_api", + "opentelemetry", + "opentelemetry-jaeger", "rest", "rustls", "serde", @@ -2939,6 +3014,15 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "ordered-float" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits 0.2.14", +] + 
[[package]] name = "ordered-float" version = "2.0.0" @@ -3529,11 +3613,14 @@ version = "0.1.0" dependencies = [ "actix-rt", "actix-web", + "actix-web-opentelemetry", "anyhow", "async-trait", "composer", "futures", "mbus_api", + "opentelemetry", + "opentelemetry-jaeger", "rpc", "rustls", "serde", @@ -3545,6 +3632,7 @@ dependencies = [ "tokio", "tracing", "tracing-futures", + "tracing-opentelemetry", "tracing-subscriber", "url", ] @@ -3766,7 +3854,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ - "ordered-float", + "ordered-float 2.0.0", "serde", ] @@ -4327,6 +4415,19 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "thrift" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +dependencies = [ + "byteorder", + "integer-encoding", + "log", + "ordered-float 1.1.1", + "threadpool", +] + [[package]] name = "time" version = "0.1.44" @@ -4759,6 +4860,19 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1706e1f42970e09aa0635deb4f4607e8704a4390427d5f0062bf59240338bcc" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "tracing-serde" version = "0.1.2" diff --git a/operators/Cargo.toml b/operators/Cargo.toml index d8f05c7a5..297e9dc71 100644 --- a/operators/Cargo.toml +++ b/operators/Cargo.toml @@ -28,6 +28,8 @@ either = "1.6.0" tracing = "0.1" tracing-subscriber = "0.2" tracing-futures = "0.2.4" +opentelemetry-jaeger = { version = "0.10", features = ["tokio"] } +opentelemetry = "0.11.2" rest = { path = "../rest" } [dependencies.serde] diff --git a/operators/node/src/main.rs b/operators/node/src/main.rs index 8e1925c42..c87745813 100644 --- a/operators/node/src/main.rs +++ b/operators/node/src/main.rs @@ -16,6 +16,10 @@ struct CliArgs { /// Polling period #[structopt(long, short, default_value = "30s")] period: humantime::Duration, + + /// Trace rest requests to the Jaeger endpoint agent + #[structopt(long, short)] + jaeger: Option, } #[derive(CustomResource, Deserialize, Serialize, Clone, Debug)] @@ -47,22 +51,45 @@ impl TryFrom<&MayastorNode> for Node { } } -fn init_tracing() { +use opentelemetry::{ + global, + sdk::{propagation::TraceContextPropagator, trace::Tracer}, +}; +use opentelemetry_jaeger::Uninstall; + +fn init_tracing() -> Option<(Tracer, Uninstall)> { if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(filter).init(); } else { tracing_subscriber::fmt().with_env_filter("info").init(); } + if let Some(agent) = CliArgs::from_args().jaeger { + tracing::info!("Starting jaeger trace pipeline at {}...", agent); + // Start a new jaeger trace pipeline + global::set_text_map_propagator(TraceContextPropagator::new()); + let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() + .with_agent_endpoint(agent) + .with_service_name("rest-server") + .install() + .expect("Jaeger pipeline install error"); + Some((_tracer, _uninstall)) + } else { + None + } } #[actix_web::main] async fn main() -> anyhow::Result<()> { - init_tracing(); + // need to keep the jaeger pipeline tracer alive, if enabled + let _tracer = init_tracing(); let polling_period = 
CliArgs::from_args().period.into(); let rest_url = format!("https://{}", CliArgs::from_args().rest); - let rest_cli = rest_client::ActixRestClient::new(&rest_url)?; + let rest_cli = rest_client::ActixRestClient::new( + &rest_url, + CliArgs::from_args().jaeger.is_some(), + )?; let kube_client = kube::Client::try_default().await?; let namespace = "mayastor"; diff --git a/rest/Cargo.toml b/rest/Cargo.toml index 08531806d..fd83ea6aa 100644 --- a/rest/Cargo.toml +++ b/rest/Cargo.toml @@ -30,6 +30,10 @@ strum_macros = "0.19" anyhow = "1.0.32" snafu = "0.6" url = "2.2.0" +opentelemetry-jaeger = { version = "0.10", features = ["tokio"] } +tracing-opentelemetry = "0.10.0" +opentelemetry = "0.11.2" +actix-web-opentelemetry = "0.9.0" [dev-dependencies] composer = { path = "../composer" } diff --git a/rest/service/src/main.rs b/rest/service/src/main.rs index 599a99f45..106f9dd19 100644 --- a/rest/service/src/main.rs +++ b/rest/service/src/main.rs @@ -19,19 +19,45 @@ struct CliArgs { /// Default: nats://0.0.0.0:4222 #[structopt(long, short, default_value = "nats://0.0.0.0:4222")] nats: String, + + /// Trace rest requests to the Jaeger endpoint agent + #[structopt(long, short)] + jaeger: Option, } -fn init_tracing() { +use actix_web_opentelemetry::RequestTracing; +use opentelemetry::{ + global, + sdk::{propagation::TraceContextPropagator, trace::Tracer}, +}; +use opentelemetry_jaeger::Uninstall; + +fn init_tracing() -> Option<(Tracer, Uninstall)> { if let Ok(filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(filter).init(); } else { tracing_subscriber::fmt().with_env_filter("info").init(); } + if let Some(agent) = CliArgs::from_args().jaeger { + tracing::info!("Starting jaeger trace pipeline at {}...", agent); + // Start a new jaeger trace pipeline + global::set_text_map_propagator(TraceContextPropagator::new()); + let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() + .with_agent_endpoint(agent) + .with_service_name("rest-server") + .install() + .expect("Jaeger pipeline install error"); + Some((_tracer, _uninstall)) + } else { + None + } } #[actix_web::main] async fn main() -> std::io::Result<()> { - init_tracing(); + // need to keep the jaeger pipeline tracer alive, if enabled + let _tracer = init_tracing(); + mbus_api::message_bus_init(CliArgs::from_args().nats).await; // dummy certificates @@ -48,6 +74,7 @@ async fn main() -> std::io::Result<()> { HttpServer::new(move || { App::new() + .wrap(RequestTracing::new()) .wrap(middleware::Logger::default()) .service(v0::nodes::factory()) .service(v0::pools::factory()) diff --git a/rest/src/lib.rs b/rest/src/lib.rs index c8b5c63b6..ff7447f62 100644 --- a/rest/src/lib.rs +++ b/rest/src/lib.rs @@ -15,6 +15,7 @@ pub mod versions; use actix_web::{body::Body, client::Client}; +use actix_web_opentelemetry::ClientExt; use serde::Deserialize; use std::{io::BufReader, string::ToString}; @@ -23,11 +24,12 @@ use std::{io::BufReader, string::ToString}; pub struct ActixRestClient { client: actix_web::client::Client, url: String, + trace: bool, } impl ActixRestClient { /// creates a new client which uses the specified `url` - pub fn new(url: &str) -> anyhow::Result { + pub fn new(url: &str, trace: bool) -> anyhow::Result { let cert_file = &mut BufReader::new( &std::include_bytes!("../certs/rsa/ca.cert")[..], ); @@ -45,6 +47,7 @@ impl ActixRestClient { Ok(Self { client: rest_client, url: url.to_string(), + trace, }) } async fn get_vec(&self, urn: String) -> anyhow::Result> @@ -53,14 +56,19 @@ impl 
ActixRestClient { { let uri = format!("{}{}", self.url, urn); - let mut rest_response = - self.client.get(uri.clone()).send().await.map_err(|error| { - anyhow::anyhow!( - "Failed to get uri '{}' from rest, err={:?}", - uri, - error - ) - })?; + let result = if self.trace { + self.client.get(uri.clone()).trace_request().send().await + } else { + self.client.get(uri.clone()).send().await + }; + + let mut rest_response = result.map_err(|error| { + anyhow::anyhow!( + "Failed to get uri '{}' from rest, err={:?}", + uri, + error + ) + })?; let rest_body = rest_response.body().await?; match serde_json::from_slice(&rest_body) { @@ -78,19 +86,28 @@ impl ActixRestClient { { let uri = format!("{}{}", self.url, urn); - let mut rest_response = self - .client - .put(uri.clone()) - .content_type("application/json") - .send_body(body) - .await - .map_err(|error| { - anyhow::anyhow!( - "Failed to put uri '{}' from rest, err={:?}", - uri, - error - ) - })?; + let result = if self.trace { + self.client + .put(uri.clone()) + .content_type("application/json") + .trace_request() + .send_body(body) + .await + } else { + self.client + .put(uri.clone()) + .content_type("application/json") + .send_body(body) + .await + }; + + let mut rest_response = result.map_err(|error| { + anyhow::anyhow!( + "Failed to put uri '{}' from rest, err={:?}", + uri, + error + ) + })?; let rest_body = rest_response.body().await?; Ok(serde_json::from_slice::(&rest_body)?) @@ -101,18 +118,19 @@ impl ActixRestClient { { let uri = format!("{}{}", self.url, urn); - let mut rest_response = self - .client - .delete(uri.clone()) - .send() - .await - .map_err(|error| { - anyhow::anyhow!( - "Failed to delete uri '{}' from rest, err={:?}", - uri, - error - ) - })?; + let result = if self.trace { + self.client.delete(uri.clone()).trace_request().send().await + } else { + self.client.delete(uri.clone()).send().await + }; + + let mut rest_response = result.map_err(|error| { + anyhow::anyhow!( + "Failed to delete uri '{}' from rest, err={:?}", + uri, + error + ) + })?; let rest_body = rest_response.body().await?; Ok(serde_json::from_slice::(&rest_body)?) 
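For the spans to actually reach Jaeger, a caller has to install the trace
context propagator and an export pipeline before constructing the client
with tracing enabled. A condensed sketch of that setup, mirroring the test
changes below (endpoint and service name are illustrative):

use opentelemetry::{global, sdk::propagation::TraceContextPropagator};

// propagate the trace context on outgoing requests
global::set_text_map_propagator(TraceContextPropagator::new());
// export spans to the Jaeger agent; the returned guard must be kept
// alive for as long as spans should be shipped
let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
    .with_service_name("rest-client")
    .install()
    .unwrap();
// enable per-request tracing via the new second argument
let client = ActixRestClient::new("https://localhost:8080", true).unwrap();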
diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs
index ed6c3e690..432a7a8cb 100644
--- a/rest/tests/v0_test.rs
+++ b/rest/tests/v0_test.rs
@@ -3,6 +3,7 @@ use mbus_api::{
     v0::{ChannelVs, Liveness, NodeState, PoolState},
     Message,
 };
+use opentelemetry::{global, sdk::propagation::TraceContextPropagator};
 use rest_client::{versions::v0::*, ActixRestClient};
 use rpc::mayastor::Null;
 use tracing::info;
@@ -15,9 +16,11 @@ async fn wait_for_services() {
 // to avoid waiting for timeouts
 async fn orderly_start(test: &ComposeTest) {
-    test.start_containers(vec!["nats", "node", "pool", "volume", "rest"])
-        .await
-        .unwrap();
+    test.start_containers(vec![
+        "nats", "node", "pool", "volume", "rest", "jaeger",
+    ])
+    .await
+    .unwrap();

     test.connect_to_bus("nats").await;
     wait_for_services().await;
@@ -30,6 +33,12 @@

 #[actix_rt::test]
 async fn client() {
+    global::set_text_map_propagator(TraceContextPropagator::new());
+    let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
+        .with_service_name("rest-client")
+        .install()
+        .unwrap();
+
     let mayastor = "node-test-name";
     let test = Builder::new()
         .name("rest")
@@ -43,9 +52,12 @@
         .add_container_spec(
             ContainerSpec::from_binary(
                 "rest",
-                Binary::from_dbg("rest").with_nats("-n"),
+                Binary::from_dbg("rest")
+                    .with_nats("-n")
+                    .with_args(vec!["-j", "10.1.0.8:6831"]),
             )
-            .with_portmap("8080", "8080"),
+            .with_portmap("8080", "8080")
+            .with_portmap("8081", "8081"),
         )
         .add_container_bin(
             "mayastor",
@@ -54,8 +66,19 @@
                 .with_args(vec!["-N", mayastor])
                 .with_args(vec!["-g", "10.1.0.7:10124"]),
         )
+        .add_container_spec(
+            ContainerSpec::from_image(
+                "jaeger",
+                "jaegertracing/all-in-one:latest",
+            )
+            .with_portmap("16686", "16686")
+            .with_portmap("6831/udp", "6831/udp")
+            .with_portmap("6832/udp", "6832/udp"),
+        )
+        //.with_base_image("alpine:latest".to_string())
         .with_default_tracing()
         .autorun(false)
+        //.with_clean(false)
         .build()
         .await
         .unwrap();
@@ -66,7 +89,9 @@
 async fn client_test(mayastor: &str, test: &ComposeTest) {
     orderly_start(&test).await;

-    let client = ActixRestClient::new("https://localhost:8080").unwrap().v0();
+    let client = ActixRestClient::new("https://localhost:8080", true)
+        .unwrap()
+        .v0();
     let nodes = client.get_nodes().await.unwrap();
     assert_eq!(nodes.len(), 1);
     assert_eq!(
@@ -169,13 +194,22 @@
     }
     );

-    let _ = client.add_nexus_child(AddNexusChild {
+    let child = client.add_nexus_child(AddNexusChild {
         node: nexus.node.clone(),
         nexus: nexus.uuid.clone(),
         uri: "malloc:///malloc2?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b1".to_string(),
         auto_rebuild: true,
     }).await.unwrap();

+    assert_eq!(
+        Some(&child),
+        client
+            .get_nexus_children(Filter::Nexus(nexus.uuid.clone()))
+            .await
+            .unwrap()
+            .last()
+    );
+
     client
         .destroy_nexus(DestroyNexus {
             node: nexus.node.clone(),

From 14c372968da9d470d882a73b055014b014d66e21 Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Thu, 7 Jan 2021 16:55:00 +0000
Subject: [PATCH 70/85] refactor: add actual types for each resource

Adds unique types for volume, nexus, child and replica IDs. For the
moment these are simple wrappers over strings to enhance type safety,
though we could potentially use them to add validation.
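The win is that previously interchangeable strings now carry distinct
types, so mixing up, say, a pool id and a node id becomes a compile error.
A minimal sketch of the pattern, simplified from the bus_impl_string_id!
macro introduced below (the real macro also derives the serde traits and
adds conversion helpers):

#[derive(Clone, Debug, Default, Eq, PartialEq, Hash)]
pub struct NodeId(pub String);
#[derive(Clone, Debug, Default, Eq, PartialEq, Hash)]
pub struct PoolId(pub String);

fn get_node(id: &NodeId) { /* ... */ }

get_node(&NodeId("node-test-name".to_string())); // ok
// get_node(&PoolId("pooloop".to_string()));     // rejected by the compiler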
--- composer/src/lib.rs | 19 +- mayastor/src/subsys/mbus/registration.rs | 10 +- mbus-api/src/message_bus/v0.rs | 38 +++- mbus-api/src/v0.rs | 214 +++++++++++------- nix/pkgs/mayastor/default.nix | 2 +- operators/node/src/main.rs | 14 +- rest/service/src/v0/children.rs | 48 ++-- rest/service/src/v0/nexuses.rs | 18 +- rest/service/src/v0/nodes.rs | 2 +- rest/service/src/v0/pools.rs | 16 +- rest/service/src/v0/replicas.rs | 46 ++-- rest/service/src/v0/volumes.rs | 18 +- rest/src/versions/v0.rs | 56 +++-- rest/tests/v0_test.rs | 51 +++-- services/common/src/lib.rs | 15 +- services/common/src/wrapper/v0/mod.rs | 10 +- services/common/src/wrapper/v0/node_traits.rs | 45 ++-- services/common/src/wrapper/v0/pool.rs | 62 ++--- services/common/src/wrapper/v0/registry.rs | 54 +++-- services/common/src/wrapper/v0/volume.rs | 62 ++--- services/node/src/server.rs | 34 ++- services/pool/src/server.rs | 8 +- services/pool/src/service.rs | 20 +- services/volume/src/server.rs | 24 +- services/volume/src/service.rs | 27 ++- 25 files changed, 521 insertions(+), 392 deletions(-) diff --git a/composer/src/lib.rs b/composer/src/lib.rs index c4e39b21f..fcbe77d0c 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -99,7 +99,7 @@ impl RpcHandle { pub struct Binary { path: String, arguments: Vec, - nats_arg: String, + nats_arg: Option, env: HashMap, } @@ -139,7 +139,7 @@ impl Binary { } /// Set the nats endpoint via the provided argument pub fn with_nats(mut self, arg: &str) -> Self { - self.nats_arg = arg.to_string(); + self.nats_arg = Some(arg.to_string()); self } /// Add environment variables for the container @@ -152,10 +152,11 @@ impl Binary { /// pick up the nats argument name for a particular binary from nats_arg /// and fill up the nats server endpoint using the network name fn setup_nats(&mut self, network: &str) { - if !self.nats_arg.is_empty() { - self.arguments.push(self.nats_arg.clone()); - self.arguments.push(format!("nats.{}:4222", network)); - self.nats_arg = String::new(); + if let Some(nats_arg) = self.nats_arg.take() { + if !nats_arg.is_empty() { + self.arguments.push(nats_arg); + self.arguments.push(format!("nats.{}:4222", network)); + } } } @@ -212,7 +213,6 @@ impl ContainerSpec { } Self { name: name.into(), - image: None, binary: Some(binary), init: Some(true), env, @@ -362,8 +362,7 @@ impl Builder { self.add_container_spec(ContainerSpec::from_binary(name, bin)) } - /// add a docker container - /// todo: still need to pull the image manually + /// add a docker container which will be pulled if not present pub fn add_container_image(self, name: &str, image: Binary) -> Builder { self.add_container_spec(ContainerSpec::from_binary(name, image)) } @@ -387,8 +386,6 @@ impl Builder { } /// use base image for all binary containers - /// note, the image must be present locally - /// todo: pull image, if not present pub fn with_base_image>>( mut self, image: S, diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs index f7a6f8e40..205ecfe69 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -45,8 +45,8 @@ pub enum Error { #[derive(Clone)] struct Configuration { - /// Name of the node that mayastor is running on - node: String, + /// Id of the node that mayastor is running on + node: NodeId, /// gRPC endpoint of the server provided by mayastor grpc_endpoint: String, /// heartbeat interval (how often the register message is sent) @@ -67,7 +67,9 @@ static MESSAGE_BUS_REG: OnceCell = OnceCell::new(); 
impl Registration { /// initialise the global registration instance pub(super) fn init(node: &str, grpc_endpoint: &str) { - MESSAGE_BUS_REG.get_or_init(|| Registration::new(node, grpc_endpoint)); + MESSAGE_BUS_REG.get_or_init(|| { + Registration::new(&NodeId::from(node), grpc_endpoint) + }); } /// terminate and re-register @@ -88,7 +90,7 @@ impl Registration { Ok(()) } - fn new(node: &str, grpc_endpoint: &str) -> Registration { + fn new(node: &NodeId, grpc_endpoint: &str) -> Registration { let (msg_sender, msg_receiver) = smol::channel::unbounded::<()>(); let config = Configuration { node: node.to_owned(), diff --git a/mbus-api/src/message_bus/v0.rs b/mbus-api/src/message_bus/v0.rs index 02bc40b0b..e37d0c4fa 100644 --- a/mbus-api/src/message_bus/v0.rs +++ b/mbus-api/src/message_bus/v0.rs @@ -30,7 +30,7 @@ pub type BusResult = Result; /// Node pub type Node = crate::v0::Node; -/// Nodes list +/// Node list pub type Nodes = crate::v0::Nodes; /// Pool pub type Pool = crate::v0::Pool; @@ -62,6 +62,8 @@ pub type Nexus = crate::v0::Nexus; pub type Nexuses = crate::v0::Nexuses; /// State of the nexus pub type NexusState = crate::v0::NexusState; +/// State of the volume +pub type VolumeState = crate::v0::VolumeState; /// Child of the nexus pub type Child = crate::v0::Child; /// State of the child @@ -82,7 +84,7 @@ pub type AddNexusChild = crate::v0::AddNexusChild; pub type Volume = crate::v0::Volume; /// Volumes pub type Volumes = crate::v0::Volumes; -/// Add Volume +/// Create Volume pub type CreateVolume = crate::v0::CreateVolume; /// Delete Volume pub type DestroyVolume = crate::v0::DestroyVolume; @@ -90,6 +92,18 @@ pub type DestroyVolume = crate::v0::DestroyVolume; pub type AddVolumeNexus = crate::v0::AddVolumeNexus; /// Remove Volume Nexus pub type RemoveVolumeNexus = crate::v0::RemoveVolumeNexus; +/// Id of a mayastor node +pub type NodeId = crate::v0::NodeId; +/// Id of a mayastor pool +pub type PoolId = crate::v0::PoolId; +/// UUID of a mayastor pool replica +pub type ReplicaId = crate::v0::ReplicaId; +/// UUID of a mayastor nexus +pub type NexusId = crate::v0::NexusId; +/// URI of a mayastor nexus child +pub type ChildUri = crate::v0::ChildUri; +/// UUID of a mayastor volume +pub type VolumeId = crate::v0::VolumeId; macro_rules! only_one { ($list:ident) => { @@ -117,10 +131,12 @@ pub trait MessageBusTrait: Sized { /// Get node with `id` #[tracing::instrument(level = "debug", err)] - async fn get_node(id: &str) -> BusResult { + async fn get_node(id: &NodeId) -> BusResult { let nodes = Self::get_nodes().await?; - let nodes = - nodes.into_iter().filter(|n| n.id == id).collect::>(); + let nodes = nodes + .into_iter() + .filter(|n| &n.id == id) + .collect::>(); only_one!(nodes) } @@ -186,13 +202,13 @@ pub trait MessageBusTrait: Sized { Ok(()) } - /// create replica + /// share replica #[tracing::instrument(level = "debug", err)] async fn share_replica(request: ShareReplica) -> BusResult { Ok(request.request().await?) 
} - /// create replica + /// unshare replica #[tracing::instrument(level = "debug", err)] async fn unshare_replica(request: UnshareReplica) -> BusResult<()> { let _ = request.request().await?; @@ -373,7 +389,7 @@ mod tests { orderly_start(&test).await?; - test_bus_backend(mayastor, &test).await?; + test_bus_backend(&NodeId::from(mayastor), &test).await?; // run with --nocapture to see all the logs test.logs_all().await?; @@ -381,7 +397,7 @@ mod tests { } async fn test_bus_backend( - mayastor: &str, + mayastor: &NodeId, test: &ComposeTest, ) -> Result<(), Box> { let nodes = MessageBus::get_nodes().await?; @@ -390,7 +406,7 @@ mod tests { assert_eq!( nodes.first().unwrap(), &Node { - id: mayastor.to_string(), + id: mayastor.clone(), grpc_endpoint: "0.0.0.0:10124".to_string(), state: NodeState::Online, } @@ -399,7 +415,7 @@ mod tests { assert_eq!( node, Node { - id: mayastor.to_string(), + id: mayastor.clone(), grpc_endpoint: "0.0.0.0:10124".to_string(), state: NodeState::Online, } diff --git a/mbus-api/src/v0.rs b/mbus-api/src/v0.rs index 9abd1ab35..7b5946931 100644 --- a/mbus-api/src/v0.rs +++ b/mbus-api/src/v0.rs @@ -171,11 +171,11 @@ bus_impl_message_all!( /// Registration /// Register message payload -#[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq)] +#[derive(Serialize, Deserialize, Default, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct Register { /// id of the mayastor instance - pub id: String, + pub id: NodeId, /// grpc_endpoint of the mayastor instance pub grpc_endpoint: String, } @@ -185,7 +185,7 @@ bus_impl_message_all!(Register, Register, (), Registry); #[derive(Serialize, Deserialize, Default, Debug, Clone)] pub struct Deregister { /// id of the mayastor instance - pub id: String, + pub id: NodeId, } bus_impl_message_all!(Deregister, Deregister, (), Registry); @@ -221,7 +221,7 @@ impl Default for NodeState { #[serde(rename_all = "camelCase")] pub struct Node { /// id of the mayastor instance - pub id: String, + pub id: NodeId, /// grpc_endpoint of the mayastor instance pub grpc_endpoint: String, /// deemed state of the node @@ -241,31 +241,31 @@ pub enum Filter { /// All objects None, /// Filter by Node id - Node(String), + Node(NodeId), /// Pool filters /// /// Filter by Pool id - Pool(String), + Pool(PoolId), /// Filter by Node and Pool id - NodePool(String, String), + NodePool(NodeId, PoolId), /// Filter by Node and Replica id - NodeReplica(String, String), + NodeReplica(NodeId, ReplicaId), /// Filter by Node, Pool and Replica id - NodePoolReplica(String, String, String), + NodePoolReplica(NodeId, PoolId, ReplicaId), /// Filter by Pool and Replica id - PoolReplica(String, String), + PoolReplica(PoolId, ReplicaId), /// Filter by Replica id - Replica(String), + Replica(ReplicaId), /// Volume filters /// /// Filter by Node and Nexus - NodeNexus(String, String), + NodeNexus(NodeId, NexusId), /// Filter by Nexus - Nexus(String), + Nexus(NexusId), /// Filter by Node and Volume - NodeVolume(String, String), + NodeVolume(NodeId, VolumeId), /// Filter by Volume - Volume(String), + Volume(VolumeId), } impl Default for Filter { fn default() -> Self { @@ -273,6 +273,62 @@ impl Default for Filter { } } +macro_rules! 
bus_impl_string_id { + ($Name:ident, $Doc:literal) => { + #[doc = $Doc] + #[derive(Serialize, Deserialize, Default, Debug, Clone, Eq, PartialEq, Hash)] + pub struct $Name(pub String); + + impl std::fmt::Display for $Name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } + } + + impl $Name { + /// Build Self from a string trait id + pub fn from>(id: T) -> Self { + $Name(id.into()) + } + /// Build Self from a string trait id + pub fn as_str<'a>(&'a self) -> &'a str { + self.0.as_str() + } + } + + impl From<&str> for $Name { + fn from(id: &str) -> Self { + $Name::from(id) + } + } + + impl From for $Name { + fn from(id: String) -> Self { + $Name::from(id.as_str()) + } + } + + impl Into<$Name> for &$Name { + fn into(self) -> $Name { + self.clone() + } + } + + impl Into for $Name { + fn into(self) -> String { + self.to_string() + } + } + }; +} + +bus_impl_string_id!(NodeId, "UUID of a mayastor node"); +bus_impl_string_id!(PoolId, "UUID of a mayastor pool"); +bus_impl_string_id!(ReplicaId, "UUID of a mayastor pool replica"); +bus_impl_string_id!(NexusId, "UUID of a mayastor nexus"); +bus_impl_string_id!(ChildUri, "URI of a mayastor nexus child"); +bus_impl_string_id!(VolumeId, "UUID of a mayastor volume"); + /// Pool Service /// Get all the pools from specific node or None for all nodes #[derive(Serialize, Deserialize, Default, Debug, Clone)] @@ -317,9 +373,9 @@ impl From for PoolState { #[serde(rename_all = "camelCase")] pub struct Pool { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub name: String, + pub node: NodeId, + /// id of the pool + pub id: PoolId, /// absolute disk paths claimed by the pool pub disks: Vec, /// current state of the pool @@ -367,9 +423,9 @@ impl PartialOrd for PoolState { #[serde(rename_all = "camelCase")] pub struct CreatePool { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub name: String, + pub node: NodeId, + /// id of the pool + pub id: PoolId, /// disk device paths or URIs to be claimed by the pool pub disks: Vec, } @@ -380,9 +436,9 @@ bus_impl_message_all!(CreatePool, CreatePool, Pool, Pool); #[serde(rename_all = "camelCase")] pub struct DestroyPool { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub name: String, + pub node: NodeId, + /// id of the pool + pub id: PoolId, } bus_impl_message_all!(DestroyPool, DestroyPool, (), Pool); @@ -402,11 +458,11 @@ pub struct GetReplicas { #[serde(rename_all = "camelCase")] pub struct Replica { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the replica - pub uuid: String, - /// name of the pool - pub pool: String, + pub uuid: ReplicaId, + /// id of the pool + pub pool: PoolId, /// thin provisioning pub thin: bool, /// size of the replica in bytes @@ -425,11 +481,11 @@ bus_impl_message_all!(GetReplicas, GetReplicas, Replicas, Pool); #[serde(rename_all = "camelCase")] pub struct CreateReplica { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the replica - pub uuid: String, - /// name of the pool - pub pool: String, + pub uuid: ReplicaId, + /// id of the pool + pub pool: PoolId, /// size of the replica in bytes pub size: u64, /// thin provisioning @@ -444,11 +500,11 @@ bus_impl_message_all!(CreateReplica, CreateReplica, Replica, Pool); #[serde(rename_all = "camelCase")] pub struct DestroyReplica { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub pool: String, + pub node: NodeId, 
+ /// id of the pool + pub pool: PoolId, /// uuid of the replica - pub uuid: String, + pub uuid: ReplicaId, } bus_impl_message_all!(DestroyReplica, DestroyReplica, (), Pool); @@ -457,11 +513,11 @@ bus_impl_message_all!(DestroyReplica, DestroyReplica, (), Pool); #[serde(rename_all = "camelCase")] pub struct ShareReplica { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub pool: String, + pub node: NodeId, + /// id of the pool + pub pool: PoolId, /// uuid of the replica - pub uuid: String, + pub uuid: ReplicaId, /// protocol used for exposing the replica pub protocol: Protocol, } @@ -472,11 +528,11 @@ bus_impl_message_all!(ShareReplica, ShareReplica, String, Pool); #[serde(rename_all = "camelCase")] pub struct UnshareReplica { /// id of the mayastor instance - pub node: String, - /// name of the pool - pub pool: String, + pub node: NodeId, + /// id of the pool + pub pool: PoolId, /// uuid of the replica - pub uuid: String, + pub uuid: ReplicaId, } bus_impl_message_all!(UnshareReplica, UnshareReplica, (), Pool); @@ -522,11 +578,11 @@ impl From for Protocol { pub enum ReplicaState { /// unknown state Unknown = 0, - /// the pool is in normal working order + /// the replica is in normal working order Online = 1, - /// the pool has experienced a failure but can still function + /// the replica has experienced a failure but can still function Degraded = 2, - /// the pool is completely inaccessible + /// the replica is completely inaccessible Faulted = 3, } @@ -560,9 +616,9 @@ pub struct GetNexuses { #[serde(rename_all = "camelCase")] pub struct Nexus { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub uuid: String, + pub uuid: NexusId, /// size of the volume in bytes pub size: u64, /// current state of the nexus @@ -581,7 +637,7 @@ pub struct Nexus { #[serde(rename_all = "camelCase")] pub struct Child { /// uri of the child device - pub uri: String, + pub uri: ChildUri, /// state of the child pub state: ChildState, /// current rebuild progress (%) @@ -654,16 +710,16 @@ bus_impl_message_all!(GetNexuses, GetNexuses, Nexuses, Nexus); #[serde(rename_all = "camelCase")] pub struct CreateNexus { /// id of the mayastor instance - pub node: String, - /// this UUID will be set in as the UUID - pub uuid: String, + pub node: NodeId, + /// the nexus uuid will be set to this + pub uuid: NexusId, /// size of the device in bytes pub size: u64, /// replica can be iscsi and nvmf remote targets or a local spdk bdev /// (i.e. bdev:///name-of-the-bdev). 
/// /// uris to the targets we connect to - pub children: Vec, + pub children: Vec, } bus_impl_message_all!(CreateNexus, CreateNexus, Nexus, Nexus); @@ -672,9 +728,9 @@ bus_impl_message_all!(CreateNexus, CreateNexus, Nexus, Nexus); #[serde(rename_all = "camelCase")] pub struct DestroyNexus { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub uuid: String, + pub uuid: NexusId, } bus_impl_message_all!(DestroyNexus, DestroyNexus, (), Nexus); @@ -683,9 +739,9 @@ bus_impl_message_all!(DestroyNexus, DestroyNexus, (), Nexus); #[serde(rename_all = "camelCase")] pub struct ShareNexus { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub uuid: String, + pub uuid: NexusId, /// encryption key pub key: Option, /// share protocol @@ -698,9 +754,9 @@ bus_impl_message_all!(ShareNexus, ShareNexus, String, Nexus); #[serde(rename_all = "camelCase")] pub struct UnshareNexus { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub uuid: String, + pub uuid: NexusId, } bus_impl_message_all!(UnshareNexus, UnshareNexus, (), Nexus); @@ -709,11 +765,11 @@ bus_impl_message_all!(UnshareNexus, UnshareNexus, (), Nexus); #[serde(rename_all = "camelCase")] pub struct RemoveNexusChild { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub nexus: String, + pub nexus: NexusId, /// URI of the child device to be removed - pub uri: String, + pub uri: ChildUri, } bus_impl_message_all!(RemoveNexusChild, RemoveNexusChild, (), Nexus); @@ -722,11 +778,11 @@ bus_impl_message_all!(RemoveNexusChild, RemoveNexusChild, (), Nexus); #[serde(rename_all = "camelCase")] pub struct AddNexusChild { /// id of the mayastor instance - pub node: String, + pub node: NodeId, /// uuid of the nexus - pub nexus: String, + pub nexus: NexusId, /// URI of the child device to be added - pub uri: String, + pub uri: ChildUri, /// auto start rebuilding pub auto_rebuild: bool, } @@ -739,15 +795,19 @@ bus_impl_message_all!(AddNexusChild, AddNexusChild, Child, Nexus); #[serde(rename_all = "camelCase")] pub struct Volume { /// name of the volume - pub uuid: String, + pub uuid: VolumeId, /// size of the volume in bytes pub size: u64, /// current state of the volume - pub state: NexusState, + pub state: VolumeState, /// array of children nexuses pub children: Vec, } +/// Volume State information +/// Currently it's the same as the nexus +pub type VolumeState = NexusState; + /// Get volumes #[derive(Serialize, Deserialize, Default, Debug, Clone)] #[serde(rename_all = "camelCase")] @@ -763,7 +823,7 @@ bus_impl_message_all!(GetVolumes, GetVolumes, Volumes, Volume); #[serde(rename_all = "camelCase")] pub struct CreateVolume { /// uuid of the volume - pub uuid: String, + pub uuid: VolumeId, /// size of the volume in bytes pub size: u64, /// number of children nexuses (ANA) @@ -772,13 +832,13 @@ pub struct CreateVolume { pub replicas: u64, /// only these nodes can be used for the replicas #[serde(default)] - pub allowed_nodes: Vec, + pub allowed_nodes: Vec, /// preferred nodes for the replicas #[serde(default)] - pub preferred_nodes: Vec, + pub preferred_nodes: Vec, /// preferred nodes for the nexuses #[serde(default)] - pub preferred_nexus_nodes: Vec, + pub preferred_nexus_nodes: Vec, } bus_impl_message_all!(CreateVolume, CreateVolume, Volume, Volume); @@ -787,7 +847,7 @@ bus_impl_message_all!(CreateVolume, CreateVolume, Volume, Volume); #[serde(rename_all = "camelCase")] pub struct 
DestroyVolume { /// uuid of the volume - pub uuid: String, + pub uuid: VolumeId, } bus_impl_message_all!(DestroyVolume, DestroyVolume, (), Volume); @@ -796,9 +856,9 @@ bus_impl_message_all!(DestroyVolume, DestroyVolume, (), Volume); #[serde(rename_all = "camelCase")] pub struct AddVolumeNexus { /// uuid of the volume - pub uuid: String, + pub uuid: VolumeId, /// preferred node id for the nexus - pub preferred_node: Option, + pub preferred_node: Option, } bus_impl_message_all!(AddVolumeNexus, AddVolumeNexus, Nexus, Volume); @@ -807,8 +867,8 @@ bus_impl_message_all!(AddVolumeNexus, AddVolumeNexus, Nexus, Volume); #[serde(rename_all = "camelCase")] pub struct RemoveVolumeNexus { /// uuid of the volume - pub uuid: String, + pub uuid: VolumeId, /// id of the node where the nexus lives - pub node: Option, + pub node: Option, } bus_impl_message_all!(RemoveVolumeNexus, RemoveVolumeNexus, (), Volume); diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix index 3cac55000..4c62e6793 100644 --- a/nix/pkgs/mayastor/default.nix +++ b/nix/pkgs/mayastor/default.nix @@ -39,7 +39,7 @@ let buildProps = rec { name = "mayastor"; #cargoSha256 = "0000000000000000000000000000000000000000000000000000"; - cargoSha256 = "127jpjmsqdhpbgkvp4q1j7xzmbp5d3allcnpcbwxmk701f2z3bmh"; + cargoSha256 = "1c93jzly0pa2k7h40m4fn86v39n8a9kra2087rxnqa9nk0gw0lha"; inherit version; src = whitelistSource ../../../. [ "Cargo.lock" diff --git a/operators/node/src/main.rs b/operators/node/src/main.rs index c87745813..3798a2df8 100644 --- a/operators/node/src/main.rs +++ b/operators/node/src/main.rs @@ -40,7 +40,7 @@ impl TryFrom<&MayastorNode> for Node { type Error = strum::ParseError; fn try_from(kube_node: &MayastorNode) -> Result { Ok(Node { - id: kube_node.name(), + id: NodeId::from(kube_node.name()), grpc_endpoint: kube_node.spec.grpc_endpoint.clone(), state: kube_node .status @@ -69,7 +69,7 @@ fn init_tracing() -> Option<(Tracer, Uninstall)> { global::set_text_map_propagator(TraceContextPropagator::new()); let (_tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() .with_agent_endpoint(agent) - .with_service_name("rest-server") + .with_service_name("node-operator") .install() .expect("Jaeger pipeline install error"); Some((_tracer, _uninstall)) @@ -134,7 +134,7 @@ async fn polling_work( .filter(|node| { !kube_nodes .iter() - .any(|kube_node| kube_node.name() == node.id) + .any(|kube_node| kube_node.name() == node.id.to_string()) }) .collect::>(); @@ -142,7 +142,9 @@ async fn polling_work( let delete_nodes = kube_nodes .iter() .filter(|kube_node| { - !rest_nodes.iter().any(|node| kube_node.name() == node.id) + !rest_nodes + .iter() + .any(|node| kube_node.name() == node.id.to_string()) }) .collect::>(); @@ -224,7 +226,7 @@ async fn node_create( node: &Node, ) -> anyhow::Result<()> { let kube_node = MayastorNode::new( - &node.id, + node.id.as_str(), MayastorNodeSpec { grpc_endpoint: node.grpc_endpoint.clone(), }, @@ -255,7 +257,7 @@ async fn node_update( let post_params = PostParams::default(); let status = Some(node.state.to_string()); - let mut kube_node = nodes_api.get(&node.id).await?; + let mut kube_node = nodes_api.get(node.id.as_str()).await?; kube_node.status = status.clone(); let kube_node = nodes_api diff --git a/rest/service/src/v0/children.rs b/rest/service/src/v0/children.rs index d9b97e652..41000e6f7 100644 --- a/rest/service/src/v0/children.rs +++ b/rest/service/src/v0/children.rs @@ -19,20 +19,20 @@ pub(crate) fn factory() -> impl HttpServiceFactory { #[get("/v0/nexuses/{nexus_id}/children")] 
async fn get_nexus_children( - web::Path(nexus_id): web::Path, + web::Path(nexus_id): web::Path, ) -> impl Responder { get_children_response(Filter::Nexus(nexus_id)).await } #[get("/v0/nodes/{node_id}/nexuses/{nexus_id}/children")] async fn get_node_nexus_children( - web::Path((node_id, nexus_id)): web::Path<(String, String)>, + web::Path((node_id, nexus_id)): web::Path<(NodeId, NexusId)>, ) -> impl Responder { get_children_response(Filter::NodeNexus(node_id, nexus_id)).await } #[get("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] async fn get_nexus_child( - web::Path((nexus_id, child_id)): web::Path<(String, String)>, + web::Path((nexus_id, child_id)): web::Path<(NexusId, ChildUri)>, req: HttpRequest, ) -> impl Responder { get_child_response(child_id, req, Filter::Nexus(nexus_id)).await @@ -40,9 +40,9 @@ async fn get_nexus_child( #[get("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] async fn get_node_nexus_child( web::Path((node_id, nexus_id, child_id)): web::Path<( - String, - String, - String, + NodeId, + NexusId, + ChildUri, )>, req: HttpRequest, ) -> impl Responder { @@ -52,7 +52,7 @@ async fn get_node_nexus_child( #[put("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] async fn add_nexus_child( - web::Path((nexus_id, child_id)): web::Path<(String, String)>, + web::Path((nexus_id, child_id)): web::Path<(NexusId, ChildUri)>, req: HttpRequest, ) -> impl Responder { add_child_filtered(child_id, req, Filter::Nexus(nexus_id)).await @@ -60,9 +60,9 @@ async fn add_nexus_child( #[put("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] async fn add_node_nexus_child( web::Path((node_id, nexus_id, child_id)): web::Path<( - String, - String, - String, + NodeId, + NexusId, + ChildUri, )>, req: HttpRequest, ) -> impl Responder { @@ -72,7 +72,7 @@ async fn add_node_nexus_child( #[delete("/v0/nexuses/{nexus_id}/children/{child_id:.*}")] async fn delete_nexus_child( - web::Path((nexus_id, child_id)): web::Path<(String, String)>, + web::Path((nexus_id, child_id)): web::Path<(NexusId, ChildUri)>, req: HttpRequest, ) -> impl Responder { delete_child_filtered(child_id, req, Filter::Nexus(nexus_id)).await @@ -80,9 +80,9 @@ async fn delete_nexus_child( #[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}/children/{child_id:.*}")] async fn delete_node_nexus_child( web::Path((node_id, nexus_id, child_id)): web::Path<( - String, - String, - String, + NodeId, + NexusId, + ChildUri, )>, req: HttpRequest, ) -> impl Responder { @@ -98,7 +98,7 @@ async fn get_children_response( } async fn get_child_response( - child_id: String, + child_id: ChildUri, req: HttpRequest, filter: Filter, ) -> Result { @@ -108,8 +108,11 @@ async fn get_child_response( RestRespond::ok(child) } -fn find_nexus_child(nexus: &Nexus, child_uri: &str) -> Result { - if let Some(child) = nexus.children.iter().find(|&c| c.uri == child_uri) { +fn find_nexus_child( + nexus: &Nexus, + child_uri: &ChildUri, +) -> Result { + if let Some(child) = nexus.children.iter().find(|&c| &c.uri == child_uri) { Ok(child.clone()) } else { Err(BusError::NotFound) @@ -117,7 +120,7 @@ fn find_nexus_child(nexus: &Nexus, child_uri: &str) -> Result { } async fn add_child_filtered( - child_id: String, + child_id: ChildUri, req: HttpRequest, filter: Filter, ) -> impl Responder { @@ -138,7 +141,7 @@ async fn add_child_filtered( } async fn delete_child_filtered( - child_id: String, + child_id: ChildUri, req: HttpRequest, filter: Filter, ) -> impl Responder { @@ -157,8 +160,9 @@ async fn delete_child_filtered( 
RestRespond::result(MessageBus::remove_nexus_child(destroy).await) } -fn build_child_uri(child_id: String, req: HttpRequest) -> String { - match url::Url::parse(&child_id) { +fn build_child_uri(child_id: ChildUri, req: HttpRequest) -> ChildUri { + let child_id = child_id.to_string(); + ChildUri::from(match url::Url::parse(child_id.as_str()) { Ok(_) => { if req.query_string().is_empty() { child_id @@ -170,5 +174,5 @@ fn build_child_uri(child_id: String, req: HttpRequest) -> String { // not a URL, it's probably legacy, default to AIO format!("aio://{}", child_id) } - } + }) } diff --git a/rest/service/src/v0/nexuses.rs b/rest/service/src/v0/nexuses.rs index 4e333ad53..e79d21741 100644 --- a/rest/service/src/v0/nexuses.rs +++ b/rest/service/src/v0/nexuses.rs @@ -23,19 +23,19 @@ async fn get_nexuses() -> impl Responder { RestRespond::result(MessageBus::get_nexuses(Filter::None).await) } #[get("/v0/nexuses/{nexus_id}")] -async fn get_nexus(web::Path(nexus_id): web::Path) -> impl Responder { +async fn get_nexus(web::Path(nexus_id): web::Path) -> impl Responder { RestRespond::result(MessageBus::get_nexuses(Filter::Nexus(nexus_id)).await) } #[get("/v0/nodes/{id}/nexuses")] async fn get_node_nexuses( - web::Path(node_id): web::Path, + web::Path(node_id): web::Path, ) -> impl Responder { RestRespond::result(MessageBus::get_nexuses(Filter::Node(node_id)).await) } #[get("/v0/nodes/{node_id}/nexuses/{nexus_id}")] async fn get_node_nexus( - web::Path((node_id, nexus_id)): web::Path<(String, String)>, + web::Path((node_id, nexus_id)): web::Path<(NodeId, NexusId)>, ) -> impl Responder { RestRespond::result( MessageBus::get_nexus(Filter::NodeNexus(node_id, nexus_id)).await, @@ -44,7 +44,7 @@ async fn get_node_nexus( #[put("/v0/nodes/{node_id}/nexuses/{nexus_id}")] async fn put_node_nexus( - web::Path((node_id, nexus_id)): web::Path<(String, String)>, + web::Path((node_id, nexus_id)): web::Path<(NodeId, NexusId)>, create: web::Json, ) -> impl Responder { let create = create.into_inner().bus_request(node_id, nexus_id); @@ -53,20 +53,20 @@ async fn put_node_nexus( #[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}")] async fn del_node_nexus( - web::Path((node_id, nexus_id)): web::Path<(String, String)>, + web::Path((node_id, nexus_id)): web::Path<(NodeId, NexusId)>, ) -> impl Responder { destroy_nexus(Filter::NodeNexus(node_id, nexus_id)).await } #[delete("/v0/nexuses/{nexus_id}")] -async fn del_nexus(web::Path(nexus_id): web::Path) -> impl Responder { +async fn del_nexus(web::Path(nexus_id): web::Path) -> impl Responder { destroy_nexus(Filter::Nexus(nexus_id)).await } #[put("/v0/nodes/{node_id}/nexuses/{nexus_id}/share/{protocol}")] async fn put_node_nexus_share( web::Path((node_id, nexus_id, protocol)): web::Path<( - String, - String, + NodeId, + NexusId, Protocol, )>, ) -> impl Responder { @@ -81,7 +81,7 @@ async fn put_node_nexus_share( #[delete("/v0/nodes/{node_id}/nexuses/{nexus_id}/share")] async fn del_node_nexus_share( - web::Path((node_id, nexus_id)): web::Path<(String, String)>, + web::Path((node_id, nexus_id)): web::Path<(NodeId, NexusId)>, ) -> impl Responder { let unshare = UnshareNexus { node: node_id, diff --git a/rest/service/src/v0/nodes.rs b/rest/service/src/v0/nodes.rs index 01d7e65f1..e49e68e3f 100644 --- a/rest/service/src/v0/nodes.rs +++ b/rest/service/src/v0/nodes.rs @@ -16,6 +16,6 @@ async fn get_nodes() -> impl Responder { RestRespond::result(MessageBus::get_nodes().await) } #[get("/v0/nodes/{id}")] -async fn get_node(web::Path(node_id): web::Path) -> impl Responder { +async fn 
get_node(web::Path(node_id): web::Path) -> impl Responder { RestRespond::result(MessageBus::get_node(&node_id).await) } diff --git a/rest/service/src/v0/pools.rs b/rest/service/src/v0/pools.rs index e62814505..f0361eb8f 100644 --- a/rest/service/src/v0/pools.rs +++ b/rest/service/src/v0/pools.rs @@ -22,20 +22,20 @@ async fn get_pools() -> impl Responder { } #[get("/v0/pools/{id}")] -async fn get_pool(web::Path(pool_id): web::Path) -> impl Responder { +async fn get_pool(web::Path(pool_id): web::Path) -> impl Responder { RestRespond::result(MessageBus::get_pool(Filter::Pool(pool_id)).await) } #[get("/v0/nodes/{id}/pools")] async fn get_node_pools( - web::Path(node_id): web::Path, + web::Path(node_id): web::Path, ) -> impl Responder { RestRespond::result(MessageBus::get_pools(Filter::Node(node_id)).await) } #[get("/v0/nodes/{node_id}/pools/{pool_id}")] async fn get_node_pool( - web::Path((node_id, pool_id)): web::Path<(String, String)>, + web::Path((node_id, pool_id)): web::Path<(NodeId, PoolId)>, ) -> impl Responder { RestRespond::result( MessageBus::get_pool(Filter::NodePool(node_id, pool_id)).await, @@ -44,7 +44,7 @@ async fn get_node_pool( #[put("/v0/nodes/{node_id}/pools/{pool_id}")] async fn put_node_pool( - web::Path((node_id, pool_id)): web::Path<(String, String)>, + web::Path((node_id, pool_id)): web::Path<(NodeId, PoolId)>, create: web::Json, ) -> impl Responder { let create = create.into_inner().bus_request(node_id, pool_id); @@ -53,12 +53,12 @@ async fn put_node_pool( #[delete("/v0/nodes/{node_id}/pools/{pool_id}")] async fn del_node_pool( - web::Path((node_id, pool_id)): web::Path<(String, String)>, + web::Path((node_id, pool_id)): web::Path<(NodeId, PoolId)>, ) -> impl Responder { destroy_pool(Filter::NodePool(node_id, pool_id)).await } #[delete("/v0/pools/{pool_id}")] -async fn del_pool(web::Path(pool_id): web::Path) -> impl Responder { +async fn del_pool(web::Path(pool_id): web::Path) -> impl Responder { destroy_pool(Filter::Pool(pool_id)).await } @@ -66,7 +66,7 @@ async fn destroy_pool(filter: Filter) -> impl Responder { let destroy = match filter.clone() { Filter::NodePool(node_id, pool_id) => DestroyPool { node: node_id, - name: pool_id, + id: pool_id, }, Filter::Pool(pool_id) => { let node_id = match MessageBus::get_pool(filter).await { @@ -75,7 +75,7 @@ async fn destroy_pool(filter: Filter) -> impl Responder { }; DestroyPool { node: node_id, - name: pool_id, + id: pool_id, } } _ => return (RestError::from(BusError::NotFound)).into(), diff --git a/rest/service/src/v0/replicas.rs b/rest/service/src/v0/replicas.rs index 08e7f7841..7e47c902f 100644 --- a/rest/service/src/v0/replicas.rs +++ b/rest/service/src/v0/replicas.rs @@ -29,7 +29,7 @@ async fn get_replicas() -> impl Responder { } #[get("/v0/replicas/{id}")] async fn get_replica( - web::Path(replica_id): web::Path, + web::Path(replica_id): web::Path, ) -> impl Responder { RestRespond::result( MessageBus::get_replica(Filter::Replica(replica_id)).await, @@ -38,14 +38,14 @@ async fn get_replica( #[get("/v0/nodes/{id}/replicas")] async fn get_node_replicas( - web::Path(node_id): web::Path, + web::Path(node_id): web::Path, ) -> impl Responder { RestRespond::result(MessageBus::get_replicas(Filter::Node(node_id)).await) } #[get("/v0/nodes/{node_id}/pools/{pool_id}/replicas")] async fn get_node_pool_replicas( - web::Path((node_id, pool_id)): web::Path<(String, String)>, + web::Path((node_id, pool_id)): web::Path<(NodeId, PoolId)>, ) -> impl Responder { RestRespond::result( MessageBus::get_replicas(Filter::NodePool(node_id, 
pool_id)).await, @@ -54,9 +54,9 @@ async fn get_node_pool_replicas( #[get("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")] async fn get_node_pool_replica( web::Path((node_id, pool_id, replica_id)): web::Path<( - String, - String, - String, + NodeId, + PoolId, + ReplicaId, )>, ) -> impl Responder { RestRespond::result( @@ -70,9 +70,9 @@ async fn get_node_pool_replica( #[put("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")] async fn put_node_pool_replica( web::Path((node_id, pool_id, replica_id)): web::Path<( - String, - String, - String, + NodeId, + PoolId, + ReplicaId, )>, create: web::Json, ) -> impl Responder { @@ -84,7 +84,7 @@ async fn put_node_pool_replica( } #[put("/v0/pools/{pool_id}/replicas/{replica_id}")] async fn put_pool_replica( - web::Path((pool_id, replica_id)): web::Path<(String, String)>, + web::Path((pool_id, replica_id)): web::Path<(PoolId, ReplicaId)>, create: web::Json, ) -> impl Responder { put_replica( @@ -97,16 +97,16 @@ async fn put_pool_replica( #[delete("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}")] async fn del_node_pool_replica( web::Path((node_id, pool_id, replica_id)): web::Path<( - String, - String, - String, + NodeId, + PoolId, + ReplicaId, )>, ) -> impl Responder { destroy_replica(Filter::NodePoolReplica(node_id, pool_id, replica_id)).await } #[delete("/v0/pools/{pool_id}/replicas/{replica_id}")] async fn del_pool_replica( - web::Path((pool_id, replica_id)): web::Path<(String, String)>, + web::Path((pool_id, replica_id)): web::Path<(PoolId, ReplicaId)>, ) -> impl Responder { destroy_replica(Filter::PoolReplica(pool_id, replica_id)).await } @@ -114,9 +114,9 @@ async fn del_pool_replica( #[put("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share/{protocol}")] async fn put_node_pool_replica_share( web::Path((node_id, pool_id, replica_id, protocol)): web::Path<( - String, - String, - String, + NodeId, + PoolId, + ReplicaId, Protocol, )>, ) -> impl Responder { @@ -129,8 +129,8 @@ async fn put_node_pool_replica_share( #[put("/v0/pools/{pool_id}/replicas/{replica_id}/share/{protocol}")] async fn put_pool_replica_share( web::Path((pool_id, replica_id, protocol)): web::Path<( - String, - String, + PoolId, + ReplicaId, Protocol, )>, ) -> impl Responder { @@ -140,16 +140,16 @@ async fn put_pool_replica_share( #[delete("/v0/nodes/{node_id}/pools/{pool_id}/replicas/{replica_id}/share")] async fn del_node_pool_replica_share( web::Path((node_id, pool_id, replica_id)): web::Path<( - String, - String, - String, + NodeId, + PoolId, + ReplicaId, )>, ) -> impl Responder { unshare_replica(Filter::NodePoolReplica(node_id, pool_id, replica_id)).await } #[delete("/v0/pools/{pool_id}/replicas/{replica_id}/share")] async fn del_pool_replica_share( - web::Path((pool_id, replica_id)): web::Path<(String, String)>, + web::Path((pool_id, replica_id)): web::Path<(PoolId, ReplicaId)>, ) -> impl Responder { unshare_replica(Filter::PoolReplica(pool_id, replica_id)).await } diff --git a/rest/service/src/v0/volumes.rs b/rest/service/src/v0/volumes.rs index 0b32187ce..b15b75833 100644 --- a/rest/service/src/v0/volumes.rs +++ b/rest/service/src/v0/volumes.rs @@ -8,7 +8,7 @@ impl HttpServiceFactory for Factory { get_node_volumes.register(config); get_node_volume.register(config); put_volume.register(config); - del_nexus.register(config); + del_volume.register(config); } } pub(crate) fn factory() -> impl HttpServiceFactory { @@ -21,19 +21,21 @@ async fn get_volumes() -> impl Responder { } #[get("/v0/volumes/{volume_id}")] -async fn 
get_volume(web::Path(volume_id): web::Path) -> impl Responder { +async fn get_volume( + web::Path(volume_id): web::Path, +) -> impl Responder { RestRespond::result(MessageBus::get_volume(Filter::Volume(volume_id)).await) } #[get("/v0/nodes/{node_id}/volumes")] async fn get_node_volumes( - web::Path(node_id): web::Path, + web::Path(node_id): web::Path, ) -> impl Responder { RestRespond::result(MessageBus::get_volumes(Filter::Node(node_id)).await) } #[get("/v0/nodes/{node_id}/volumes/{volume_id}")] async fn get_node_volume( - web::Path((node_id, volume_id)): web::Path<(String, String)>, + web::Path((node_id, volume_id)): web::Path<(NodeId, VolumeId)>, ) -> impl Responder { RestRespond::result( MessageBus::get_volume(Filter::NodeVolume(node_id, volume_id)).await, @@ -42,7 +44,7 @@ async fn get_node_volume( #[put("/v0/volumes/{volume_id}")] async fn put_volume( - web::Path(volume_id): web::Path, + web::Path(volume_id): web::Path, create: web::Json, ) -> impl Responder { let create = create.into_inner().bus_request(volume_id); @@ -50,9 +52,11 @@ async fn put_volume( } #[delete("/v0/volumes/{volume_id}")] -async fn del_nexus(web::Path(volume_id): web::Path) -> impl Responder { +async fn del_volume( + web::Path(volume_id): web::Path, +) -> impl Responder { let request = DestroyVolume { - uuid: volume_id.to_string(), + uuid: volume_id, }; RestRespond::result(MessageBus::delete_volume(request).await) } diff --git a/rest/src/versions/v0.rs b/rest/src/versions/v0.rs index d160f1fd6..a65a80b04 100644 --- a/rest/src/versions/v0.rs +++ b/rest/src/versions/v0.rs @@ -18,11 +18,11 @@ pub type Node = v0::Node; pub type Nodes = v0::Nodes; /// Pool from the node service pub type Pool = v0::Pool; -/// Vector of Pools from the node service +/// Vector of Pools from the pool service pub type Pools = v0::Pools; /// Replica pub type Replica = v0::Replica; -/// Vector of Replicas from the node service +/// Vector of Replicas from the pool service pub type Replicas = v0::Replicas; /// Replica protocol pub type Protocol = v0::Protocol; @@ -65,12 +65,12 @@ impl CreatePoolBody { /// convert into message bus type pub fn bus_request( &self, - node_id: String, - pool_id: String, + node_id: NodeId, + pool_id: PoolId, ) -> v0::CreatePool { v0::CreatePool { node: node_id, - name: pool_id, + id: pool_id, disks: self.disks.clone(), } } @@ -88,9 +88,9 @@ impl CreateReplicaBody { /// convert into message bus type pub fn bus_request( &self, - node_id: String, - pool_id: String, - uuid: String, + node_id: NodeId, + pool_id: PoolId, + uuid: ReplicaId, ) -> v0::CreateReplica { v0::CreateReplica { node: node_id, @@ -110,6 +110,8 @@ pub type Nexus = v0::Nexus; pub type Nexuses = v0::Nexuses; /// State of the nexus pub type NexusState = v0::NexusState; +/// State of the nexus +pub type VolumeState = v0::VolumeState; /// Child of the nexus pub type Child = v0::Child; /// State of the child @@ -132,7 +134,7 @@ pub struct CreateNexusBody { /// (i.e. bdev:///name-of-the-bdev). 
/// /// uris to the targets we connect to - pub children: Vec, + pub children: Vec, } impl From for CreateNexusBody { fn from(create: CreateNexus) -> Self { @@ -146,8 +148,8 @@ impl CreateNexusBody { /// convert into message bus type pub fn bus_request( &self, - node_id: String, - nexus_id: String, + node_id: NodeId, + nexus_id: NexusId, ) -> v0::CreateNexus { v0::CreateNexus { node: node_id, @@ -165,10 +167,22 @@ pub type AddNexusChild = v0::AddNexusChild; pub type Volume = v0::Volume; /// Volumes pub type Volumes = v0::Volumes; -/// Add Volume +/// Create Volume pub type CreateVolume = v0::CreateVolume; /// Destroy Volume pub type DestroyVolume = v0::DestroyVolume; +/// Id of a mayastor node +pub type NodeId = v0::NodeId; +/// Id of a mayastor pool +pub type PoolId = v0::PoolId; +/// UUID of a mayastor pool replica +pub type ReplicaId = v0::ReplicaId; +/// UUID of a mayastor nexus +pub type NexusId = v0::NexusId; +/// URI of a mayastor nexus child +pub type ChildUri = v0::ChildUri; +/// UUID of a mayastor volume +pub type VolumeId = v0::VolumeId; /// Create Volume Body JSON #[derive(Serialize, Deserialize, Default, Debug, Clone)] @@ -181,13 +195,13 @@ pub struct CreateVolumeBody { pub replicas: u64, /// only these nodes can be used for the replicas #[serde(default)] - pub allowed_nodes: Vec, + pub allowed_nodes: Vec, /// preferred nodes for the replicas #[serde(default)] - pub preferred_nodes: Vec, + pub preferred_nodes: Vec, /// preferred nodes for the nexuses #[serde(default)] - pub preferred_nexus_nodes: Vec, + pub preferred_nexus_nodes: Vec, } impl From for CreateVolumeBody { fn from(create: CreateVolume) -> Self { @@ -203,7 +217,7 @@ impl From for CreateVolumeBody { } impl CreateVolumeBody { /// convert into message bus type - pub fn bus_request(&self, volume_id: String) -> CreateVolume { + pub fn bus_request(&self, volume_id: VolumeId) -> CreateVolume { CreateVolume { uuid: volume_id, size: self.size, @@ -246,7 +260,7 @@ pub trait RestClient { /// Unshare replica with arguments async fn unshare_replica(&self, args: UnshareReplica) -> anyhow::Result<()>; - /// Get all the known pools + /// Get all the known nexuses async fn get_nexuses(&self, filter: Filter) -> anyhow::Result>; /// Create new nexus with arguments async fn create_nexus(&self, args: CreateNexus) -> anyhow::Result; @@ -384,13 +398,13 @@ impl RestClient for ActixRestClient { } async fn create_pool(&self, args: CreatePool) -> anyhow::Result { - let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.id); let pool = self.put(urn, CreatePoolBody::from(args)).await?; Ok(pool) } async fn destroy_pool(&self, args: DestroyPool) -> anyhow::Result<()> { - let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.name); + let urn = format!("/v0/nodes/{}/pools/{}", &args.node, &args.id); self.del(urn).await?; Ok(()) } @@ -504,12 +518,12 @@ impl RestClient for ActixRestClient { &self, args: RemoveNexusChild, ) -> anyhow::Result<()> { - let urn = match url::Url::parse(&args.uri) { + let urn = match url::Url::parse(args.uri.as_str()) { Ok(uri) => { // remove initial '/' uri.path()[1 ..].to_string() } - _ => args.uri.clone(), + _ => args.uri.to_string(), }; self.del(urn).await?; Ok(()) diff --git a/rest/tests/v0_test.rs b/rest/tests/v0_test.rs index 432a7a8cb..8570d97fb 100644 --- a/rest/tests/v0_test.rs +++ b/rest/tests/v0_test.rs @@ -75,18 +75,21 @@ async fn client() { .with_portmap("6831/udp", "6831/udp") .with_portmap("6832/udp", "6832/udp"), ) + // 
uncomment to run alpine commands within the containers //.with_base_image("alpine:latest".to_string()) .with_default_tracing() .autorun(false) + // uncomment to leave containers running allowing us access the jaeger + // traces at localhost:16686 //.with_clean(false) .build() .await .unwrap(); - client_test(mayastor, &test).await; + client_test(&mayastor.into(), &test).await; } -async fn client_test(mayastor: &str, test: &ComposeTest) { +async fn client_test(mayastor: &NodeId, test: &ComposeTest) { orderly_start(&test).await; let client = ActixRestClient::new("https://localhost:8080", true) @@ -97,7 +100,7 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { assert_eq!( nodes.first().unwrap(), &Node { - id: mayastor.to_string(), + id: mayastor.clone(), grpc_endpoint: "10.1.0.7:10124".to_string(), state: NodeState::Online, } @@ -105,16 +108,16 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { info!("Nodes: {:#?}", nodes); let _ = client.get_pools(Filter::None).await.unwrap(); let pool = client.create_pool(CreatePool { - node: mayastor.to_string(), - name: "pooloop".to_string(), + node: mayastor.clone(), + id: "pooloop".into(), disks: vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()] }).await.unwrap(); info!("Pools: {:#?}", pool); assert_eq!( pool, Pool { - node: "node-test-name".to_string(), - name: "pooloop".to_string(), + node: "node-test-name".into(), + id: "pooloop".into(), disks: vec!["malloc:///malloc0?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()], state: PoolState::Online, capacity: 100663296, @@ -129,8 +132,8 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { let replica = client .create_replica(CreateReplica { node: pool.node.clone(), - pool: pool.name.clone(), - uuid: "replica1".to_string(), + pool: pool.id.clone(), + uuid: "replica1".into(), size: 12582912, /* actual size will be a multiple of 4MB so just * create it like so */ thin: true, @@ -143,8 +146,8 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { replica, Replica { node: pool.node.clone(), - uuid: "replica1".to_string(), - pool: pool.name.clone(), + uuid: "replica1".into(), + pool: pool.id.clone(), thin: false, size: 12582912, share: Protocol::Nvmf, @@ -170,22 +173,22 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { assert_eq!(nexuses.len(), 0); let nexus = client .create_nexus(CreateNexus { - node: "node-test-name".to_string(), - uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + node: "node-test-name".into(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".into(), size: 12582912, - children: vec!["malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string()]}) + children: vec!["malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".into()]}) .await.unwrap(); info!("Nexus: {:#?}", nexus); assert_eq!( nexus, Nexus { - node: "node-test-name".to_string(), - uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + node: "node-test-name".into(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".into(), size: 12582912, state: NexusState::Online, children: vec![Child { - uri: "malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".to_string(), + uri: "malloc:///malloc1?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b0".into(), state: ChildState::Online, rebuild_progress: None }], @@ -197,7 +200,7 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { let 
child = client.add_nexus_child(AddNexusChild { node: nexus.node.clone(), nexus: nexus.uuid.clone(), - uri: "malloc:///malloc2?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b1".to_string(), + uri: "malloc:///malloc2?blk_size=512&size_mb=100&uuid=b940f4f2-d45d-4404-8167-3b0366f9e2b1".into(), auto_rebuild: true, }).await.unwrap(); @@ -221,7 +224,7 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { let volume = client .create_volume(CreateVolume { - uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".into(), size: 12582912, nexuses: 1, replicas: 1, @@ -236,9 +239,9 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { assert_eq!( Some(&volume), client - .get_volumes(Filter::Volume( - "058a95e5-cee6-4e81-b682-fe864ca99b9c".into() - )) + .get_volumes(Filter::Volume(VolumeId::from( + "058a95e5-cee6-4e81-b682-fe864ca99b9c" + ))) .await .unwrap() .first() @@ -246,7 +249,7 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { client .destroy_volume(DestroyVolume { - uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".to_string(), + uuid: "058a95e5-cee6-4e81-b682-fe864ca99b9c".into(), }) .await .unwrap(); @@ -256,7 +259,7 @@ async fn client_test(mayastor: &str, test: &ComposeTest) { client .destroy_pool(DestroyPool { node: pool.node.clone(), - name: pool.name, + id: pool.id, }) .await .unwrap(); diff --git a/services/common/src/lib.rs b/services/common/src/lib.rs index b3fed96f7..2fc784d9a 100644 --- a/services/common/src/lib.rs +++ b/services/common/src/lib.rs @@ -101,16 +101,19 @@ impl<'a> Context<'a> { self.bus } /// get the shared state of type `T` from the context - pub fn get_state(&self) -> &T { + pub fn get_state(&self) -> Result<&T, Error> { match self.state.try_get() { - Some(state) => state, + Some(state) => Ok(state), None => { let type_name = std::any::type_name::(); - let error = format!( + let error_msg = format!( "Requested data type '{}' not shared via with_shared_data", type_name ); - panic!(error); + error!("{}", error_msg); + Err(Error::ServiceError { + message: error_msg, + }) } } } @@ -176,8 +179,8 @@ impl Service { /// .run().await; /// /// # async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - /// let store: &NodeStore = args.context.get_state(); - /// let more: &More = args.context.get_state(); + /// let store: &NodeStore = args.context.get_state()?; + /// let more: &More = args.context.get_state()?; /// # Ok(()) /// # } pub fn with_shared_state(self, state: T) -> Self { diff --git a/services/common/src/wrapper/v0/mod.rs b/services/common/src/wrapper/v0/mod.rs index c929ad668..e904f9ec0 100644 --- a/services/common/src/wrapper/v0/mod.rs +++ b/services/common/src/wrapper/v0/mod.rs @@ -39,12 +39,12 @@ pub enum SvcError { BusCreatePool { source: mbus_api::Error }, #[snafu(display("Failed to destroy pool from the pool service"))] BusDestroyPool { source: mbus_api::Error }, - #[snafu(display("Failed to destroy pool from the pool service"))] + #[snafu(display("Failed to fetch replicas from the pool service"))] BusGetReplicas { source: mbus_api::Error }, #[snafu(display("Failed to get node '{}' from the node service", node))] - BusGetNode { source: BusError, node: String }, + BusGetNode { source: BusError, node: NodeId }, #[snafu(display("Node '{}' is not online", node))] - NodeNotOnline { node: String }, + NodeNotOnline { node: NodeId }, #[snafu(display("Failed to connect to node via gRPC"))] GrpcConnect { source: tonic::transport::Error }, #[snafu(display("Failed to list 
pools via gRPC"))] @@ -64,7 +64,7 @@ pub enum SvcError { #[snafu(display("Failed to unshare replica via gRPC"))] GrpcUnshareReplica { source: tonic::Status }, #[snafu(display("Node not found"))] - BusNodeNotFound { node_id: String }, + BusNodeNotFound { node_id: NodeId }, #[snafu(display("Pool not found"))] BusPoolNotFound { pool_id: String }, #[snafu(display("Invalid filter for pools"))] @@ -79,7 +79,7 @@ pub enum SvcError { GrpcShareNexus { source: tonic::Status }, #[snafu(display("Failed to unshare nexus via gRPC"))] GrpcUnshareNexus { source: tonic::Status }, - #[snafu(display("Failed to volume due to insufficient resources"))] + #[snafu(display("Operation failed due to insufficient resources"))] NotEnoughResources { source: NotEnough }, #[snafu(display("Invalid arguments"))] InvalidArguments {}, diff --git a/services/common/src/wrapper/v0/node_traits.rs b/services/common/src/wrapper/v0/node_traits.rs index 6925fba14..95684b7b7 100644 --- a/services/common/src/wrapper/v0/node_traits.rs +++ b/services/common/src/wrapper/v0/node_traits.rs @@ -56,12 +56,12 @@ pub trait NodeReplicaTrait: Send + Sync + Debug + Clone { /// Update internal replica list following a create fn on_create_replica(&mut self, replica: &Replica); /// Update internal replica list following a destroy - fn on_destroy_replica(&mut self, pool: &str, replica: &str); + fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId); /// Update internal replica list following an update fn on_update_replica( &mut self, - pool: &str, - replica: &str, + pool: &PoolId, + replica: &ReplicaId, share: &Protocol, uri: &str, ); @@ -86,7 +86,7 @@ pub trait NodePoolTrait: Send + Sync + Debug + Clone { /// Update internal pool list following a create async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]); /// Update internal pool list following a destroy - fn on_destroy_pool(&mut self, pool: &str); + fn on_destroy_pool(&mut self, pool: &PoolId); } /// Trait for a Node Nexus which can be implemented to interact with mayastor @@ -140,9 +140,9 @@ pub trait NodeNexusTrait: Send + Sync + Debug + Clone { /// Update internal nexus list following a create fn on_create_nexus(&mut self, nexus: &Nexus) {} /// Update internal nexus following a share/unshare - fn on_update_nexus(&mut self, nexus: &str, uri: &str) {} + fn on_update_nexus(&mut self, nexus: &NexusId, uri: &str) {} /// Update internal nexus list following a destroy - fn on_destroy_nexus(&mut self, nexus: &str) {} + fn on_destroy_nexus(&mut self, nexus: &NexusId) {} } /// Trait for a Node Nexus Children which can be implemented to interact with @@ -174,7 +174,7 @@ pub trait NodeNexusChildTrait: Send + Sync + Debug + Clone { } /// Update internal nexus children following a create - fn on_add_child(&mut self, nexus: &str, child: &Child) {} + fn on_add_child(&mut self, nexus: &NexusId, child: &Child) {} /// Update internal nexus children following a remove fn on_remove_child(&mut self, request: &RemoveNexusChild) {} } @@ -195,7 +195,7 @@ pub trait NodeWrapperTrait: { /// New NodeWrapper for the node #[allow(clippy::new_ret_no_self)] - async fn new(node: &str) -> Result + async fn new(node: &NodeId) -> Result where Self: Sized; /// Fetch all nodes via the message bus @@ -207,7 +207,7 @@ pub trait NodeWrapperTrait: } /// Get the internal id - fn id(&self) -> String; + fn id(&self) -> NodeId; /// Get the internal node fn node(&self) -> Node; /// Get the internal pools @@ -268,11 +268,11 @@ impl PoolWrapper { self.pool.clone() } /// Get the pool uuid - pub fn uuid(&self) -> 
String { - self.pool.name.clone() + pub fn uuid(&self) -> PoolId { + self.pool.id.clone() } /// Get the pool node name - pub fn node(&self) -> String { + pub fn node(&self) -> NodeId { self.pool.node.clone() } /// Get the pool state @@ -282,10 +282,16 @@ impl PoolWrapper { /// Get the free space pub fn free_space(&self) -> u64 { - if self.pool.capacity > self.pool.used { + if self.pool.capacity >= self.pool.used { self.pool.capacity - self.pool.used } else { // odd, let's report no free space available + tracing::error!( + "Pool '{}' has a capacity of '{} B' but is using '{} B'", + self.pool.id, + self.pool.capacity, + self.pool.used + ); 0 } } @@ -305,15 +311,20 @@ impl PoolWrapper { self.replicas.push(replica.clone()) } /// Remove replica from list - pub fn removed_replica(&mut self, uuid: &str) { - self.replicas.retain(|replica| replica.uuid != uuid) + pub fn removed_replica(&mut self, uuid: &ReplicaId) { + self.replicas.retain(|replica| &replica.uuid != uuid) } /// update replica from list - pub fn updated_replica(&mut self, uuid: &str, share: &Protocol, uri: &str) { + pub fn updated_replica( + &mut self, + uuid: &ReplicaId, + share: &Protocol, + uri: &str, + ) { if let Some(replica) = self .replicas .iter_mut() - .find(|replica| replica.uuid == uuid) + .find(|replica| &replica.uuid == uuid) { replica.share = share.clone(); replica.uri = uri.to_string(); diff --git a/services/common/src/wrapper/v0/pool.rs b/services/common/src/wrapper/v0/pool.rs index a330ba461..fddbeab1d 100644 --- a/services/common/src/wrapper/v0/pool.rs +++ b/services/common/src/wrapper/v0/pool.rs @@ -4,7 +4,7 @@ use super::{node_traits::*, *}; #[derive(Debug, Default, Clone)] pub struct NodeWrapperPool { node: Node, - pools: HashMap, + pools: HashMap, } #[async_trait] @@ -57,10 +57,10 @@ impl NodePoolTrait for NodeWrapperPool { async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]) { self.pools - .insert(pool.name.clone(), PoolWrapper::new_from(&pool, replicas)); + .insert(pool.id.clone(), PoolWrapper::new_from(&pool, replicas)); } - fn on_destroy_pool(&mut self, pool: &str) { + fn on_destroy_pool(&mut self, pool: &PoolId) { self.pools.remove(pool); } } @@ -149,7 +149,7 @@ impl NodeReplicaTrait for NodeWrapperPool { } } - fn on_destroy_replica(&mut self, pool: &str, replica: &str) { + fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId) { if let Some(pool) = self.pools.get_mut(pool) { pool.removed_replica(replica) } @@ -157,8 +157,8 @@ impl NodeReplicaTrait for NodeWrapperPool { fn on_update_replica( &mut self, - pool: &str, - replica: &str, + pool: &PoolId, + replica: &ReplicaId, share: &Protocol, uri: &str, ) { @@ -170,11 +170,11 @@ impl NodeReplicaTrait for NodeWrapperPool { #[async_trait] impl NodeWrapperTrait for NodeWrapperPool { - async fn new(node: &str) -> Result { + async fn new(node: &NodeId) -> Result { Ok(Box::new(Self::new_wrapper(node).await?)) } - fn id(&self) -> String { + fn id(&self) -> NodeId { self.node.id.clone() } fn node(&self) -> Node { @@ -238,7 +238,7 @@ impl NodeWrapperTrait for NodeWrapperPool { impl NodeWrapperPool { /// Fetch node via the message bus - async fn fetch_node(node: &str) -> Result { + async fn fetch_node(node: &NodeId) -> Result { MessageBus::get_node(node).await.context(BusGetNode { node, }) @@ -246,7 +246,7 @@ impl NodeWrapperPool { /// New node wrapper for the pool service containing /// a list of pools and replicas - async fn new_wrapper(node: &str) -> Result { + async fn new_wrapper(node: &NodeId) -> Result { let mut node = Self { // if 
we can't even fetch the node, then no point in proceeding node: NodeWrapperPool::fetch_node(node).await?, @@ -261,7 +261,7 @@ impl NodeWrapperPool { for pool in &pools { let replicas = replicas .iter() - .filter(|r| r.pool == pool.name) + .filter(|r| r.pool == pool.id) .cloned() .collect::>(); node.on_create_pool(pool, &replicas).await; @@ -280,10 +280,11 @@ impl_no_nexus!(NodeWrapperPool); /// mayastor gRPC types /// convert rpc pool to a message bus pool -fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: String) -> Pool { +fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: NodeId) -> Pool { + let rpc_pool = rpc_pool.clone(); Pool { node: id, - name: rpc_pool.name.clone(), + id: rpc_pool.name.into(), disks: rpc_pool.disks.clone(), state: rpc_pool.state.into(), capacity: rpc_pool.capacity, @@ -294,16 +295,17 @@ fn rpc_pool_to_bus(rpc_pool: &rpc::mayastor::Pool, id: String) -> Pool { /// convert rpc replica to a message bus replica fn rpc_replica_to_bus( rpc_replica: &rpc::mayastor::Replica, - id: String, + id: NodeId, ) -> Replica { + let rpc_replica = rpc_replica.clone(); Replica { node: id, - uuid: rpc_replica.uuid.clone(), - pool: rpc_replica.pool.clone(), + uuid: rpc_replica.uuid.into(), + pool: rpc_replica.pool.into(), thin: rpc_replica.thin, size: rpc_replica.size, share: rpc_replica.share.into(), - uri: rpc_replica.uri.clone(), + uri: rpc_replica.uri, } } @@ -311,12 +313,13 @@ fn rpc_replica_to_bus( fn bus_replica_to_rpc( request: &CreateReplica, ) -> rpc::mayastor::CreateReplicaRequest { + let request = request.clone(); rpc::mayastor::CreateReplicaRequest { - uuid: request.uuid.clone(), - pool: request.pool.clone(), + uuid: request.uuid.into(), + pool: request.pool.into(), thin: request.thin, size: request.size, - share: request.share.clone() as i32, + share: request.share as i32, } } @@ -324,9 +327,10 @@ fn bus_replica_to_rpc( fn bus_replica_share_to_rpc( request: &ShareReplica, ) -> rpc::mayastor::ShareReplicaRequest { + let request = request.clone(); rpc::mayastor::ShareReplicaRequest { - uuid: request.uuid.clone(), - share: request.protocol.clone() as i32, + uuid: request.uuid.into(), + share: request.protocol as i32, } } @@ -334,17 +338,19 @@ fn bus_replica_share_to_rpc( fn bus_replica_unshare_to_rpc( request: &UnshareReplica, ) -> rpc::mayastor::ShareReplicaRequest { + let request = request.clone(); rpc::mayastor::ShareReplicaRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.into(), share: Protocol::Off as i32, } } -/// convert a message bus replica share to an rpc replica share +/// convert a message bus pool to an rpc pool fn bus_pool_to_rpc(request: &CreatePool) -> rpc::mayastor::CreatePoolRequest { + let request = request.clone(); rpc::mayastor::CreatePoolRequest { - name: request.name.clone(), - disks: request.disks.clone(), + name: request.id.into(), + disks: request.disks, } } @@ -353,7 +359,7 @@ fn bus_replica_destroy_to_rpc( request: &DestroyReplica, ) -> rpc::mayastor::DestroyReplicaRequest { rpc::mayastor::DestroyReplicaRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.clone().into(), } } @@ -362,6 +368,6 @@ fn bus_pool_destroy_to_rpc( request: &DestroyPool, ) -> rpc::mayastor::DestroyPoolRequest { rpc::mayastor::DestroyPoolRequest { - name: request.name.clone(), + name: request.id.clone().into(), } } diff --git a/services/common/src/wrapper/v0/registry.rs b/services/common/src/wrapper/v0/registry.rs index ecdde6471..2ebda3e43 100644 --- a/services/common/src/wrapper/v0/registry.rs +++ b/services/common/src/wrapper/v0/registry.rs @@ 
-30,7 +30,7 @@ impl Default for NotFoundPolicy { /// mayastor nodes/other services. #[derive(Clone, Default, Debug)] pub struct Registry { - nodes: Arc>>, + nodes: Arc>>, update_period: std::time::Duration, not_found: NotFoundPolicy, _t: PhantomData, @@ -64,7 +64,7 @@ impl Registry { nodes.iter().map(|n| n.node()).collect() } - /// List all cached pools + /// List all cached pool wrappers pub async fn list_pools_wrapper(&self) -> Vec { let nodes = self.nodes.lock().await; nodes @@ -93,16 +93,16 @@ impl Registry { self.list_pools_wrapper().await } - /// List all cached pool wrappers + /// List all cached pools pub async fn list_pools(&self) -> Vec { let nodes = self.nodes.lock().await; nodes.values().map(|node| node.pools()).flatten().collect() } /// List all cached pools from node - pub async fn list_node_pools(&self, node: &str) -> Vec { + pub async fn list_node_pools(&self, node: &NodeId) -> Vec { let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { node.pools() } else { // or return error, node not found? @@ -121,9 +121,9 @@ impl Registry { } /// List all cached replicas from node - pub async fn list_node_replicas(&self, node: &str) -> Vec { + pub async fn list_node_replicas(&self, node: &NodeId) -> Vec { let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { node.replicas() } else { // or return error, node not found? @@ -146,12 +146,15 @@ impl Registry { } /// Get current list of known nodes - async fn get_known_nodes(&self, node_id: &str) -> Option { + async fn get_known_nodes(&self, node_id: &NodeId) -> Option { let nodes = self.nodes.lock().await; nodes.get(node_id).cloned() } /// Get node `node_id` - async fn get_node(&self, node_id: &str) -> Result { + async fn get_node( + &self, + node_id: &NodeId, + ) -> Result { let mut nodes = self.nodes.lock().await; let node = match nodes.get(node_id) { Some(node) => node.clone(), @@ -163,12 +166,12 @@ impl Registry { node } else { return Err(SvcError::BusNodeNotFound { - node_id: node_id.to_string(), + node_id: node_id.into(), }); } } else { return Err(SvcError::BusNodeNotFound { - node_id: node_id.to_string(), + node_id: node_id.into(), }); } } @@ -194,7 +197,7 @@ impl Registry { let mut nodes = self.nodes.lock().await; let node = nodes.get_mut(&request.node); if let Some(node) = node { - node.on_destroy_pool(&request.name) + node.on_destroy_pool(&request.id) } } async fn on_replica_added(&self, replica: &Replica) { @@ -213,9 +216,9 @@ impl Registry { } async fn reg_update_replica( &self, - node: &str, - pool: &str, - id: &str, + node: &NodeId, + pool: &PoolId, + id: &ReplicaId, share: &Protocol, uri: &str, ) { @@ -259,7 +262,7 @@ impl Registry { Ok(()) } - /// Create replica and update registry + /// Share replica and update registry pub async fn share_replica( &self, request: &ShareReplica, @@ -277,7 +280,7 @@ impl Registry { Ok(share) } - /// Create replica and update registry + /// Unshare replica and update registry pub async fn unshare_replica( &self, request: &UnshareReplica, @@ -309,7 +312,12 @@ impl Registry { node.on_destroy_nexus(&request.uuid); } } - async fn on_add_nexus_child(&self, node: &str, nexus: &str, child: &Child) { + async fn on_add_nexus_child( + &self, + node: &NodeId, + nexus: &NexusId, + child: &Child, + ) { let mut nodes = self.nodes.lock().await; let node = nodes.get_mut(node); 
if let Some(node) = node { @@ -323,7 +331,7 @@ impl Registry { node.on_remove_child(request); } } - async fn on_update_nexus(&self, node: &str, nexus: &str, uri: &str) { + async fn on_update_nexus(&self, node: &NodeId, nexus: &NexusId, uri: &str) { let mut nodes = self.nodes.lock().await; let node = nodes.get_mut(node); if let Some(node) = node { @@ -342,9 +350,9 @@ impl Registry { } /// List all cached nexuses from node - pub async fn list_node_nexuses(&self, node: &str) -> Vec { + pub async fn list_node_nexuses(&self, node: &NodeId) -> Vec { let nodes = self.list_nodes_wrapper().await; - if let Some(node) = nodes.iter().find(|&n| n.id() == node) { + if let Some(node) = nodes.iter().find(|&n| &n.id() == node) { node.nexuses() } else { // hmm, or return error, node not found? @@ -374,7 +382,7 @@ impl Registry { Ok(()) } - /// Create nexus + /// Share nexus pub async fn share_nexus( &self, request: &ShareNexus, @@ -386,7 +394,7 @@ impl Registry { Ok(share) } - /// Destroy nexus + /// Unshare nexus pub async fn unshare_nexus( &self, request: &UnshareNexus, diff --git a/services/common/src/wrapper/v0/volume.rs b/services/common/src/wrapper/v0/volume.rs index eecfa2776..96a8f2822 100644 --- a/services/common/src/wrapper/v0/volume.rs +++ b/services/common/src/wrapper/v0/volume.rs @@ -5,8 +5,8 @@ use mbus_api::Message; #[derive(Debug, Default, Clone)] pub struct NodeWrapperVolume { node: Node, - pools: HashMap, - nexuses: HashMap, + pools: HashMap, + nexuses: HashMap, } #[async_trait] @@ -36,10 +36,10 @@ impl NodePoolTrait for NodeWrapperVolume { async fn on_create_pool(&mut self, pool: &Pool, replicas: &[Replica]) { self.pools - .insert(pool.name.clone(), PoolWrapper::new_from(&pool, replicas)); + .insert(pool.id.clone(), PoolWrapper::new_from(&pool, replicas)); } - fn on_destroy_pool(&mut self, pool: &str) { + fn on_destroy_pool(&mut self, pool: &PoolId) { self.pools.remove(pool); } } @@ -95,7 +95,7 @@ impl NodeReplicaTrait for NodeWrapperVolume { } } - fn on_destroy_replica(&mut self, pool: &str, replica: &str) { + fn on_destroy_replica(&mut self, pool: &PoolId, replica: &ReplicaId) { if let Some(pool) = self.pools.get_mut(pool) { pool.removed_replica(replica) } @@ -103,8 +103,8 @@ impl NodeReplicaTrait for NodeWrapperVolume { fn on_update_replica( &mut self, - pool: &str, - replica: &str, + pool: &PoolId, + replica: &ReplicaId, share: &Protocol, uri: &str, ) { @@ -199,13 +199,13 @@ impl NodeNexusTrait for NodeWrapperVolume { self.nexuses.insert(nexus.uuid.clone(), nexus.clone()); } - fn on_update_nexus(&mut self, nexus: &str, uri: &str) { + fn on_update_nexus(&mut self, nexus: &NexusId, uri: &str) { if let Some(nexus) = self.nexuses.get_mut(nexus) { nexus.device_uri = uri.to_string(); } } - fn on_destroy_nexus(&mut self, nexus: &str) { + fn on_destroy_nexus(&mut self, nexus: &NexusId) { self.nexuses.remove(nexus); } } @@ -244,7 +244,7 @@ impl NodeNexusChildTrait for NodeWrapperVolume { Ok(()) } - fn on_add_child(&mut self, nexus: &str, child: &Child) { + fn on_add_child(&mut self, nexus: &NexusId, child: &Child) { if let Some(nexus) = self.nexuses.get_mut(nexus) { nexus.children.push(child.clone()); } @@ -259,11 +259,11 @@ impl NodeNexusChildTrait for NodeWrapperVolume { #[async_trait] impl NodeWrapperTrait for NodeWrapperVolume { - async fn new(node: &str) -> Result { + async fn new(node: &NodeId) -> Result { Ok(Box::new(Self::new_wrapper(node).await?)) } - fn id(&self) -> String { + fn id(&self) -> NodeId { self.node.id.clone() } fn node(&self) -> Node { @@ -327,7 +327,7 @@ impl 
NodeWrapperTrait for NodeWrapperVolume { impl NodeWrapperVolume { /// Fetch node via the message bus - async fn fetch_node(node: &str) -> Result { + async fn fetch_node(node: &NodeId) -> Result { MessageBus::get_node(node).await.context(BusGetNode { node, }) @@ -335,7 +335,7 @@ impl NodeWrapperVolume { /// New node wrapper for the pool service containing /// a list of pools and replicas - async fn new_wrapper(node: &str) -> Result { + async fn new_wrapper(node: &NodeId) -> Result { let mut node = Self { // if we can't even fetch the node, then no point in proceeding node: NodeWrapperVolume::fetch_node(node).await?, @@ -351,7 +351,7 @@ impl NodeWrapperVolume { for pool in &pools { let replicas = replicas .iter() - .filter(|r| r.pool == pool.name) + .filter(|r| r.pool == pool.id) .cloned() .collect::>(); node.on_create_pool(pool, &replicas).await; @@ -367,10 +367,11 @@ impl NodeWrapperVolume { } } -fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: String) -> Nexus { +fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: NodeId) -> Nexus { + let rpc_nexus = rpc_nexus.clone(); Nexus { node: id, - uuid: rpc_nexus.uuid.clone(), + uuid: rpc_nexus.uuid.into(), size: rpc_nexus.size, state: NexusState::from(rpc_nexus.state), children: rpc_nexus @@ -383,8 +384,9 @@ fn rpc_nexus_to_bus(rpc_nexus: &rpc::mayastor::Nexus, id: String) -> Nexus { } } fn rpc_child_to_bus(rpc_child: &rpc::mayastor::Child) -> Child { + let rpc_child = rpc_child.clone(); Child { - uri: rpc_child.uri.clone(), + uri: rpc_child.uri.into(), state: ChildState::from(rpc_child.state), rebuild_progress: if rpc_child.rebuild_progress >= 0 { Some(rpc_child.rebuild_progress) @@ -396,49 +398,53 @@ fn rpc_child_to_bus(rpc_child: &rpc::mayastor::Child) -> Child { fn bus_nexus_to_rpc( request: &CreateNexus, ) -> rpc::mayastor::CreateNexusRequest { + let request = request.clone(); rpc::mayastor::CreateNexusRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.into(), size: request.size, - children: request.children.clone(), + children: request.children.iter().map(|c| c.to_string()).collect(), } } fn bus_nexus_share_to_rpc( request: &ShareNexus, ) -> rpc::mayastor::PublishNexusRequest { + let request = request.clone(); rpc::mayastor::PublishNexusRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.into(), key: request.key.clone().unwrap_or_default(), - share: request.protocol.clone() as i32, + share: request.protocol as i32, } } fn bus_nexus_unshare_to_rpc( request: &UnshareNexus, ) -> rpc::mayastor::UnpublishNexusRequest { rpc::mayastor::UnpublishNexusRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.clone().into(), } } fn bus_nexus_destroy_to_rpc( request: &DestroyNexus, ) -> rpc::mayastor::DestroyNexusRequest { rpc::mayastor::DestroyNexusRequest { - uuid: request.uuid.clone(), + uuid: request.uuid.clone().into(), } } fn bus_nexus_child_add_to_rpc( request: &AddNexusChild, ) -> rpc::mayastor::AddChildNexusRequest { + let request = request.clone(); rpc::mayastor::AddChildNexusRequest { - uuid: request.nexus.clone(), - uri: request.uri.clone(), + uuid: request.nexus.into(), + uri: request.uri.into(), norebuild: !request.auto_rebuild, } } fn bus_nexus_child_remove_to_rpc( request: &RemoveNexusChild, ) -> rpc::mayastor::RemoveChildNexusRequest { + let request = request.clone(); rpc::mayastor::RemoveChildNexusRequest { - uuid: request.nexus.clone(), - uri: request.uri.clone(), + uuid: request.nexus.into(), + uri: request.uri.into(), } } diff --git a/services/node/src/server.rs b/services/node/src/server.rs 
index 2a93379a9..18fc5b135 100644 --- a/services/node/src/server.rs +++ b/services/node/src/server.rs @@ -76,7 +76,7 @@ struct NodeStore { inner: std::sync::Arc, } struct NodeStoreInner { - state: Mutex>, + state: Mutex>, deadline: std::time::Duration, } impl Default for NodeStoreInner { @@ -119,7 +119,7 @@ impl NodeStore { state.remove(&node.id); } /// Offline node through its id - async fn offline(&self, id: String) { + async fn offline(&self, id: NodeId) { let mut state = self.inner.state.lock().await; if let Some(n) = state.get_mut(&id) { n.0.state = NodeState::Offline; @@ -141,7 +141,7 @@ impl NodeStore { #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let store: &NodeStore = args.context.get_state(); + let store: &NodeStore = args.context.get_state()?; store.register(args.request.inner()?).await; Ok(()) } @@ -153,7 +153,7 @@ impl ServiceSubscriber for ServiceHandler { #[async_trait] impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { - let store: &NodeStore = args.context.get_state(); + let store: &NodeStore = args.context.get_state()?; store.deregister(args.request.inner()?).await; Ok(()) } @@ -167,7 +167,7 @@ impl ServiceSubscriber for ServiceHandler { async fn handler(&self, args: Arguments<'_>) -> Result<(), Error> { let request: ReceivedMessage = args.request.try_into()?; - let store: &NodeStore = args.context.get_state(); + let store: &NodeStore = args.context.get_state()?; let nodes = store.get_nodes().await; request.reply(Nodes(nodes)).await } @@ -249,9 +249,9 @@ mod tests { } #[tokio::test] - async fn node() -> Result<(), Box> { + async fn node() { init_tracing(); - let maya_name = "node-test-name"; + let maya_name = NodeId::from("node-test-name"); let test = Builder::new() .name("node") .add_container_bin( @@ -268,41 +268,37 @@ mod tests { "mayastor", Binary::from_dbg("mayastor") .with_nats("-n") - .with_args(vec!["-N", maya_name]), + .with_args(vec!["-N", maya_name.as_str()]), ) - .with_clean(true) .autorun(false) .build() - .await?; + .await + .unwrap(); - orderly_start(&test).await?; + orderly_start(&test).await.unwrap(); - let nodes = GetNodes {}.request().await?; + let nodes = GetNodes {}.request().await.unwrap(); tracing::info!("Nodes: {:?}", nodes); assert_eq!(nodes.0.len(), 1); assert_eq!( nodes.0.first().unwrap(), &Node { - id: maya_name.to_string(), + id: maya_name.clone(), grpc_endpoint: "0.0.0.0:10124".to_string(), state: NodeState::Online, } ); tokio::time::delay_for(std::time::Duration::from_secs(2)).await; - let nodes = GetNodes {}.request().await?; + let nodes = GetNodes {}.request().await.unwrap(); tracing::info!("Nodes: {:?}", nodes); assert_eq!(nodes.0.len(), 1); assert_eq!( nodes.0.first().unwrap(), &Node { - id: maya_name.to_string(), + id: maya_name.clone(), grpc_endpoint: "0.0.0.0:10124".to_string(), state: NodeState::Offline, } ); - - // run with --nocapture to see all the logs - test.logs_all().await?; - Ok(()) } } diff --git a/services/pool/src/server.rs b/services/pool/src/server.rs index eceda011f..c50c3d141 100644 --- a/services/pool/src/server.rs +++ b/services/pool/src/server.rs @@ -40,7 +40,7 @@ macro_rules! 
impl_service_handler { let request: ReceivedMessage<$RequestType> = args.request.try_into()?; - let service: &PoolSvc = args.context.get_state(); + let service: &PoolSvc = args.context.get_state()?; let reply = service .$ServiceFnName(&request.inner()) .await @@ -160,8 +160,8 @@ mod tests { tracing::info!("Nodes: {:?}", nodes); CreatePool { - node: mayastor.to_string(), - name: "pooloop".to_string(), + node: mayastor.into(), + id: "pooloop".into(), disks: vec!["malloc:///disk0?size_mb=100".into()], } .request() @@ -230,7 +230,7 @@ mod tests { DestroyPool { node: mayastor.into(), - name: "pooloop".into(), + id: "pooloop".into(), } .request() .await diff --git a/services/pool/src/service.rs b/services/pool/src/service.rs index 9e15b5691..6164a6646 100644 --- a/services/pool/src/service.rs +++ b/services/pool/src/service.rs @@ -27,7 +27,7 @@ impl PoolSvc { /// Get all pools from node or from all nodes async fn get_node_pools( &self, - node_id: Option, + node_id: Option, ) -> Result, SvcError> { Ok(match node_id { None => self.registry.list_pools().await, @@ -38,7 +38,7 @@ impl PoolSvc { /// Get all replicas from node or from all nodes async fn get_node_replicas( &self, - node_id: Option, + node_id: Option, ) -> Result, SvcError> { Ok(match node_id { None => self.registry.list_replicas().await, @@ -58,19 +58,11 @@ impl PoolSvc { Filter::Node(node_id) => self.get_node_pools(Some(node_id)).await?, Filter::NodePool(node_id, pool_id) => { let pools = self.get_node_pools(Some(node_id)).await?; - pools - .iter() - .filter(|&p| p.name == pool_id) - .cloned() - .collect() + pools.iter().filter(|&p| p.id == pool_id).cloned().collect() } Filter::Pool(pool_id) => { let pools = self.get_node_pools(None).await?; - pools - .iter() - .filter(|&p| p.name == pool_id) - .cloned() - .collect() + pools.iter().filter(|&p| p.id == pool_id).cloned().collect() } _ => { return Err(SvcError::InvalidFilter { @@ -166,7 +158,7 @@ impl PoolSvc { self.registry.destroy_replica(&request).await } - /// Create replica + /// Share replica #[tracing::instrument(level = "debug", err)] pub(super) async fn share_replica( &self, @@ -175,7 +167,7 @@ impl PoolSvc { self.registry.share_replica(&request).await } - /// Create replica + /// Unshare replica #[tracing::instrument(level = "debug", err)] pub(super) async fn unshare_replica( &self, diff --git a/services/volume/src/server.rs b/services/volume/src/server.rs index d3f8448b0..ced17233e 100644 --- a/services/volume/src/server.rs +++ b/services/volume/src/server.rs @@ -40,7 +40,7 @@ macro_rules! 
impl_service_handler { let request: ReceivedMessage<$RequestType> = args.request.try_into()?; - let service: &VolumeSvc = args.context.get_state(); + let service: &VolumeSvc = args.context.get_state()?; let reply = service .$ServiceFnName(&request.inner()) .await @@ -186,8 +186,8 @@ mod tests { async fn prepare_pools(mayastor: &str, mayastor2: &str) { CreatePool { - node: mayastor.to_string(), - name: "pooloop".to_string(), + node: mayastor.into(), + id: "pooloop".into(), disks: vec!["malloc:///disk0?size_mb=100".into()], } .request() @@ -195,8 +195,8 @@ mod tests { .unwrap(); CreatePool { - node: mayastor2.to_string(), - name: "pooloop".to_string(), + node: mayastor2.into(), + id: "pooloop".into(), disks: vec!["malloc:///disk0?size_mb=100".into()], } .request() @@ -227,7 +227,7 @@ mod tests { node: mayastor.into(), uuid: "f086f12c-1728-449e-be32-9415051090d6".into(), size: 5242880, - children: vec![replica.uri, local], + children: vec![replica.uri.into(), local], } .request() .await @@ -249,16 +249,16 @@ mod tests { DestroyNexus { node: mayastor.into(), - uuid: "f086f12c-1728-449e-be32-9415051090d6".to_string(), + uuid: "f086f12c-1728-449e-be32-9415051090d6".into(), } .request() .await .unwrap(); DestroyReplica { - node: replica.node.to_string(), - pool: replica.pool.to_string(), - uuid: replica.uuid.to_string(), + node: replica.node, + pool: replica.pool, + uuid: replica.uuid, } .request() .await @@ -269,7 +269,7 @@ mod tests { async fn test_volume() { let volume = CreateVolume { - uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".to_string(), + uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".into(), size: 5242880, nexuses: 1, replicas: 2, @@ -285,7 +285,7 @@ mod tests { assert_eq!(Some(&volume), volumes.first()); DestroyVolume { - uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".to_string(), + uuid: "359b7e1a-b724-443b-98b4-e6d97fabbb40".into(), } .request() .await diff --git a/services/volume/src/service.rs b/services/volume/src/service.rs index 827b3b881..5c455e3c7 100644 --- a/services/volume/src/service.rs +++ b/services/volume/src/service.rs @@ -23,10 +23,10 @@ impl VolumeSvc { self.registry.start(); } - /// Get all pools from node or from all nodes + /// Get all nexuses from node or from all nodes async fn get_node_nexuses( &self, - node_id: Option, + node_id: Option, ) -> Result, SvcError> { Ok(match node_id { None => self.registry.list_nexuses().await, @@ -89,7 +89,7 @@ impl VolumeSvc { self.registry.destroy_nexus(request).await } - /// Create nexus + /// Share nexus #[tracing::instrument(level = "debug", err)] pub(super) async fn share_nexus( &self, @@ -98,7 +98,7 @@ impl VolumeSvc { self.registry.share_nexus(request).await } - /// Destroy nexus + /// Unshare nexus #[tracing::instrument(level = "debug", err)] pub(super) async fn unshare_nexus( &self, @@ -136,7 +136,7 @@ impl VolumeSvc { nexus .iter() .map(|n| Volume { - uuid: n.uuid.clone(), + uuid: VolumeId::from(n.uuid.as_str()), size: n.size, state: n.state.clone(), children: vec![n.clone()], @@ -210,7 +210,7 @@ impl VolumeSvc { while let Some(pool) = pools.pop() { let create_replica = CreateReplica { node: pool.node(), - uuid: request.uuid.clone(), + uuid: ReplicaId::from(request.uuid.as_str()), pool: pool.uuid(), size: request.size, thin: true, @@ -249,9 +249,12 @@ impl VolumeSvc { for i in 0 .. 
request.nexuses { let create_nexus = CreateNexus { node: replicas[i as usize].node.clone(), - uuid: request.uuid.clone(), + uuid: NexusId::from(request.uuid.as_str()), size: request.size, - children: replicas.iter().map(|r| r.uri.clone()).collect(), + children: replicas + .iter() + .map(|r| r.uri.to_string().into()) + .collect(), }; match self.registry.create_nexus(&create_nexus).await { @@ -305,18 +308,20 @@ impl VolumeSvc { let nexuses = self.registry.list_nexuses().await; let nexuses = nexuses .iter() - .filter(|n| n.uuid == request.uuid) + .filter(|n| n.uuid.as_str() == request.uuid.as_str()) .collect::>(); for nexus in nexuses { self.registry .destroy_nexus(&DestroyNexus { node: nexus.node.clone(), - uuid: request.uuid.clone(), + uuid: NexusId::from(request.uuid.as_str()), }) .await?; for child in &nexus.children { let replicas = self.registry.list_replicas().await; - let replica = replicas.iter().find(|r| r.uri == child.uri); + let replica = replicas + .iter() + .find(|r| r.uri.as_str() == child.uri.as_str()); if let Some(replica) = replica { self.registry .destroy_replica(&DestroyReplica { From 5a05601e539e222906cdd99a08021d5d466e4f47 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Mon, 11 Jan 2021 14:01:04 +0000 Subject: [PATCH 71/85] test(rebuild): wait for successful rebuild CI/CD was showing occasional failures of rebuild tests. This was because the tests generally only waited a short time (i.e. 2 seconds) for a rebuild to complete. This wasn't always long enough. The utility function to wait for successful rebuilds has been modified so that it continually polls for rebuild progress. As long as some progress is being made within a given window (5 seconds) it will continue to wait for the rebuild. After 30 seconds, if the rebuild has not completed, it will give up waiting for it. --- mayastor/tests/rebuild.rs | 111 +++++++++++++++++++------------------- 1 file changed, 57 insertions(+), 54 deletions(-) diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs index c82e3170f..e7f1aa234 100644 --- a/mayastor/tests/rebuild.rs +++ b/mayastor/tests/rebuild.rs @@ -54,10 +54,7 @@ async fn rebuild_basic() { .unwrap()); // Check nexus is healthy after rebuild completion. - assert!( - wait_for_rebuild_completion(nexus_hdl, child, Duration::from_secs(20)) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, child).await); check_nexus_state(nexus_hdl, NexusState::NexusOnline).await; } @@ -371,14 +368,7 @@ async fn rebuild_sizes() { // Add the local child and wait for rebuild. add_child(nexus_hdl, &local_child, true).await; - assert!( - wait_for_rebuild_completion( - nexus_hdl, - &local_child, - Duration::from_secs(2), - ) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, &local_child).await); // Teardown destroy_nexus(nexus_hdl).await; @@ -424,14 +414,7 @@ async fn rebuild_segment_sizes() { // Wait for rebuild to complete. 
add_child(nexus_hdl, &child2, true).await; - assert!( - wait_for_rebuild_completion( - nexus_hdl, - &child2, - Duration::from_secs(5) - ) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, &child2).await); // Teardown destroy_nexus(nexus_hdl).await; @@ -577,14 +560,7 @@ async fn rebuild_multiple() { resume_rebuild(nexus_hdl, &child.share_uri) .await .expect("Failed to resume rebuild"); - assert!( - wait_for_rebuild_completion( - nexus_hdl, - &child.share_uri, - Duration::from_secs(10), - ) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, &child.share_uri).await); remove_child(nexus_hdl, &child.share_uri).await; } assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); @@ -596,14 +572,7 @@ async fn rebuild_multiple() { // Wait for rebuilds to complete for child in °raded_children { - assert!( - wait_for_rebuild_completion( - nexus_hdl, - &child.share_uri, - Duration::from_secs(10), - ) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, &child.share_uri).await); } } @@ -653,10 +622,7 @@ async fn rebuild_with_load() { assert_eq!(fio_result, 0, "Failed to run fio_verify_size"); // Wait for rebuild to complete. - assert!( - wait_for_rebuild_completion(nexus_hdl, &child2, Duration::from_secs(1)) - .await - ); + assert!(wait_for_successful_rebuild(nexus_hdl, &child2).await); // Disconnect and destroy nexus nvmf_disconnect(nexus_uri); @@ -898,16 +864,20 @@ async fn get_num_rebuilds(hdl: &mut RpcHandle) -> u32 { } /// Get the rebuild progress for the given child. -async fn get_rebuild_progress(hdl: &mut RpcHandle, child: &str) -> u32 { - let reply = hdl +/// Return None if the progress cannot be obtained i.e. because the rebuild job +/// has completed. +async fn get_rebuild_progress(hdl: &mut RpcHandle, child: &str) -> Option { + match hdl .mayastor .get_rebuild_progress(RebuildProgressRequest { uuid: NEXUS_UUID.into(), uri: child.into(), }) .await - .expect("Failed to get rebuild progress"); - reply.into_inner().progress + { + Ok(reply) => Some(reply.into_inner().progress), + Err(_) => None, + } } /// Waits on the given rebuild state or times out. @@ -949,14 +919,42 @@ async fn get_rebuild_state(hdl: &mut RpcHandle, child: &str) -> Option { } } -/// Returns true if the rebuild has completed. -/// A rebuild is deemed to be complete if the destination child is online. -async fn wait_for_rebuild_completion( - hdl: &mut RpcHandle, - child: &str, - timeout: Duration, -) -> bool { - wait_for_child_state(hdl, child, ChildState::ChildOnline, timeout).await +/// Returns true if the rebuild has completed successfully i.e. the destination +/// child is 'online'. +/// Returns false if: +/// 1. The rebuild does not make any progress within the progress window +/// 2. The rebuild takes longer than the TIMEOUT time. +async fn wait_for_successful_rebuild(hdl: &mut RpcHandle, child: &str) -> bool { + let mut last_progress = 0; + let mut progress_start_time = std::time::Instant::now(); + let progress_window = std::time::Duration::from_secs(5); + let time = std::time::Instant::now(); + const TIMEOUT: Duration = std::time::Duration::from_secs(30); + + // Keep looping while progress is being made and the rebuild has not timed + // out. + while std::time::Instant::now() - progress_start_time < progress_window + || time.elapsed().as_millis() < TIMEOUT.as_millis() + { + match get_rebuild_progress(hdl, child).await { + Some(progress) => { + if progress - last_progress > 0 { + // Progress has been made, reset the progress window. 
+                    progress_start_time = std::time::Instant::now();
+                    last_progress = progress;
+                }
+            }
+            None => {
+                // 'None' is returned when the rebuild job cannot be found -
+                // which can indicate rebuild completion.
+                // If the child is online, the rebuild completed successfully.
+                return get_child_state(hdl, child).await
+                    == ChildState::ChildOnline as i32;
+            }
+        }
+        std::thread::sleep(Duration::from_millis(50));
+    }
+    return false;
 }

 /// Wait on the given child state or times out.
@@ -969,14 +967,19 @@ async fn wait_for_child_state(
 ) -> bool {
     let time = std::time::Instant::now();
     while time.elapsed().as_millis() < timeout.as_millis() {
-        let c = get_child(hdl, NEXUS_UUID, child).await;
-        if c.state == state as i32 {
+        if get_child_state(hdl, child).await == state as i32 {
             return true;
         }
         std::thread::sleep(Duration::from_millis(10));
     }
     false
 }
+
+/// Return the current state of the given child.
+async fn get_child_state(hdl: &mut RpcHandle, child: &str) -> i32 {
+    get_child(hdl, NEXUS_UUID, child).await.state
+}
+
 /// Returns the state of the nexus with the given uuid.
 async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> Option {
     let list = hdl

From 2db95409d0c1b8243809f9369bbf11fd7abddf3c Mon Sep 17 00:00:00 2001
From: Ana Hobden
Date: Mon, 11 Jan 2021 06:45:46 -0800
Subject: [PATCH 72/85] Add notes to envrc

Signed-off-by: Ana Hobden
---
 .envrc | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.envrc b/.envrc
index 1d953f4bd..0ae6bee37 100644
--- a/.envrc
+++ b/.envrc
@@ -1 +1,10 @@
+# If you want to configure certain options for your .envrc and don't
+# want git to track that change you can either use:
+# (1) ~/.gitignore (which is global)
+# (2) `git update-index --assume-unchanged .envrc`
+
+# Use with all defaults:
 use nix
+
+# Or, use with nospdk/norust:
+# use nix --arg nospdk true --arg norust true

From dfdc2c9aff5428970ad05e9d308a328fe379dd0e Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Thu, 7 Jan 2021 11:27:31 +0000
Subject: [PATCH 73/85] feat(moac): k8s client - pull fixes from upstream

The hard work has been done in jkryl/javascript repo on github. Here we
only update to the new version of the package. Follow-up work is to try
to upstream the fix.
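Note: client-node-fixed-watcher is a fork of the upstream
@kubernetes/client-node package (via the jkryl/javascript repo mentioned
above), so consumers keep the upstream cache/watch API. A minimal sketch of
how such a client is typically wired up — illustrative only; the namespace,
resource and handler below are assumptions, not moac's actual code:

    // Illustrative sketch; names follow the upstream @kubernetes/client-node
    // API, which the fixed-watcher fork mirrors.
    import {
      KubeConfig,
      Watch,
      ListWatch,
      CoreV1Api,
      V1Pod
    } from 'client-node-fixed-watcher';

    const kc = new KubeConfig();
    kc.loadFromDefault(); // ~/.kube/config or the in-cluster service account

    const core = kc.makeApiClient(CoreV1Api);
    const listFn = () => core.listNamespacedPod('mayastor');

    // ListWatch keeps a local cache in sync over a watch connection; the
    // watcher fixes pulled in by this bump target exactly this code path.
    const cache = new ListWatch<V1Pod>(
      '/api/v1/namespaces/mayastor/pods',
      new Watch(kc),
      listFn
    );
    cache.on('add', (pod) => console.log('cached:', pod.metadata?.name));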
--- csi/moac/node-packages.nix | 100 ++++++++++++++++++------------------- csi/moac/package-lock.json | 60 +++++++++++----------- csi/moac/package.json | 2 +- 3 files changed, 81 insertions(+), 81 deletions(-) diff --git a/csi/moac/node-packages.nix b/csi/moac/node-packages.nix index cafe14fa2..6dc4f5548 100644 --- a/csi/moac/node-packages.nix +++ b/csi/moac/node-packages.nix @@ -247,13 +247,13 @@ let sha512 = "c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A=="; }; }; - "@types/js-yaml-3.12.5" = { + "@types/js-yaml-3.12.6" = { name = "_at_types_slash_js-yaml"; packageName = "@types/js-yaml"; - version = "3.12.5"; + version = "3.12.6"; src = fetchurl { - url = "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.5.tgz"; - sha512 = "JCcp6J0GV66Y4ZMDAQCXot4xprYB+Zfd3meK9+INSJeVZwJmHAW30BBEEkPzXswMXuiyReUGOP3GxrADc9wPww=="; + url = "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.6.tgz"; + sha512 = "cK4XqrLvP17X6c0C8n4iTbT59EixqyXL3Fk8/Rsk4dF3oX4dg70gYUXrXVUUHpnsGMPNlTQMqf+TVmNPX6FmSQ=="; }; }; "@types/json5-0.0.29" = { @@ -301,13 +301,13 @@ let sha512 = "wuzZksN4w4kyfoOv/dlpov4NOunwutLA/q7uc00xU02ZyUY+aoM5PWIXEKBMnm0NHd4a+N71BMjq+x7+2Af1fg=="; }; }; - "@types/node-10.17.44" = { + "@types/node-10.17.50" = { name = "_at_types_slash_node"; packageName = "@types/node"; - version = "10.17.44"; + version = "10.17.50"; src = fetchurl { - url = "https://registry.npmjs.org/@types/node/-/node-10.17.44.tgz"; - sha512 = "vHPAyBX1ffLcy4fQHmDyIUMUb42gHZjPHU66nhvbMzAWJqHnySGZ6STwN3rwrnSd1FHB0DI/RWgGELgKSYRDmw=="; + url = "https://registry.npmjs.org/@types/node/-/node-10.17.50.tgz"; + sha512 = "vwX+/ija9xKc/z9VqMCdbf4WYcMTGsI0I/L/6shIF3qXURxZOhPQlPRHtjTpiNhAwn0paMJzlOQqw6mAGEQnTA=="; }; }; "@types/node-13.13.23" = { @@ -355,13 +355,13 @@ let sha512 = "NeFeX7YfFZDYsCfbuaOmFQ0OjSmHreKBpp7MQ4alWQBHeh2USLsj7qyMyn9t82kjqIX516CR/5SRHnARduRtbQ=="; }; }; - "@types/tar-4.0.3" = { + "@types/tar-4.0.4" = { name = "_at_types_slash_tar"; packageName = "@types/tar"; - version = "4.0.3"; + version = "4.0.4"; src = fetchurl { - url = "https://registry.npmjs.org/@types/tar/-/tar-4.0.3.tgz"; - sha512 = "Z7AVMMlkI8NTWF0qGhC4QIX0zkV/+y0J8x7b/RsHrN0310+YNjoJd8UrApCiGBCWtKjxS9QhNqLi2UJNToh5hA=="; + url = "https://registry.npmjs.org/@types/tar/-/tar-4.0.4.tgz"; + sha512 = "0Xv+xcmkTsOZdIF4yCnd7RkOOyfyqPaqJ7RZFKnwdxfDbkN3eAAE9sHl8zJFqBz4VhxolW9EErbjR1oyH7jK2A=="; }; }; "@types/tough-cookie-4.0.0" = { @@ -742,13 +742,13 @@ let sha512 = "zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="; }; }; - "cacheable-lookup-5.0.3" = { + "cacheable-lookup-5.0.4" = { name = "cacheable-lookup"; packageName = "cacheable-lookup"; - version = "5.0.3"; + version = "5.0.4"; src = fetchurl { - url = "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.3.tgz"; - sha512 = "W+JBqF9SWe18A72XFzN/V/CULFzPm7sBXzzR6ekkE+3tLG72wFZrBiBZhrZuDoYexop4PHJVdFAKb/Nj9+tm9w=="; + url = "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz"; + sha512 = "2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA=="; }; }; "cacheable-request-7.0.1" = { @@ -868,13 +868,13 @@ let sha512 = "4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A=="; }; }; - "client-node-fixed-watcher-0.13.2" = { + "client-node-fixed-watcher-0.13.4" = { name = "client-node-fixed-watcher"; packageName = "client-node-fixed-watcher"; - version = "0.13.2"; + version = "0.13.4"; src = fetchurl { - url = 
"https://registry.npmjs.org/client-node-fixed-watcher/-/client-node-fixed-watcher-0.13.2.tgz"; - sha512 = "Ze0lahaDt28q9OnYZDTMOKq2zJs64ETwyfWEOMjUErtY7hXjL7z725Nu5Ghfb3Fagujy/bSJ2QUXRuNioQqC8w=="; + url = "https://registry.npmjs.org/client-node-fixed-watcher/-/client-node-fixed-watcher-0.13.4.tgz"; + sha512 = "av1xeciIaTlZj2mPdiqvX7M3BcJ0wFiXB0bl6/jIVI5LhExkTEFeTdxDa8JuF8PGpHF7HDtyMDicNszA/j7jFA=="; }; }; "cliui-3.2.0" = { @@ -1984,13 +1984,13 @@ let sha512 = "BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg=="; }; }; - "got-11.8.0" = { + "got-11.8.1" = { name = "got"; packageName = "got"; - version = "11.8.0"; + version = "11.8.1"; src = fetchurl { - url = "https://registry.npmjs.org/got/-/got-11.8.0.tgz"; - sha512 = "k9noyoIIY9EejuhaBNLyZ31D5328LeqnyPNXJQb2XlJZcKakLqN5m6O/ikhq/0lw56kUYS54fVm+D1x57YC9oQ=="; + url = "https://registry.npmjs.org/got/-/got-11.8.1.tgz"; + sha512 = "9aYdZL+6nHmvJwHALLwKSUZ0hMwGaJGYv3hoPLPgnT8BoBXm1SjnZeky+91tfwJaDzun2s4RsBRy48IEYv2q2Q=="; }; }; "graceful-fs-4.2.4" = { @@ -3145,13 +3145,13 @@ let sha1 = "2109adc7965887cfc05cbbd442cac8bfbb360863"; }; }; - "object-hash-2.0.3" = { + "object-hash-2.1.1" = { name = "object-hash"; packageName = "object-hash"; - version = "2.0.3"; + version = "2.1.1"; src = fetchurl { - url = "https://registry.npmjs.org/object-hash/-/object-hash-2.0.3.tgz"; - sha512 = "JPKn0GMu+Fa3zt3Bmr66JhokJU5BaNBIh4ZeTlaCBzrBsOeXzwcKKAK1tbLiPKgvwmPXsDvvLHoWh5Bm7ofIYg=="; + url = "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz"; + sha512 = "VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ=="; }; }; "object-inspect-1.8.0" = { @@ -3262,13 +3262,13 @@ let sha512 = "5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g=="; }; }; - "openid-client-4.2.1" = { + "openid-client-4.2.2" = { name = "openid-client"; packageName = "openid-client"; - version = "4.2.1"; + version = "4.2.2"; src = fetchurl { - url = "https://registry.npmjs.org/openid-client/-/openid-client-4.2.1.tgz"; - sha512 = "07eOcJeMH3ZHNvx5DVMZQmy3vZSTQqKSSunbtM1pXb+k5LBPi5hMum1vJCFReXlo4wuLEqZ/OgbsZvXPhbGRtA=="; + url = "https://registry.npmjs.org/openid-client/-/openid-client-4.2.2.tgz"; + sha512 = "aifblOWaE4nT7fZ/ax/5Ohzs9VrJOtxVvhuAMVF4QsPVNgLWDyGprPQXDZf7obEyaShzNlyv7aoIDPEVFO/XZQ=="; }; }; "optionator-0.9.1" = { @@ -4576,13 +4576,13 @@ let sha512 = "tEu6DGxGgRJPb/mVPIZ48e69xCn2yRmCgYmDugAVwmJ6o+0u1RI18eO7E7WBTLYLaEVVOhwQmcdhQHweux/WPg=="; }; }; - "underscore-1.11.0" = { + "underscore-1.12.0" = { name = "underscore"; packageName = "underscore"; - version = "1.11.0"; + version = "1.12.0"; src = fetchurl { - url = "https://registry.npmjs.org/underscore/-/underscore-1.11.0.tgz"; - sha512 = "xY96SsN3NA461qIRKZ/+qox37YXPtSBswMGfiNptr+wrt6ds4HaMw23TP612fEyGekRE6LNRiLYr/aqbHXNedw=="; + url = "https://registry.npmjs.org/underscore/-/underscore-1.12.0.tgz"; + sha512 = "21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ=="; }; }; "unpipe-1.0.0" = { @@ -4792,13 +4792,13 @@ let sha512 = "/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig=="; }; }; - "ws-7.4.0" = { + "ws-7.4.2" = { name = "ws"; packageName = "ws"; - version = "7.4.0"; + version = "7.4.2"; src = fetchurl { - url = "https://registry.npmjs.org/ws/-/ws-7.4.0.tgz"; - sha512 = "kyFwXuV/5ymf+IXhS6f0+eAFvydbaBW3zjpT6hUdAh/hbVjTIB5EHBGi0bPoCLSK2wcuz3BrEkB9LrYv1Nm4NQ=="; + url = "https://registry.npmjs.org/ws/-/ws-7.4.2.tgz"; + sha512 = 
"T4tewALS3+qsrpGI/8dqNMLIVdq/g/85U98HPMa6F0m6xTbvhXU6RCQLqPH3+SlomNV/LdY6RXEbBpMH6EOJnA=="; }; }; "wtfnode-0.8.3" = { @@ -4975,7 +4975,7 @@ let sources."@types/cacheable-request-6.0.1" sources."@types/caseless-0.12.2" sources."@types/http-cache-semantics-4.0.0" - sources."@types/js-yaml-3.12.5" + sources."@types/js-yaml-3.12.6" sources."@types/json5-0.0.29" sources."@types/keyv-3.1.1" sources."@types/lodash-4.14.161" @@ -4985,7 +4985,7 @@ let sources."@types/request-2.48.5" sources."@types/responselike-1.0.0" sources."@types/stream-buffers-3.0.3" - sources."@types/tar-4.0.3" + sources."@types/tar-4.0.4" sources."@types/tough-cookie-4.0.0" sources."@types/underscore-1.10.24" sources."@types/ws-6.0.4" @@ -5038,7 +5038,7 @@ let ]; }) sources."bytes-3.1.0" - sources."cacheable-lookup-5.0.3" + sources."cacheable-lookup-5.0.4" (sources."cacheable-request-7.0.1" // { dependencies = [ sources."get-stream-5.2.0" @@ -5054,9 +5054,9 @@ let sources."chokidar-3.4.2" sources."chownr-2.0.0" sources."clean-stack-2.2.0" - (sources."client-node-fixed-watcher-0.13.2" // { + (sources."client-node-fixed-watcher-0.13.4" // { dependencies = [ - sources."@types/node-10.17.44" + sources."@types/node-10.17.50" ]; }) sources."cliui-3.2.0" @@ -5238,7 +5238,7 @@ let sources."glob-7.1.6" sources."glob-parent-5.1.1" sources."globals-12.4.0" - sources."got-11.8.0" + sources."got-11.8.1" sources."graceful-fs-4.2.4" sources."growl-1.10.5" sources."grpc-promise-1.4.0" @@ -5397,7 +5397,7 @@ let sources."number-is-nan-1.0.1" sources."oauth-sign-0.9.0" sources."object-assign-4.1.1" - sources."object-hash-2.0.3" + sources."object-hash-2.1.1" sources."object-inspect-1.8.0" sources."object-keys-1.1.1" sources."object.assign-4.1.0" @@ -5408,7 +5408,7 @@ let sources."on-finished-2.3.0" sources."once-1.4.0" sources."one-time-1.0.0" - sources."openid-client-4.2.1" + sources."openid-client-4.2.2" sources."optionator-0.9.1" sources."optjs-3.2.2" sources."os-locale-1.4.0" @@ -5588,7 +5588,7 @@ let sources."type-fest-0.8.1" sources."type-is-1.6.18" sources."typescript-4.0.3" - sources."underscore-1.11.0" + sources."underscore-1.12.0" sources."unpipe-1.0.0" sources."uri-js-4.2.2" sources."util-deprecate-1.0.2" @@ -5619,7 +5619,7 @@ let sources."wrap-ansi-2.1.0" sources."wrappy-1.0.2" sources."write-1.0.3" - sources."ws-7.4.0" + sources."ws-7.4.2" sources."wtfnode-0.8.3" sources."xdg-basedir-4.0.0" sources."y18n-3.2.2" diff --git a/csi/moac/package-lock.json b/csi/moac/package-lock.json index 2d73a7a2e..58a2f1220 100644 --- a/csi/moac/package-lock.json +++ b/csi/moac/package-lock.json @@ -272,9 +272,9 @@ "integrity": "sha512-c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A==" }, "@types/js-yaml": { - "version": "3.12.5", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.5.tgz", - "integrity": "sha512-JCcp6J0GV66Y4ZMDAQCXot4xprYB+Zfd3meK9+INSJeVZwJmHAW30BBEEkPzXswMXuiyReUGOP3GxrADc9wPww==" + "version": "3.12.6", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.6.tgz", + "integrity": "sha512-cK4XqrLvP17X6c0C8n4iTbT59EixqyXL3Fk8/Rsk4dF3oX4dg70gYUXrXVUUHpnsGMPNlTQMqf+TVmNPX6FmSQ==" }, "@types/json5": { "version": "0.0.29", @@ -341,9 +341,9 @@ } }, "@types/tar": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.3.tgz", - "integrity": "sha512-Z7AVMMlkI8NTWF0qGhC4QIX0zkV/+y0J8x7b/RsHrN0310+YNjoJd8UrApCiGBCWtKjxS9QhNqLi2UJNToh5hA==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.4.tgz", + 
"integrity": "sha512-0Xv+xcmkTsOZdIF4yCnd7RkOOyfyqPaqJ7RZFKnwdxfDbkN3eAAE9sHl8zJFqBz4VhxolW9EErbjR1oyH7jK2A==", "requires": { "@types/minipass": "*", "@types/node": "*" @@ -670,9 +670,9 @@ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" }, "cacheable-lookup": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.3.tgz", - "integrity": "sha512-W+JBqF9SWe18A72XFzN/V/CULFzPm7sBXzzR6ekkE+3tLG72wFZrBiBZhrZuDoYexop4PHJVdFAKb/Nj9+tm9w==" + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", + "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==" }, "cacheable-request": { "version": "7.0.1", @@ -781,9 +781,9 @@ "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" }, "client-node-fixed-watcher": { - "version": "0.13.2", - "resolved": "https://registry.npmjs.org/client-node-fixed-watcher/-/client-node-fixed-watcher-0.13.2.tgz", - "integrity": "sha512-Ze0lahaDt28q9OnYZDTMOKq2zJs64ETwyfWEOMjUErtY7hXjL7z725Nu5Ghfb3Fagujy/bSJ2QUXRuNioQqC8w==", + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/client-node-fixed-watcher/-/client-node-fixed-watcher-0.13.4.tgz", + "integrity": "sha512-av1xeciIaTlZj2mPdiqvX7M3BcJ0wFiXB0bl6/jIVI5LhExkTEFeTdxDa8JuF8PGpHF7HDtyMDicNszA/j7jFA==", "requires": { "@types/js-yaml": "^3.12.1", "@types/node": "^10.12.0", @@ -810,9 +810,9 @@ }, "dependencies": { "@types/node": { - "version": "10.17.44", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.44.tgz", - "integrity": "sha512-vHPAyBX1ffLcy4fQHmDyIUMUb42gHZjPHU66nhvbMzAWJqHnySGZ6STwN3rwrnSd1FHB0DI/RWgGELgKSYRDmw==" + "version": "10.17.50", + "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.50.tgz", + "integrity": "sha512-vwX+/ija9xKc/z9VqMCdbf4WYcMTGsI0I/L/6shIF3qXURxZOhPQlPRHtjTpiNhAwn0paMJzlOQqw6mAGEQnTA==" } } }, @@ -1948,9 +1948,9 @@ } }, "got": { - "version": "11.8.0", - "resolved": "https://registry.npmjs.org/got/-/got-11.8.0.tgz", - "integrity": "sha512-k9noyoIIY9EejuhaBNLyZ31D5328LeqnyPNXJQb2XlJZcKakLqN5m6O/ikhq/0lw56kUYS54fVm+D1x57YC9oQ==", + "version": "11.8.1", + "resolved": "https://registry.npmjs.org/got/-/got-11.8.1.tgz", + "integrity": "sha512-9aYdZL+6nHmvJwHALLwKSUZ0hMwGaJGYv3hoPLPgnT8BoBXm1SjnZeky+91tfwJaDzun2s4RsBRy48IEYv2q2Q==", "requires": { "@sindresorhus/is": "^4.0.0", "@szmarczak/http-timer": "^4.0.5", @@ -3397,9 +3397,9 @@ "dev": true }, "object-hash": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.0.3.tgz", - "integrity": "sha512-JPKn0GMu+Fa3zt3Bmr66JhokJU5BaNBIh4ZeTlaCBzrBsOeXzwcKKAK1tbLiPKgvwmPXsDvvLHoWh5Bm7ofIYg==" + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz", + "integrity": "sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ==" }, "object-inspect": { "version": "1.8.0", @@ -3488,9 +3488,9 @@ } }, "openid-client": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.2.1.tgz", - "integrity": "sha512-07eOcJeMH3ZHNvx5DVMZQmy3vZSTQqKSSunbtM1pXb+k5LBPi5hMum1vJCFReXlo4wuLEqZ/OgbsZvXPhbGRtA==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.2.2.tgz", + "integrity": 
"sha512-aifblOWaE4nT7fZ/ax/5Ohzs9VrJOtxVvhuAMVF4QsPVNgLWDyGprPQXDZf7obEyaShzNlyv7aoIDPEVFO/XZQ==", "requires": { "base64url": "^3.0.1", "got": "^11.8.0", @@ -4745,9 +4745,9 @@ "dev": true }, "underscore": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.11.0.tgz", - "integrity": "sha512-xY96SsN3NA461qIRKZ/+qox37YXPtSBswMGfiNptr+wrt6ds4HaMw23TP612fEyGekRE6LNRiLYr/aqbHXNedw==" + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.0.tgz", + "integrity": "sha512-21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ==" }, "unpipe": { "version": "1.0.0", @@ -4933,9 +4933,9 @@ } }, "ws": { - "version": "7.4.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.0.tgz", - "integrity": "sha512-kyFwXuV/5ymf+IXhS6f0+eAFvydbaBW3zjpT6hUdAh/hbVjTIB5EHBGi0bPoCLSK2wcuz3BrEkB9LrYv1Nm4NQ==" + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.2.tgz", + "integrity": "sha512-T4tewALS3+qsrpGI/8dqNMLIVdq/g/85U98HPMa6F0m6xTbvhXU6RCQLqPH3+SlomNV/LdY6RXEbBpMH6EOJnA==" }, "wtfnode": { "version": "0.8.3", diff --git a/csi/moac/package.json b/csi/moac/package.json index f478137aa..a4a418aba 100644 --- a/csi/moac/package.json +++ b/csi/moac/package.json @@ -26,7 +26,7 @@ "dependencies": { "@grpc/proto-loader": "^0.5.5", "@types/lodash": "^4.14.161", - "client-node-fixed-watcher": "^0.13.2", + "client-node-fixed-watcher": "^0.13.4", "express": "^4.17.1", "grpc-promise": "^1.4.0", "grpc-uds": "^0.1.6", From 1987cb98c9cb32f8a8b6e5ee7eb176a5bd82abbf Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Mon, 11 Jan 2021 14:29:02 +0000 Subject: [PATCH 74/85] test: enable running rebuild E2E test on CI/CD Add rebuild tests to the E2E test script. --- scripts/e2e-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index a0df8bff3..c745b06e1 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -7,7 +7,7 @@ set -eux SCRIPTDIR=$(dirname "$(realpath "$0")") # new tests should be added before the replica_pod_remove test -TESTS="install basic_volume_io replica node_disconnect/replica_pod_remove" +TESTS="install basic_volume_io replica rebuild node_disconnect/replica_pod_remove" DEVICE= REGISTRY= TAG= From db4ea18da5e32a5348626c79465f07947c973501 Mon Sep 17 00:00:00 2001 From: Tiago Castro Date: Tue, 12 Jan 2021 11:28:55 +0000 Subject: [PATCH 75/85] fix(linter): fix linter git hook @commitlint/config-conventional is not installed by npx so we need to install it before running @commitlint/cli. since we're doing this, install @commitlint/cli which speeds up npx as it no longer needs to pull it. /ignore package-lock.json and /mode_modules - todo: mode this to another directory to avoid poluting the / or even add a tools js project which pulls all required tools, eg commitlint, semistandard... 
--- .github/workflows/pr-commitlint.yml | 3 +-- .gitignore | 2 ++ .pre-commit-config.yaml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-commitlint.yml b/.github/workflows/pr-commitlint.yml index 8ef3e229b..87a018d50 100644 --- a/.github/workflows/pr-commitlint.yml +++ b/.github/workflows/pr-commitlint.yml @@ -18,5 +18,4 @@ jobs: last_commit=HEAD^2 # don't lint the merge commit npx commitlint --from $first_commit~1 --to $last_commit -V - name: Lint Pull Request - run: echo $'${{ github.event.pull_request.title }}\n\n${{ github.event.pull_request.body }}' | npx commitlint -V - + run: echo "${{ github.event.pull_request.title }}"$'\n\n'"${{ github.event.pull_request.body }}" | npx commitlint -V diff --git a/.gitignore b/.gitignore index 670502f6a..be3036fc8 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ mayastor/local-randrw-0-verify.state mayastor/local-write_verify-0-verify.state test-yamls/* +/package-lock.json +/node_modules diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 22f6c0ea9..a03312fa0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,7 +34,7 @@ repos: name: Commit Lint description: Runs commitlint against the commit message. language: system - entry: bash -c "cat $1 | npx commitlint" + entry: bash -c "npm install @commitlint/config-conventional @commitlint/cli; cat $1 | npx commitlint" args: [$1] stages: [commit-msg] From 83baf5fd2024bbff1d673de53311b892a54d6a37 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Tue, 12 Jan 2021 16:41:24 +0000 Subject: [PATCH 76/85] ci: fix invalid docker prune command --- scripts/reclaim-space.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reclaim-space.sh b/scripts/reclaim-space.sh index c6f027d2d..afdfa8863 100755 --- a/scripts/reclaim-space.sh +++ b/scripts/reclaim-space.sh @@ -24,7 +24,7 @@ fi set -x nix-collect-garbage -docker prune --force --all +docker image prune --force --all set +x echo "Available space after cleanup: $(get_avail_gib) GiB" \ No newline at end of file From 68fb1558e734ec7180839165b6fef560afa984ab Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Thu, 14 Jan 2021 09:31:35 +0000 Subject: [PATCH 77/85] fix(moac): need more info about watcher objects --- csi/moac/watcher.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/csi/moac/watcher.ts b/csi/moac/watcher.ts index f70ec8313..99569cf01 100644 --- a/csi/moac/watcher.ts +++ b/csi/moac/watcher.ts @@ -240,7 +240,7 @@ export class CustomResourceCache extends EventEmitter { log.error(`Ignoring event ${event} with object without a name`); return; } - log.trace(`Received watcher event ${event} for ${this.name} "${name}"`); + log.trace(`Received watcher event ${event} for ${this.name} "${name}": ${JSON.stringify(cr)}`); this._setIdleTimeout(); let confirmOp = this.waiting[name]; if (confirmOp) { @@ -273,7 +273,7 @@ export class CustomResourceCache extends EventEmitter { return this.listWatch.start() .then(() => { this.connected = true; - log.debug(`${this.name} watcher was started`); + log.debug(`${this.name} watcher with ${this.listWatch.list().length} objects was started`); log.trace(`Initial content of the "${this.name}" cache: ` + this.listWatch.list().map((i: CustomResource) => i.metadata?.name)); this._setIdleTimeout(); From 11f55516192ecf57ecda7bab5f143e996b755eb0 Mon Sep 17 00:00:00 2001 From: Jonathan Teh <30538043+jonathan-teh@users.noreply.github.com> Date: Wed, 13 Jan 2021 16:37:48 +0000 Subject: [PATCH 78/85] fix(nexus): 
segfault connecting to 0 ns nvmf tgt Check bdev_count returned in the nvme_create callback and return early if no bdev was created, deleting the partially created nvme bdev first, to avoid dereferencing a null context.names. --- mayastor/src/bdev/dev/nvmf.rs | 23 ++++++++++++++++++----- test/grpc/test_nexus.js | 25 +++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/mayastor/src/bdev/dev/nvmf.rs b/mayastor/src/bdev/dev/nvmf.rs index 259f72db7..4ebb2d93e 100644 --- a/mayastor/src/bdev/dev/nvmf.rs +++ b/mayastor/src/bdev/dev/nvmf.rs @@ -148,22 +148,22 @@ impl CreateDestroy for Nvmf { extern "C" fn done_nvme_create_cb( arg: *mut c_void, - _bdev_count: c_ulong, + bdev_count: c_ulong, errno: c_int, ) { let sender = unsafe { - Box::from_raw(arg as *mut oneshot::Sender>) + Box::from_raw(arg as *mut oneshot::Sender>) }; sender - .send(errno_result_from_i32((), errno)) + .send(errno_result_from_i32(bdev_count as usize, errno)) .expect("done callback receiver side disappeared"); } let cname = CString::new(self.name.clone()).unwrap(); let mut context = NvmeCreateContext::new(self); - let (sender, receiver) = oneshot::channel::>(); + let (sender, receiver) = oneshot::channel::>(); let errno = unsafe { bdev_nvme_create( @@ -183,7 +183,7 @@ impl CreateDestroy for Nvmf { name: self.name.clone(), })?; - receiver + let bdev_count = receiver .await .context(nexus_uri::CancelBdev { name: self.name.clone(), @@ -192,6 +192,19 @@ impl CreateDestroy for Nvmf { name: self.name.clone(), })?; + if bdev_count == 0 { + error!("No nvme bdev created, no namespaces?"); + // Remove partially created nvme bdev which doesn't show up in + // the list of bdevs + let errno = unsafe { bdev_nvme_delete(cname.as_ptr()) }; + info!( + "removed partially created bdev {}, returned {}", + self.name, errno + ); + return Err(NexusBdevError::BdevNotFound { + name: self.name.clone(), + }); + } if let Some(bdev) = Bdev::lookup_by_name(&self.get_name()) { if let Some(u) = self.uuid { if bdev.uuid_as_string() != u.to_hyphenated().to_string() { diff --git a/test/grpc/test_nexus.js b/test/grpc/test_nexus.js index 47fab8de7..139df9f8b 100644 --- a/test/grpc/test_nexus.js +++ b/test/grpc/test_nexus.js @@ -827,6 +827,31 @@ describe('nexus', function () { } }); + // must be last nvmf test as it removes ns + it('should remove namespace from nvmf subsystem', (done) => { + const args = { + nqn: `nqn.2019-05.io.openebs:${TGTUUID}`, + nsid: 1 + }; + common.jsonrpcCommand('/tmp/target.sock', 'nvmf_subsystem_remove_ns', args, done); + }); + + it('should fail to create nexus with child that has no namespaces', (done) => { + const args = { + uuid: UUID, + size: diskSize, + children: [ + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` + ] + }; + + client.createNexus(args, (err) => { + if (!err) return done(new Error('Expected error')); + assert.equal(err.code, grpc.status.INVALID_ARGUMENT); + done(); + }); + }); + it('should have zero nexus devices left', (done) => { client.listNexus({}, (err, res) => { if (err) return done(err); From ba84570072e56a943687537ae6084565360bd1e5 Mon Sep 17 00:00:00 2001 From: Jan Kryl Date: Tue, 12 Jan 2021 14:08:17 +0000 Subject: [PATCH 79/85] fix(moac): import vols after nodes are registered Give storage nodes a chance to register with control plane before we start to import volumes from crds. Otherwise moac tries to fix volumes that are ok, but it does not know about it. 
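The change below implements this by deferring the volume operator start until registration messages have had time to arrive (one and a half heartbeat intervals). A minimal TypeScript sketch of that deferral, assuming a hypothetical `startAfterWarmup` helper rather than the actual moac wiring:

```typescript
// Sketch only, not the moac code: defer a start routine by 1.5x the
// storage nodes' heartbeat interval so registrations can arrive first.
function startAfterWarmup(
  heartbeatIntervalSecs: number,
  start: () => Promise<void>
): NodeJS.Timeout {
  const warmupSecs = Math.floor(1.5 * heartbeatIntervalSecs);
  console.log(`warming up for ${warmupSecs} seconds before importing volumes`);
  return setTimeout(() => {
    start().catch((err) => console.error(`deferred start failed: ${err}`));
  }, warmupSecs * 1000);
}

// e.g. with the 5s heartbeat interval this patch sets:
// startAfterWarmup(5, async () => { /* start volume operator, API server */ });
```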
--- csi/moac/event_stream.js | 13 +++++--- csi/moac/index.js | 42 ++++++++++++++++-------- csi/moac/test/event_stream_test.js | 14 ++++---- csi/moac/test/index.js | 6 ++-- csi/moac/test/volumes_test.js | 27 ++++++++++++--- csi/moac/volume.ts | 18 ++++++---- mayastor/src/subsys/mbus/registration.rs | 2 +- services/node/src/server.rs | 4 +-- 8 files changed, 83 insertions(+), 43 deletions(-) diff --git a/csi/moac/event_stream.js b/csi/moac/event_stream.js index 465b84bc9..254027dbf 100644 --- a/csi/moac/event_stream.js +++ b/csi/moac/event_stream.js @@ -91,12 +91,10 @@ class EventStream extends Readable { eventType: 'new', object: node }); + // First we emit replica and then pool events. Otherwise volume manager + // could start creating new volume on imported pool although that the + // volume is already there. node.pools.forEach((obj) => { - self.events.push({ - kind: 'pool', - eventType: 'new', - object: obj - }); obj.replicas.forEach((obj) => { self.events.push({ kind: 'replica', @@ -104,6 +102,11 @@ class EventStream extends Readable { object: obj }); }); + self.events.push({ + kind: 'pool', + eventType: 'new', + object: obj + }); }); node.nexus.forEach((obj) => { self.events.push({ diff --git a/csi/moac/index.js b/csi/moac/index.js index 4b794846e..2b47ee89d 100755 --- a/csi/moac/index.js +++ b/csi/moac/index.js @@ -40,11 +40,13 @@ function createKubeConfig (kubefile) { } async function main () { + let apiServer; let poolOper; let volumeOper; let csiNodeOper; let nodeOper; let kubeConfig; + let warmupTimer; const opts = yargs .options({ @@ -54,6 +56,12 @@ async function main () { default: '/var/tmp/csi.sock', string: true }, + i: { + alias: 'heartbeat-interval', + describe: 'Interval used by storage nodes for registration messages (seconds)', + default: 5, + number: true + }, k: { alias: 'kubeconfig', describe: 'Path to kubeconfig file', @@ -113,6 +121,7 @@ async function main () { // We must install signal handlers before grpc lib does it. 
async function cleanUp () { + if (warmupTimer) clearTimeout(warmupTimer); if (csiServer) csiServer.undoReady(); if (apiServer) apiServer.stop(); if (!opts.s) { @@ -175,22 +184,27 @@ async function main () { const volumes = new Volumes(registry); volumes.start(); - if (!opts.s) { - volumeOper = new VolumeOperator( - opts.namespace, - kubeConfig, - volumes, - opts.watcherIdleTimeout - ); - await volumeOper.init(kubeConfig); - await volumeOper.start(); - } + const warmupSecs = Math.floor(1.5 * opts.i); + log.info(`Warming up will take ${warmupSecs} seconds ...`); + warmupTimer = setTimeout(async () => { + warmupTimer = undefined; + if (!opts.s) { + volumeOper = new VolumeOperator( + opts.namespace, + kubeConfig, + volumes, + opts.watcherIdleTimeout + ); + await volumeOper.init(kubeConfig); + await volumeOper.start(); + } - const apiServer = new ApiServer(registry); - await apiServer.start(opts.port); + apiServer = new ApiServer(registry); + await apiServer.start(opts.port); - csiServer.makeReady(registry, volumes); - log.info('MOAC is up and ready to 🚀'); + csiServer.makeReady(registry, volumes); + log.info('MOAC is warmed up and ready to 🚀'); + }, warmupSecs * 1000); } main(); diff --git a/csi/moac/test/event_stream_test.js b/csi/moac/test/event_stream_test.js index 0cb4a8d89..1a204aa9f 100644 --- a/csi/moac/test/event_stream_test.js +++ b/csi/moac/test/event_stream_test.js @@ -171,9 +171,6 @@ module.exports = function () { expect(events[i].kind).to.equal('node'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.name).to.equal('node1'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool1'); expect(events[i].kind).to.equal('replica'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.uuid).to.equal('uuid1'); @@ -182,10 +179,13 @@ module.exports = function () { expect(events[i++].object.uuid).to.equal('uuid2'); expect(events[i].kind).to.equal('pool'); expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool2'); + expect(events[i++].object.name).to.equal('pool1'); expect(events[i].kind).to.equal('replica'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.uuid).to.equal('uuid3'); + expect(events[i].kind).to.equal('pool'); + expect(events[i].eventType).to.equal('new'); + expect(events[i++].object.name).to.equal('pool2'); expect(events[i].kind).to.equal('nexus'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.uuid).to.equal('nexus1'); @@ -198,9 +198,6 @@ module.exports = function () { expect(events[i].kind).to.equal('node'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.name).to.equal('node2'); - expect(events[i].kind).to.equal('pool'); - expect(events[i].eventType).to.equal('new'); - expect(events[i++].object.name).to.equal('pool3'); expect(events[i].kind).to.equal('replica'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.uuid).to.equal('uuid4'); @@ -210,6 +207,9 @@ module.exports = function () { expect(events[i].kind).to.equal('replica'); expect(events[i].eventType).to.equal('new'); expect(events[i++].object.uuid).to.equal('uuid6'); + expect(events[i].kind).to.equal('pool'); + expect(events[i].eventType).to.equal('new'); + expect(events[i++].object.name).to.equal('pool3'); expect(events[i].kind).to.equal('node'); expect(events[i].eventType).to.equal('sync'); expect(events[i++].object.name).to.equal('node2'); diff --git 
a/csi/moac/test/index.js b/csi/moac/test/index.js index d80b36adb..94deebc2d 100644 --- a/csi/moac/test/index.js +++ b/csi/moac/test/index.js @@ -53,12 +53,14 @@ describe('moac', function () { it('start moac process', function (done) { // Starting moac, which includes loading all NPM modules from disk, takes // time when running in docker with FS mounted from non-linux host. - this.timeout(4000); + this.timeout(5000); const child = spawn(path.join(__dirname, '..', 'index.js'), [ '-s', // NATS does not run but just to verify that the option works - '--message-bus=127.0.0.1' + '--message-bus=127.0.0.1', + // shorten the warm up to make the test faster + '--heartbeat-interval=1' ]); let stderr = ''; diff --git a/csi/moac/test/volumes_test.js b/csi/moac/test/volumes_test.js index 4ccb179da..98aab5a4c 100644 --- a/csi/moac/test/volumes_test.js +++ b/csi/moac/test/volumes_test.js @@ -19,8 +19,10 @@ const { Volumes } = require('../volumes'); const { GrpcCode } = require('../grpc_client'); const { shouldFailWith, waitUntil } = require('./utils'); const enums = require('./grpc_enums'); +const sleep = require('sleep-promise'); const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb'; +const EYE_BLINK_MS = 30; module.exports = function () { let registry, volumes; @@ -399,27 +401,40 @@ module.exports = function () { it('should import a volume and fault it if there are no replicas', async () => { volumes.start(); volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + // give FSA a chance to run + await sleep(EYE_BLINK_MS); expect(volume.state).to.equal('faulted'); expect(Object.keys(volume.replicas)).to.have.lengthOf(0); }); it('should import a volume without nexus', async () => { - const replica = new Replica({ + const replica1 = new Replica({ uuid: UUID, - size: 10, + size: 40, share: 'REPLICA_NONE', uri: `bdev:///${UUID}` }); - replica.pool = { node: node1 }; + replica1.pool = { node: node1 }; + const replica2 = new Replica({ + uuid: UUID, + size: 40, + share: 'REPLICA_NVMF', + uri: `nvmf:///${UUID}` + }); + replica2.pool = { node: node2 }; const getReplicaSetStub = sinon.stub(registry, 'getReplicaSet'); - getReplicaSetStub.returns([replica]); + getReplicaSetStub.returns([replica1, replica2]); volumes.start(); volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + expect(volume.state).to.equal('unknown'); + expect(Object.keys(volume.replicas)).to.have.lengthOf(2); + // give FSA a chance to run + await sleep(EYE_BLINK_MS); expect(volume.nexus).to.be.null(); expect(volume.state).to.equal('healthy'); expect(volume.size).to.equal(40); - expect(volEvents).to.have.lengthOf(3); + expect(volEvents).to.have.lengthOf(4); }); it('should import unpublished volume with nexus', async () => { @@ -450,6 +465,8 @@ module.exports = function () { volumes.start(); volume = await volumes.importVolume(UUID, volumeSpec, { size: 40 }); + // give FSA a chance to run + await sleep(EYE_BLINK_MS); expect(volume.nexus.getUri()).to.be.undefined(); expect(Object.keys(volume.replicas)).to.have.lengthOf(1); expect(Object.values(volume.replicas)[0]).to.equal(replica); diff --git a/csi/moac/volume.ts b/csi/moac/volume.ts index 4dedc7d4b..7ad4abbca 100644 --- a/csi/moac/volume.ts +++ b/csi/moac/volume.ts @@ -209,8 +209,10 @@ export class Volume { this.emitEvent('del'); } - // Trigger the run of FSA. It will either run immediately or if it is already - // running, it will start again when the current run finishes. + // Trigger the run of FSA. 
It will always run asynchronously to give caller + // a chance to perform other changes to the volume before everything is + // checked by FSA. If it is already running, it will start again when the + // current run finishes. // // Why critical section on fsa? Certain operations done by fsa are async. If // we allow another process to enter fsa before the async operation is done @@ -219,11 +221,13 @@ export class Volume { // done yet). fsa() { if (this.runFsa++ === 0) { - this._fsa().finally(() => { - const runAgain = this.runFsa > 1; - this.runFsa = 0; - if (runAgain) this.fsa(); - }); + setImmediate(() => { + this._fsa().finally(() => { + const runAgain = this.runFsa > 1; + this.runFsa = 0; + if (runAgain) this.fsa(); + }); + }) } } diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs index 205ecfe69..3eb2a7b2e 100644 --- a/mayastor/src/subsys/mbus/registration.rs +++ b/mayastor/src/subsys/mbus/registration.rs @@ -14,7 +14,7 @@ use snafu::Snafu; use std::{env, time::Duration}; /// Mayastor sends registration messages in this interval (kind of heart-beat) -const HB_INTERVAL: Duration = Duration::from_secs(10); +const HB_INTERVAL: Duration = Duration::from_secs(5); /// Errors for pool operations. /// diff --git a/services/node/src/server.rs b/services/node/src/server.rs index 18fc5b135..280976d7a 100644 --- a/services/node/src/server.rs +++ b/services/node/src/server.rs @@ -14,8 +14,8 @@ struct CliArgs { #[structopt(long, short, default_value = "nats://127.0.0.1:4222")] nats: String, /// Deadline for the mayastor instance keep alive registration - /// Default: 20s - #[structopt(long, short, default_value = "20s")] + /// Default: 10s + #[structopt(long, short, default_value = "10s")] deadline: humantime::Duration, } From 8e38605a8e8964af14948d5f99b04a33e1bcfca2 Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Thu, 14 Jan 2021 09:18:49 +0000 Subject: [PATCH 80/85] test: remove unused nix tests The nix tests have not been run for some time. Most of the test cases are now covered by either the Docker Compose tests or the E2E tests - making the nix tests redundant. Some of the rebuild test cases that weren't covered have been rewritten as Docker Compose tests. 
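The Docker Compose rewrites below replace the NixOS test driver's retry loops with explicit poll-until-timeout helpers (`wait_for_num_rebuilds`, `wait_for_rebuild_state`). A generic TypeScript sketch of that polling pattern, with illustrative names only:

```typescript
// Illustrative sketch of the poll-until-timeout pattern used by the
// rewritten compose tests; not project code.
async function waitUntil(
  cond: () => Promise<boolean>,
  timeoutMs: number,
  pollMs = 10
): Promise<boolean> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    if (await cond()) {
      return true;
    }
    await new Promise((resolve) => setTimeout(resolve, pollMs));
  }
  return false; // condition never held within the timeout
}
```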
--- composer/src/lib.rs | 20 +- mayastor/tests/rebuild.rs | 202 ++++++++++++++ nix/test/README.md | 162 ------------ nix/test/basic/fio_nvme_basic.nix | 44 ---- nix/test/basic/mayastor-config.yaml | 17 -- nix/test/child_status/child_status.nix | 93 ------- nix/test/common.nix | 68 ----- nix/test/default-mayastor-config.yaml | 15 -- nix/test/default.nix | 20 -- nix/test/disconnect/disconnect.nix | 57 ---- nix/test/disconnect/mayastor-config.yaml | 14 - nix/test/nvmf/nvmf_distributed.nix | 70 ----- nix/test/nvmf/nvmf_ports.nix | 44 ---- .../mayastorUtils/MayastorSystemUtils.py | 7 - .../pythonLibs/mayastorUtils/NvmfUtils.py | 3 - nix/test/pythonLibs/mayastorUtils/__init__.py | 2 - nix/test/rebuild/README.md | 21 -- nix/test/rebuild/node1-mayastor-config.yaml | 17 -- nix/test/rebuild/node2-mayastor-config.yaml | 17 -- nix/test/rebuild/rebuild.nix | 247 ------------------ 20 files changed, 221 insertions(+), 919 deletions(-) delete mode 100644 nix/test/README.md delete mode 100644 nix/test/basic/fio_nvme_basic.nix delete mode 100644 nix/test/basic/mayastor-config.yaml delete mode 100644 nix/test/child_status/child_status.nix delete mode 100644 nix/test/common.nix delete mode 100644 nix/test/default-mayastor-config.yaml delete mode 100644 nix/test/default.nix delete mode 100644 nix/test/disconnect/disconnect.nix delete mode 100644 nix/test/disconnect/mayastor-config.yaml delete mode 100644 nix/test/nvmf/nvmf_distributed.nix delete mode 100644 nix/test/nvmf/nvmf_ports.nix delete mode 100644 nix/test/pythonLibs/mayastorUtils/MayastorSystemUtils.py delete mode 100644 nix/test/pythonLibs/mayastorUtils/NvmfUtils.py delete mode 100644 nix/test/pythonLibs/mayastorUtils/__init__.py delete mode 100644 nix/test/rebuild/README.md delete mode 100644 nix/test/rebuild/node1-mayastor-config.yaml delete mode 100644 nix/test/rebuild/node2-mayastor-config.yaml delete mode 100644 nix/test/rebuild/rebuild.nix diff --git a/composer/src/lib.rs b/composer/src/lib.rs index fcbe77d0c..66e4aefab 100644 --- a/composer/src/lib.rs +++ b/composer/src/lib.rs @@ -35,7 +35,11 @@ use futures::{StreamExt, TryStreamExt}; use ipnetwork::Ipv4Network; use tonic::transport::Channel; -use bollard::{image::CreateImageOptions, models::ContainerInspectResponse}; +use bollard::{ + image::CreateImageOptions, + models::ContainerInspectResponse, + network::DisconnectNetworkOptions, +}; use mbus_api::TimeoutOptions; use rpc::mayastor::{ bdev_rpc_client::BdevRpcClient, @@ -939,6 +943,20 @@ impl ComposeTest { Ok(()) } + /// disconnect container from the network + pub async fn disconnect(&self, container_name: &str) -> Result<(), Error> { + let id = self.containers.get(container_name).unwrap(); + self.docker + .disconnect_network( + &self.network_id, + DisconnectNetworkOptions { + container: id.0.as_str(), + force: false, + }, + ) + .await + } + /// get the logs from the container. It would be nice to make it implicit /// that is, when you make a rpc call, whatever logs where created due to /// that are returned diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs index e7f1aa234..71b4aff6e 100644 --- a/mayastor/tests/rebuild.rs +++ b/mayastor/tests/rebuild.rs @@ -636,6 +636,191 @@ async fn rebuild_with_load() { nvmf_disconnect(child2); } +/// Test rebuild when restarting the source container. 
+#[tokio::test] +async fn rebuild_restart_src() { + let test = start_infrastructure("rebuild_restart_src").await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let rebuild_dst = &get_share_uri(&ms3); + + // Check a rebuild is started for a newly added child. + add_child(nexus_hdl, rebuild_dst, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + rebuild_dst, + "running", + Duration::from_secs(1), + ) + .await + .unwrap()); + + // Restart the rebuild source container and check that the rebuild fails. + test.restart("ms2") + .await + .expect("Failed to restart rebuild source"); + assert_eq!( + wait_for_successful_rebuild(nexus_hdl, rebuild_dst).await, + false + ); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + + // Check the states of the nexus and children. + // Note: A failed rebuild will not change the state of the source child + // (even if it fails to read from it), but it will fault the destination + // child. + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + let rebuild_src = &get_share_uri(&ms2); + assert_eq!( + get_child_state(nexus_hdl, rebuild_src).await, + ChildState::ChildOnline as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, rebuild_dst).await, + ChildState::ChildFaulted as i32 + ); +} + +/// Test rebuild when restarting the destination container. +#[tokio::test] +async fn rebuild_restart_dst() { + let test = start_infrastructure("rebuild_restart_dst").await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let rebuild_dst = &get_share_uri(&ms3); + + // Check a rebuild is started for a newly added child. + add_child(nexus_hdl, rebuild_dst, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + rebuild_dst, + "running", + Duration::from_secs(1), + ) + .await + .unwrap()); + + // Restart the rebuild destination container and check the rebuild fails. + test.restart("ms3") + .await + .expect("Failed to restart rebuild destination"); + assert_eq!( + wait_for_successful_rebuild(nexus_hdl, rebuild_dst).await, + false + ); + assert_eq!(get_num_rebuilds(nexus_hdl).await, 0); + + // Check the states of the nexus and children. + // Note: A failed rebuild will not change the state of the source child + // (even if it fails to read from it), but it will fault the destination + // child. + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + let rebuild_src = &get_share_uri(&ms2); + assert_eq!( + get_child_state(nexus_hdl, rebuild_src).await, + ChildState::ChildOnline as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, rebuild_dst).await, + ChildState::ChildFaulted as i32 + ); +} + +/// Test rebuild when disconnecting the source container from the network. +#[tokio::test] +async fn rebuild_src_disconnect() { + let test_name = "rebuild_src_disconnect"; + let test = start_infrastructure(test_name).await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let rebuild_dst = &get_share_uri(&ms3); + + // Check a rebuild is started for a newly added child. + add_child(nexus_hdl, rebuild_dst, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + rebuild_dst, + "running", + Duration::from_secs(1), + ) + .await + .unwrap()); + + // Disconnect the rebuild source container from the network and check that + // the rebuild terminates. This requires a large timeout because it takes + // some time for the NVMf subsystem to report the error up. 
+ test.disconnect("ms2") + .await + .expect("Failed to disconnect source container from network"); + assert_eq!( + wait_for_num_rebuilds(nexus_hdl, 0, Duration::from_secs(180)).await, + true + ); + + // Check the states of the nexus and children. + // Note: A failed rebuild will not change the state of the source child + // (even if it fails to read from it), but it will fault the destination + // child. + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + let rebuild_src = &get_share_uri(&ms2); + assert_eq!( + get_child_state(nexus_hdl, rebuild_src).await, + ChildState::ChildOnline as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, rebuild_dst).await, + ChildState::ChildFaulted as i32 + ); +} + +/// Test rebuild when disconnecting the destination container from the +/// network. +#[tokio::test] +async fn rebuild_dst_disconnect() { + let test_name = "rebuild_dst_disconnect"; + let test = start_infrastructure(test_name).await; + let (mut ms1, ms2, ms3) = setup_test(&test, 1).await; + let nexus_hdl = &mut ms1; + let rebuild_dst = &get_share_uri(&ms3); + + // Check a rebuild is started for a newly added child. + add_child(nexus_hdl, rebuild_dst, true).await; + assert!(wait_for_rebuild_state( + nexus_hdl, + rebuild_dst, + "running", + Duration::from_secs(1), + ) + .await + .unwrap()); + + // Disconnect the rebuild destination container from the network and check + // that the rebuild terminates. This requires a large timeout because it + // takes some time for the NVMf subsystem to report the error up. + test.disconnect("ms3") + .await + .expect("Failed to disconnect destination container from network"); + assert_eq!( + wait_for_num_rebuilds(nexus_hdl, 0, Duration::from_secs(180)).await, + true + ); + + // Check the states of the nexus and children. + // Note: A failed rebuild will not change the state of the source child + // (even if it fails to read from it), but it will fault the destination + // child. + check_nexus_state(nexus_hdl, NexusState::NexusDegraded).await; + let rebuild_src = &get_share_uri(&ms2); + assert_eq!( + get_child_state(nexus_hdl, rebuild_src).await, + ChildState::ChildOnline as i32 + ); + assert_eq!( + get_child_state(nexus_hdl, rebuild_dst).await, + ChildState::ChildFaulted as i32 + ); +} + /// Build the infrastructure required to run the tests. async fn start_infrastructure(test_name: &str) -> ComposeTest { Builder::new() @@ -880,6 +1065,23 @@ async fn get_rebuild_progress(hdl: &mut RpcHandle, child: &str) -> Option { } } +/// Wait for the number of rebuilds to reach the desired number. +/// Returns false if a timeout occurs. +async fn wait_for_num_rebuilds( + hdl: &mut RpcHandle, + num_rebuilds: u32, + timeout: Duration, +) -> bool { + let time = std::time::Instant::now(); + while time.elapsed().as_millis() < timeout.as_millis() { + if get_num_rebuilds(hdl).await == num_rebuilds { + return true; + } + std::thread::sleep(Duration::from_millis(10)); + } + false +} + /// Waits on the given rebuild state or times out. /// Returns false if a timeout occurs. async fn wait_for_rebuild_state( diff --git a/nix/test/README.md b/nix/test/README.md deleted file mode 100644 index 6fd4d810f..000000000 --- a/nix/test/README.md +++ /dev/null @@ -1,162 +0,0 @@ -# Summary - -The tests use nix to construct VMs and then runs tests against -these VMs. 
I'm not sure what a nice way is to layout the tests -themselves, but the thinking is: `` and for example -``, etc..etc - -The sequence of execution should be chosen such that the io tests -(for example) only start running when basic tests have succeeded. - -It is also possible to preserve the state of the VMs and use them in -subsequent testing, but I suggest we leave that for later. - - -TODO: -[ ] We should write some high-level python code that we can reuse in the tests. -[ ] We need to figure out how many tests we want to run within a single run -[ ] We will need more gRPC methods to build more sophisticated tests -[ ] A test where we set up and deploy, k8s first, and then install mayastor would be very nice but for sure should be done last. - - -# Run the test - -In order to run the test: -``` -cd path/to/test -nix-build default.nix -A fio_nvme_basic -``` - -To get the interactive driver (with emacs!) - -``` -nix-build default.nix -A fio_nvme_basic.driver -``` - -and run `./result/bin/` - - -The output shows something like: - -``` -{ - "bdevs": [ - { - "blk_size": 512, - "claimed": true, - "claimed_by": "NVMe-oF Target", - "name": "aio:///dev/vdb", - "num_blocks": 1048576, - "uuid": "00000000-76b6-4fcf-864d-1027d4038756" - } - ] -} - -(0.11 seconds) -should be able to discover the target -initiator: must succeed: nvme discover -a 192.168.0.1 -t tcp -s 8420 -initiator # [ 6.554240] nvme nvme0: queue_size 128 > ctrl sqsize 64, clamping down -initiator # [ 6.554997] nvme nvme0: new ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery", addr 192.168.0.1:8420 -initiator # [ 6.562521] nvme nvme0: Removing ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery" -(0.05 seconds) - -Discovery Log Number of Records 1, Generation counter 4 -=====Discovery Log Entry 0====== -trtype: tcp -adrfam: ipv4 -subtype: nvme subsystem -treq: not required -portid: 0 -trsvcid: 8430 -subnqn: nqn.2019-05.io.openebs:00000000-76b6-4fcf-864d-1027d4038756 -traddr: 192.168.0.1 -sectype: none - -(0.05 seconds) -should be able to connecto to the target -initiator: must succeed: nvme connect-all -a 192.168.0.1 -t tcp -s 8420 -initiator # [ 6.593168] nvme nvme0: queue_size 128 > ctrl sqsize 64, clamping down -initiator # [ 6.593786] nvme nvme0: new ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery", addr 192.168.0.1:8420 -initiator # [ 6.601565] nvme nvme0: Removing ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery" -initiator # [ 6.636751] nvme nvme0: queue_size 128 > ctrl sqsize 64, clamping down -initiator # [ 6.639529] nvme nvme0: creating 1 I/O queues. -initiator # [ 6.640734] nvme nvme0: mapped 1/0/0 default/read/poll queues. 
-initiator # [ 6.646345] nvme nvme0: new ctrl: NQN "nqn.2019-05.io.openebs:00000000-76b6-4fcf-864d-1027d4038756", addr 192.168.0.1:8430 -(0.08 seconds) - -(0.08 seconds) -should be able to run FIO with verify=crc32 -initiator: must succeed: fio --thread=1 --ioengine=libaio --direct=1 --bs=4k --iodepth=1 --rw=randrw --verify=crc32 --numjobs=1 --group_reporting=1 --runtime=15 --name=job --filename=/dev/nvme0n1 -(15.74 seconds) -job: (g=0): rw=randrw, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=1 -fio-3.20 -Starting 1 thread -Jobs: 1 (f=1): [m(1)][100.0%][r=880KiB/s,w=868KiB/s][r=220,w=217 IOPS][eta 00m:00s] -job: (groupid=0, jobs=1): err= 0: pid=697: Mon Jul 13 21:51:40 2020 - read: IOPS=215, BW=860KiB/s (881kB/s)(12.6MiB/15002msec) - slat (usec): min=29, max=112, avg=38.63, stdev= 7.99 - clat (usec): min=1520, max=2976, avg=2230.59, stdev=73.00 - lat (usec): min=1629, max=3025, avg=2277.39, stdev=70.91 - clat percentiles (usec): - | 1.00th=[ 2147], 5.00th=[ 2180], 10.00th=[ 2180], 20.00th=[ 2212], - | 30.00th=[ 2212], 40.00th=[ 2212], 50.00th=[ 2212], 60.00th=[ 2245], - | 70.00th=[ 2245], 80.00th=[ 2245], 90.00th=[ 2278], 95.00th=[ 2278], - | 99.00th=[ 2638], 99.50th=[ 2671], 99.90th=[ 2868], 99.95th=[ 2868], - | 99.99th=[ 2966] - bw ( KiB/s): min= 768, max= 968, per=99.53%, avg=855.93, stdev=55.26, samples=29 - iops : min= 192, max= 242, avg=213.97, stdev=13.82, samples=29 - write: IOPS=222, BW=891KiB/s (912kB/s)(13.0MiB/15002msec); 0 zone resets - slat (usec): min=43, max=137, avg=56.50, stdev=10.74 - clat (usec): min=1585, max=5462, avg=2202.96, stdev=84.77 - lat (usec): min=1648, max=5547, avg=2269.14, stdev=83.71 - clat percentiles (usec): - | 1.00th=[ 2114], 5.00th=[ 2147], 10.00th=[ 2180], 20.00th=[ 2180], - | 30.00th=[ 2180], 40.00th=[ 2180], 50.00th=[ 2212], 60.00th=[ 2212], - | 70.00th=[ 2212], 80.00th=[ 2245], 90.00th=[ 2245], 95.00th=[ 2278], - | 99.00th=[ 2311], 99.50th=[ 2343], 99.90th=[ 2835], 99.95th=[ 2900], - | 99.99th=[ 5473] - bw ( KiB/s): min= 784, max= 984, per=100.00%, avg=897.03, stdev=55.05, samples=29 - iops : min= 196, max= 246, avg=224.24, stdev=13.77, samples=29 - lat (msec) : 2=0.79%, 4=99.19%, 10=0.02% - cpu : usr=0.25%, sys=2.07%, ctx=13174, majf=0, minf=80 - IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% - submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% - complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% - issued rwts: total=3226,3340,0,0 short=0,0,0,0 dropped=0,0,0,0 - latency : target=0, window=0, percentile=100.00%, depth=1 - -Run status group 0 (all jobs): - READ: bw=860KiB/s (881kB/s), 860KiB/s-860KiB/s (881kB/s-881kB/s), io=12.6MiB (13.2MB), run=15002-15002msec - WRITE: bw=891KiB/s (912kB/s), 891KiB/s-891KiB/s (912kB/s-912kB/s), io=13.0MiB (13.7MB), run=15002-15002msec - -Disk stats (read/write): - nvme0n1: ios=3257/3317, merge=0/0, ticks=7362/7448, in_queue=13097, util=44.71% - -(15.74 seconds) -should be able to disconnect from the target -initiator: must succeed: nvme disconnect-all -initiator # [ 22.396334] nvme nvme0: Removing ctrl: NQN "nqn.2019-05.io.openebs:00000000-76b6-4fcf-864d-1027d4038756" -(0.02 seconds) - -(0.02 seconds) -(23.04 seconds) -test script finished in 23.06s -cleaning up -killing initiator (pid 9) -killing target (pid 25) -(0.00 seconds) -/nix/store/0024692x0a7mpbqkzblnyam97w6y77ja-vm-test-run-fio_against_nvmf_target - -``` - - # Rerun the tests - - Once the test has completed succesfully and you want to re-run it, - 
the output has to be destroyed. Simple way to do this is: - - ``` - nix-store --delete ./result --ignore-liveness - ``` - - Running the driver manually should always be possible regardless - of result directory \ No newline at end of file diff --git a/nix/test/basic/fio_nvme_basic.nix b/nix/test/basic/fio_nvme_basic.nix deleted file mode 100644 index 30e6be3fd..000000000 --- a/nix/test/basic/fio_nvme_basic.nix +++ /dev/null @@ -1,44 +0,0 @@ -{ pkgs, lib, ... }: -let - targetIp = "192.168.0.1"; - initiatorIp = "192.168.0.2"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "fio_against_nvmf_target"; - meta = with pkgs.stdenv.lib.maintainers; { - maintainers = [ gila ]; - }; - - nodes = { - target = common.defaultMayastorNode { ip = targetIp; mayastorConfigYaml = ./mayastor-config.yaml; }; - initiator = common.defaultMayastorNode { ip = initiatorIp; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - - with subtest("the bdev of the target should be listed"): - print(target.succeed("mayastor-client -a ${targetIp} bdev list")) - - with subtest("should be able to discover the target"): - print(initiator.succeed("nvme discover -a ${targetIp} -t tcp -s 8420")) - - with subtest("should be able to connect to the target"): - print(initiator.succeed("nvme connect-all -a ${targetIp} -t tcp -s 8420")) - - with subtest("should be able to run FIO with verify=crc32"): - print( - initiator.succeed( - "fio --thread=1 --ioengine=libaio --direct=1 --bs=4k --iodepth=1 --rw=randrw --verify=crc32 --numjobs=1 --group_reporting=1 --runtime=15 --name=job --filename=" - + "/dev/nvme0n1" - ) - ) - - with subtest("should be able to disconnect from the target"): - print(initiator.succeed("nvme disconnect-all")) - ''; -} diff --git a/nix/test/basic/mayastor-config.yaml b/nix/test/basic/mayastor-config.yaml deleted file mode 100644 index e247623d7..000000000 --- a/nix/test/basic/mayastor-config.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -source: ~ -nexus_opts: - nvmf_enable: true - nvmf_discovery_enable: true - nvmf_nexus_port: 8420 - nvmf_replica_port: 8430 - iscsi_enable: false -err_store_opts: - err_store_size: 256 - enable_err_store: true -base_bdevs: - - uri: "aio:///dev/vdb" - uuid: 00000000-76b6-4fcf-864d-1027d4038756 -nexus_bdevs: ~ -pools: ~ -implicit_share_base: true diff --git a/nix/test/child_status/child_status.nix b/nix/test/child_status/child_status.nix deleted file mode 100644 index 519a9ce04..000000000 --- a/nix/test/child_status/child_status.nix +++ /dev/null @@ -1,93 +0,0 @@ -{ pkgs, lib, ... 
}: -let - node_ip = "192.168.0.1"; - nexus_uuid = "19b98ac8-c1ea-11ea-8e3b-d74f5d324a22"; - child_1 = "malloc:///malloc0?blk_size=512&size_mb=20"; - child_2 = "malloc:///malloc1?blk_size=512&size_mb=20"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "child_status"; - - nodes = { - node = common.defaultMayastorNode { ip = node_ip; childStatusConfigYaml = "/tmp/child-status.yaml"; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - from time import sleep - - - def init(): - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - node.succeed( - "mayastor-client -a ${node_ip} nexus create ${nexus_uuid} 20MiB '${child_1}'" - ) - - - def get_nexus_state(): - result = node.succeed("mayastor-client -a ${node_ip} nexus list").split() - return result[7] - - - def check_nexus_state(expected_state): - state = get_nexus_state() - assert state == expected_state, "Nexus state {}, expected, {}".format( - state, expected_state - ) - - - def get_children_states(): - result = node.succeed( - "mayastor-client -a ${node_ip} nexus children ${nexus_uuid}" - ).split() - child1_state = result[3] - child2_state = result[5] - return [child1_state, child2_state] - - - def check_children_states(child1_expected_state, child2_expected_state): - states = get_children_states() - assert states[0] == child1_expected_state, "Child 1 state {}, expected {}".format( - states[0], child1_expected_state - ) - assert states[1] == child2_expected_state, "Child 2 state {}, expected {}".format( - states[1], child2_expected_state - ) - - - init() - - with subtest("rebuild on mayastor restart"): - node.succeed( - "mayastor-client -a ${node_ip} nexus add ${nexus_uuid} '${child_2}' true" - ) - check_nexus_state("degraded") - check_children_states("online", "degraded") - - # Restart mayastor service - node.systemctl("restart mayastor") - sleep(1) - - # Rebuild should have been completed and everything should be healthy - check_nexus_state("online") - check_children_states("online", "online") - - with subtest("fault child"): - node.succeed( - "mayastor-client -a ${node_ip} nexus child fault ${nexus_uuid} '${child_2}'" - ) - check_nexus_state("degraded") - check_children_states("online", "faulted") - - # Restart mayastor service - node.systemctl("restart mayastor") - sleep(1) - - # The faulted child should remain faulted causing the nexus to be in a degraded state - check_nexus_state("degraded") - check_children_states("online", "faulted") - ''; -} diff --git a/nix/test/common.nix b/nix/test/common.nix deleted file mode 100644 index ca361d334..000000000 --- a/nix/test/common.nix +++ /dev/null @@ -1,68 +0,0 @@ -{ pkgs, ... }: -{ - importMayastorUtils = '' - import sys - - sys.path.insert(0, "${./pythonLibs}") - import mayastorUtils - ''; - - # We provide sensible defaults for these fields, so that tests can be decluttered. - # TODO Find a way to have the default IP just be DHCP - defaultMayastorNode = - { ip ? "192.168.0.1" - , mayastorConfigYaml ? ./default-mayastor-config.yaml - , childStatusConfigYaml ? "" - }: { config, lib, ... 
}: { - - virtualisation = { - memorySize = 4096; - emptyDiskImages = [ 512 ]; - vlans = [ 1 ]; - }; - - boot = { - kernel.sysctl = { - "vm.nr_hugepages" = 512; - }; - kernelModules = [ - "nvme-tcp" - ]; - }; - - networking.firewall.enable = false; - networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [ - { address = ip; prefixLength = 24; } - ]; - - environment = { - systemPackages = with pkgs; [ - mayastor - nvme-cli - fio - ]; - - etc."mayastor-config.yaml" = { - mode = "0664"; - source = mayastorConfigYaml; - }; - }; - - systemd.services.mayastor = { - enable = true; - wantedBy = [ "multi-user.target" ]; - after = [ "network.target" ]; - description = "Mayastor"; - environment = { - MY_POD_IP = ip; - }; - - serviceConfig = { - ExecStart = - if childStatusConfigYaml == "" - then "${pkgs.mayastor}/bin/mayastor -g 0.0.0.0:10124 -y /etc/mayastor-config.yaml" - else "${pkgs.mayastor}/bin/mayastor -g 0.0.0.0:10124 -y /etc/mayastor-config.yaml -C ${childStatusConfigYaml}"; - }; - }; - }; -} diff --git a/nix/test/default-mayastor-config.yaml b/nix/test/default-mayastor-config.yaml deleted file mode 100644 index d3675c84f..000000000 --- a/nix/test/default-mayastor-config.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -source: ~ -nexus_opts: - nvmf_enable: true - nvmf_discovery_enable: true - nvmf_nexus_port: 8420 - nvmf_replica_port: 8430 - iscsi_enable: false -err_store_opts: - err_store_size: 256 - enable_err_store: true -base_bdevs: ~ -nexus_bdevs: ~ -pools: ~ -implicit_share_base: false diff --git a/nix/test/default.nix b/nix/test/default.nix deleted file mode 100644 index 1651dfded..000000000 --- a/nix/test/default.nix +++ /dev/null @@ -1,20 +0,0 @@ -# -# TODO: not sure if we need to import the sources -# -let - sources = import ./../../nix/sources.nix; - pkgs = import sources.nixpkgs { - overlays = [ - (_: _: { inherit sources; }) - (import ./../../nix/mayastor-overlay.nix) - ]; - }; -in -{ - fio_nvme_basic = pkgs.nixosTest ./basic/fio_nvme_basic.nix; - nvmf_ports = pkgs.nixosTest ./nvmf/nvmf_ports.nix; - nvmf_distributed = pkgs.nixosTest ./nvmf/nvmf_distributed.nix; - rebuild = pkgs.nixosTest ./rebuild/rebuild.nix; - disconnect = pkgs.nixosTest ./disconnect/disconnect.nix; - child_status = pkgs.nixosTest ./child_status/child_status.nix; -} diff --git a/nix/test/disconnect/disconnect.nix b/nix/test/disconnect/disconnect.nix deleted file mode 100644 index d2cafa52e..000000000 --- a/nix/test/disconnect/disconnect.nix +++ /dev/null @@ -1,57 +0,0 @@ -{ pkgs, lib, ... 
}: -let - targetIp = "192.168.0.1"; - initiatorIp = "192.168.0.2"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "fio_against_nvmf_target"; - - nodes = { - target = common.defaultMayastorNode { ip = targetIp; mayastorConfigYaml = ./mayastor-config.yaml; }; - initiator = common.defaultMayastorNode { ip = initiatorIp; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - from time import sleep - - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - - with subtest("the bdev of the target should be listed"): - print(target.succeed("mayastor-client -a ${targetIp} bdev list")) - - with subtest("should be able to discover the target"): - print(initiator.succeed("nvme discover -a ${targetIp} -t tcp -s 8420")) - - with subtest("should be able to connect to the target"): - print(initiator.succeed("nvme connect-all -a ${targetIp} -t tcp -s 8420")) - - # First verify IO passes over the connection - with subtest("should be able to run FIO with verify=crc32"): - print( - initiator.succeed( - "fio --thread=1 --ioengine=libaio --direct=1 --bs=4k --iodepth=1 --rw=randrw --verify=crc32 --numjobs=1 --group_reporting=1 --runtime=15 --name=job --filename=" - + "/dev/nvme0n1" - ) - ) - - # Create a network fault to the target and expect fio to hang - with subtest("FIO should fail to complete due to the network disconnection"): - # break the network connection - target.block() - print( - initiator.fail( - "timeout -k 1s 60s fio --thread=1 --ioengine=libaio --direct=1 --bs=4k --iodepth=1 --rw=randrw --verify=crc32 --numjobs=1 --group_reporting=1 --runtime=15 --name=job --filename=" - + "/dev/nvme0n1" - ) - ) - # reconnect the network to allow nvme disconnection - target.unblock() - - with subtest("should be able to disconnect from the target"): - print(initiator.succeed("nvme disconnect-all")) - ''; -} diff --git a/nix/test/disconnect/mayastor-config.yaml b/nix/test/disconnect/mayastor-config.yaml deleted file mode 100644 index ddef7241c..000000000 --- a/nix/test/disconnect/mayastor-config.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -source: ~ -nexus_opts: - nvmf_enable: true - nvmf_discovery_enable: true - nvmf_nexus_port: 8420 - nvmf_replica_port: 8430 - iscsi_enable: false -base_bdevs: - - uri: "aio:///dev/vdb" - uuid: 9f1a9204-fb89-47f2-9a99-491800999999 -nexus_bdevs: ~ -pools: ~ -implicit_share_base: true diff --git a/nix/test/nvmf/nvmf_distributed.nix b/nix/test/nvmf/nvmf_distributed.nix deleted file mode 100644 index 487e781e8..000000000 --- a/nix/test/nvmf/nvmf_distributed.nix +++ /dev/null @@ -1,70 +0,0 @@ -{ pkgs, lib, ... 
}: -let - backendIp = "192.168.0.1"; - targetIp = "192.168.0.2"; - initiatorIp = "192.168.0.3"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "fio_against_nvmf_nexus_with_replica"; - meta = with pkgs.stdenv.lib.maintainers; { - maintainers = [ tjoshum ]; - }; - - nodes = { - backend = common.defaultMayastorNode {}; - target = common.defaultMayastorNode { ip = targetIp; }; - initiator = common.defaultMayastorNode { ip = initiatorIp; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - - replicaId = "5b5b04ea-c1e3-11ea-bd82-a7d5cb04b391" - with subtest("setup replica"): - print(backend.succeed("mayastor-client pool create pool1 /dev/vdb")) - print( - backend.succeed( - "mayastor-client replica create --protocol nvmf --size 64MiB pool1 " - + replicaId - ) - ) - - with subtest("connect nexus to replica"): - print( - target.succeed( - "mayastor-client nexus create 19b98ac8-c1ea-11ea-8e3b-d74f5d324a22 64MiB nvmf://${backendIp}:" - + mayastorUtils.DEFAULT_REPLICA_PORT - + "/nqn.2019-05.io.openebs:" - + replicaId - ) - ) - print( - target.succeed( - "mayastor-client nexus publish -p nvmf 19b98ac8-c1ea-11ea-8e3b-d74f5d324a22" - ) - ) - - with subtest("should be able to connect to the target"): - print( - initiator.succeed( - "nvme connect-all -a ${targetIp} -t tcp -s " - + mayastorUtils.DEFAULT_NEXUS_PORT - ) - ) - - with subtest("should be able to run FIO with verify=crc32"): - print( - initiator.succeed( - "fio --thread=1 --ioengine=libaio --direct=1 --bs=4k --iodepth=1 --rw=randrw --verify=crc32 --numjobs=1 --group_reporting=1 --runtime=15 --name=job --filename=" - + "/dev/nvme0n1" - ) - ) - - with subtest("should be able to disconnect from the target"): - print(initiator.succeed("nvme disconnect-all")) - ''; -} diff --git a/nix/test/nvmf/nvmf_ports.nix b/nix/test/nvmf/nvmf_ports.nix deleted file mode 100644 index 182a05a7d..000000000 --- a/nix/test/nvmf/nvmf_ports.nix +++ /dev/null @@ -1,44 +0,0 @@ -{ pkgs, lib, ... 
}: -let - backendIp = "192.168.0.1"; - targetIp = "192.168.0.2"; - initiatorIp = "192.168.0.3"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "nvmf_against_replica_and_nexus_ports"; - meta = with pkgs.stdenv.lib.maintainers; { - maintainers = [ tjoshum ]; - }; - - nodes = { - backend = common.defaultMayastorNode { ip = backendIp; }; - target = common.defaultMayastorNode { ip = targetIp; }; - initiator = common.defaultMayastorNode { ip = initiatorIp; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - - replicaId = "5b5b04ea-c1e3-11ea-bd82-a7d5cb04b391" - print(backend.succeed("mayastor-client pool create pool1 /dev/vdb")) - print( - backend.succeed( - "mayastor-client replica create --protocol nvmf --size 64MiB pool1 " + replicaId - ) - ) - - with subtest("discover replica over replica port"): - assert mayastorUtils.subsystem_is_discoverable( - backend, "${backendIp}", mayastorUtils.DEFAULT_REPLICA_PORT, replicaId - ) - - with subtest("discover replica over nexus port"): - assert mayastorUtils.subsystem_is_discoverable( - backend, "${backendIp}", mayastorUtils.DEFAULT_NEXUS_PORT, replicaId - ) - ''; -} diff --git a/nix/test/pythonLibs/mayastorUtils/MayastorSystemUtils.py b/nix/test/pythonLibs/mayastorUtils/MayastorSystemUtils.py deleted file mode 100644 index 5c36b11c3..000000000 --- a/nix/test/pythonLibs/mayastorUtils/MayastorSystemUtils.py +++ /dev/null @@ -1,7 +0,0 @@ -DEFAULT_NEXUS_PORT = "8420" -DEFAULT_REPLICA_PORT = "8430" - -def wait_for_mayastor_all(machines): - for node in machines: - node.wait_for_unit("multi-user.target") - node.wait_for_open_port(10124) diff --git a/nix/test/pythonLibs/mayastorUtils/NvmfUtils.py b/nix/test/pythonLibs/mayastorUtils/NvmfUtils.py deleted file mode 100644 index 1ec48de5a..000000000 --- a/nix/test/pythonLibs/mayastorUtils/NvmfUtils.py +++ /dev/null @@ -1,3 +0,0 @@ -def subsystem_is_discoverable(host, ip, port, subsys): - discoveryResponse = host.succeed("nvme discover -a " + ip + " -t tcp -s " + port) - return subsys in discoveryResponse diff --git a/nix/test/pythonLibs/mayastorUtils/__init__.py b/nix/test/pythonLibs/mayastorUtils/__init__.py deleted file mode 100644 index 27151c94d..000000000 --- a/nix/test/pythonLibs/mayastorUtils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .MayastorSystemUtils import * -from .NvmfUtils import * diff --git a/nix/test/rebuild/README.md b/nix/test/rebuild/README.md deleted file mode 100644 index 55930b1ef..000000000 --- a/nix/test/rebuild/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Summary - -Nix tests for rebuild functionality - -# Run the test - -In order to run the test: -``` -cd path/to/test -nix-build default.nix -A rebuild -``` - - - # Rerun the tests - - Once the test has completed succesfully and you want to re-run it, - the output has to be destroyed. 
Simple way to do this is: - - ``` - nix-store --delete ./result --ignore-liveness - ``` diff --git a/nix/test/rebuild/node1-mayastor-config.yaml b/nix/test/rebuild/node1-mayastor-config.yaml deleted file mode 100644 index 0c2ca156f..000000000 --- a/nix/test/rebuild/node1-mayastor-config.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -source: ~ -nexus_opts: - nvmf_enable: true - nvmf_discovery_enable: true - nvmf_nexus_port: 8420 - nvmf_replica_port: 8430 - iscsi_enable: false -err_store_opts: - err_store_size: 256 - enable_err_store: true -pools: - - name: "pool1" - disks: - - "aio:///dev/vdb?blk_size=4096" - replicas: [] -implicit_share_base: true diff --git a/nix/test/rebuild/node2-mayastor-config.yaml b/nix/test/rebuild/node2-mayastor-config.yaml deleted file mode 100644 index 297ce1a48..000000000 --- a/nix/test/rebuild/node2-mayastor-config.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -source: ~ -nexus_opts: - nvmf_enable: true - nvmf_discovery_enable: true - nvmf_nexus_port: 8420 - nvmf_replica_port: 8430 - iscsi_enable: false -err_store_opts: - err_store_size: 256 - enable_err_store: true -pools: - - name: "pool2" - disks: - - "aio:///dev/vdb?blk_size=4096" - replicas: [] -implicit_share_base: true diff --git a/nix/test/rebuild/rebuild.nix b/nix/test/rebuild/rebuild.nix deleted file mode 100644 index 34bc7270b..000000000 --- a/nix/test/rebuild/rebuild.nix +++ /dev/null @@ -1,247 +0,0 @@ -{ pkgs, lib, ... }: -let - node1ip = "192.168.0.1"; - node2ip = "192.168.0.2"; - nexus_uuid = "19b98ac8-c1ea-11ea-8e3b-d74f5d324a22"; - replica1_uuid = "9a9843db-f715-4f52-8aa4-119f5df3d05d"; - replica2_uuid = "9a9843db-f715-4f52-8aa4-119f5df3d06e"; - common = import ../common.nix { inherit pkgs; }; -in -{ - name = "rebuild"; - meta = with pkgs.stdenv.lib.maintainers; { - maintainers = [ paulyoong ]; - }; - - nodes = { - node1 = common.defaultMayastorNode { ip = node1ip; mayastorConfigYaml = ./node1-mayastor-config.yaml; }; - node2 = common.defaultMayastorNode { ip = node2ip; mayastorConfigYaml = ./node2-mayastor-config.yaml; }; - }; - - testScript = '' - ${common.importMayastorUtils} - - from time import sleep - - - def get_replica_states(): - result = node1.succeed( - "mayastor-client -a ${node1ip} nexus children ${nexus_uuid}" - ).split() - replica1_state = result[3] - replica2_state = result[5] - return [replica1_state, replica2_state] - - - def get_replica1_uri(): - result = node1.succeed("mayastor-client -a ${node1ip} replica list") - return "bdev:///" + result.split("bdev:///")[1] - - - def get_replica2_uri(): - result = node2.succeed("mayastor-client -a ${node2ip} replica list") - print(result) - return "nvmf://" + result.split("nvmf://")[1] - - - def get_num_rebuilds(): - result = node1.succeed("mayastor-client -a ${node1ip} nexus list").split() - # Number of rebuilds is the last entry - return int(result[len(result) - 1]) - - - def get_rebuild_progress_percentage(): - progress = exec_rebuild_operation("progress", get_replica2_uri()).split("%")[0] - return int(progress) - - - def exec_rebuild_operation(operation, child_uri): - return node1.succeed( - "mayastor-client -a ${node1ip} rebuild {} ${nexus_uuid} '{}'".format( - operation, child_uri - ) - ) - - - def get_rebuild_state(child_uri): - return exec_rebuild_operation("state", child_uri).strip().lower() - - - def startup(): - start_all() - mayastorUtils.wait_for_mayastor_all(machines) - - # Create replicas on nodes - node1.succeed( - "mayastor-client -a ${node1ip} replica create pool1 ${replica1_uuid} --size 100MiB --protocol none" - ) - 
node2.succeed( - "mayastor-client -a ${node2ip} replica create pool2 ${replica2_uuid} --size 100MiB --protocol nvmf" - ) - - # Create nexus on node 1 - node1.succeed( - "mayastor-client -a ${node1ip} nexus create ${nexus_uuid} 100MiB '{}'".format( - get_replica1_uri() - ) - ) - # Add a second child to nexus and don't start rebuilding - node1.succeed( - "mayastor-client -a ${node1ip} nexus add ${nexus_uuid} '{}' true".format( - get_replica2_uri() - ) - ) - - - def teardown(): - # Destroy nexus - node1.succeed( - "mayastor-client -a ${node1ip} nexus destroy ${nexus_uuid}" - ) - # Delete replicas - node1.succeed( - "mayastor-client -a ${node1ip} replica destroy ${replica1_uuid}" - ) - node2.succeed( - "mayastor-client -a ${node2ip} replica destroy ${replica2_uuid}" - ) - - node1.shutdown() - node2.shutdown() - - - def new_env(): - teardown() - startup() - - - """ - Test cases - """ - - startup() - replica2_uri = get_replica2_uri() - - with subtest("start a rebuild"): - exec_rebuild_operation("start", replica2_uri) - rebuild_state = get_rebuild_state(replica2_uri) - expected_state = "running" - assert rebuild_state == expected_state, "Rebuild state {}, expected {}".format( - rebuild_state, expected_state - ) - - with subtest("pause a rebuild"): - exec_rebuild_operation("pause", replica2_uri) - - # Wait for the rebuild to pause. - # We retry because this may take a little time (the rebuild must complete outstanding I/Os before becoming paused). - retries = 5 - while get_rebuild_state(replica2_uri) != "paused" and retries > 0: - sleep(1) - retries -= 1 - - rebuild_state = get_rebuild_state(replica2_uri) - expected_state = "paused" - assert rebuild_state == expected_state, "Rebuild state {}, expected {}".format( - rebuild_state, expected_state - ) - - with subtest("resume a rebuild"): - exec_rebuild_operation("resume", replica2_uri) - rebuild_state = get_rebuild_state(replica2_uri) - expected_state = "running" - assert rebuild_state == expected_state, "Rebuild state {}, expected {}".format( - rebuild_state, expected_state - ) - - with subtest("get number of rebuilds"): - num_rebuilds = get_num_rebuilds() - expected_num_rebuilds = 1 - assert ( - num_rebuilds == expected_num_rebuilds - ), "Number of rebuilds {}, expected {}".format(num_rebuilds, expected_num_rebuilds) - - with subtest("network fault"): - # Wait for some rebuild progress - while get_rebuild_progress_percentage() < 5: - sleep(1) - - # Create a transient network fault on the rebuild source. - # We sleep between blocking and unblocking to ensure the - # network fault has time to take effect. - node1.block() - sleep(2) - node1.unblock() - - # Wait for the rebuild job to terminate. - # We retry a number of times as this may take a while. 
- retries = 5 - while get_num_rebuilds() > 0 and retries > 0: - sleep(1) - retries -= 1 - - # Expect the rebuild job to have been terminated - num_rebuilds = get_num_rebuilds() - expected_num_rebuilds = 0 - assert ( - num_rebuilds == expected_num_rebuilds - ), "Number of rebuilds {}, expected {}".format(num_rebuilds, expected_num_rebuilds) - - states = get_replica_states() - expected_replica1_state = "online" - assert ( - states[0] == expected_replica1_state - ), "Replica 1 has state {}, expected {}".format(states[0], expected_replica1_state) - - expected_replica2_state = "faulted" - assert ( - states[1] == expected_replica2_state - ), "Replica 2 has state {}, expected {}".format(states[1], expected_replica2_state) - - # Create a fresh environment for the subsequent tests - new_env() - replica2_uri = get_replica2_uri() - - with subtest("crash rebuild destination node"): - # Start rebuild - exec_rebuild_operation("start", replica2_uri) - rebuild_state = get_rebuild_state(replica2_uri) - expected_state = "running" - assert rebuild_state == expected_state, "Rebuild state {}, expected {}".format( - rebuild_state, expected_state - ) - - # Wait for some rebuild progress - while get_rebuild_progress_percentage() < 5: - sleep(1) - - # Crash and restart destination node - node2.crash() - node2.start() - node2.wait_for_unit("multi-user.target") - - # Wait for the rebuild job to terminate. - # We retry a number of times as this may take a while. - retries = 5 - while get_num_rebuilds() > 0 and retries > 0: - sleep(1) - retries -= 1 - - num_rebuilds = get_num_rebuilds() - expected_num_rebuilds = 0 - assert ( - num_rebuilds == expected_num_rebuilds - ), "Number of rebuilds {}, expected {}".format(num_rebuilds, expected_num_rebuilds) - - states = get_replica_states() - expected_replica1_state = "online" - assert ( - states[0] == expected_replica1_state - ), "Replica 1 has state {}, expected {}".format(states[0], expected_replica1_state) - - expected_replica2_state = "faulted" - assert ( - states[1] == expected_replica2_state - ), "Replica 2 has state {}, expected {}".format(states[1], expected_replica2_state) - ''; -} From 8510fb5cb469c5b46e9ee4ec3933a1e706d5e104 Mon Sep 17 00:00:00 2001 From: chriswldenyer Date: Mon, 11 Jan 2021 08:38:08 +0000 Subject: [PATCH 81/85] fix: avoid panic with missing bdev When calling child_retire() and bdev_destroy returns an error, log the error instead of panicking. The bdev may be missing due to a simultaneous bdev removal via the control plane. A further change will introduce better synchronization to avoid this condition. --- mayastor/src/bdev/nexus/nexus_io.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mayastor/src/bdev/nexus/nexus_io.rs b/mayastor/src/bdev/nexus/nexus_io.rs index 2b110ea7f..041c4ad87 100644 --- a/mayastor/src/bdev/nexus/nexus_io.rs +++ b/mayastor/src/bdev/nexus/nexus_io.rs @@ -337,7 +337,16 @@ impl Bio { nexus.pause().await.unwrap(); nexus.reconfigure(DREvent::ChildFault).await; //nexus.remove_child(&uri).await.unwrap(); - bdev_destroy(&uri).await.unwrap(); + + // Note, an error can occur here if a separate task, + // e.g. grpc request is also deleting the child, + // in which case the bdev may no longer exist at + // this point. To be addressed by CAS-632 to + // improve synchronization. + if let Err(err) = bdev_destroy(&uri).await { + error!("{} destroying bdev {}", err, uri) + } + nexus.resume().await.unwrap(); if nexus.status() == NexusStatus::Faulted { error!(":{} has no children left... 
", nexus); From 82a677ae100bf93e1ef979f96a4e461358a4917f Mon Sep 17 00:00:00 2001 From: Paul Yoong Date: Mon, 18 Jan 2021 10:33:22 +0000 Subject: [PATCH 82/85] test(rebuild): disable test Disable the rebuild_dst_disconnect test. This test is occasionally failing on CI/CD even with a suitably high timeout time. When this occurs an error message is output indicating that we failed to get IO channel because we were probably low on memory. A ticket will be raised to investigate this fully and re-enable the test. --- mayastor/tests/rebuild.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mayastor/tests/rebuild.rs b/mayastor/tests/rebuild.rs index 71b4aff6e..49c33af8c 100644 --- a/mayastor/tests/rebuild.rs +++ b/mayastor/tests/rebuild.rs @@ -776,6 +776,7 @@ async fn rebuild_src_disconnect() { /// Test rebuild when disconnecting the destination container from the /// network. #[tokio::test] +#[ignore] async fn rebuild_dst_disconnect() { let test_name = "rebuild_dst_disconnect"; let test = start_infrastructure(test_name).await; From c3f846d1fb2cd809bdee0f87def8b14ba9730f16 Mon Sep 17 00:00:00 2001 From: Blaise Dias Date: Fri, 18 Dec 2020 18:40:00 +0000 Subject: [PATCH 83/85] ci: run CSI e2e tests in CI/CD pipeline CAS-577 Move tests in csi-e2e under e2e refactor go.mod, directory locations. refactor tests to use newer go modules. Disable test with reclaimPolicy = retain, see CAS-566 Bump install timeout for MOAC to 360s from 180s Delete MSV and MSN CRDs Disable the check for pod restart. The check runs a shell script and changes directories. Works locally when run from the root of the repo, but fails when run on CI. Add general purpose cleanup function Generate junit xml reports Add --logs option to e2e script Use alpine fio image Add utiility functions AfterEachCheck for resource leakage AfterSuiteCleanup to cleanup resources, and invoke in TeardownTestEnv CleanupPVCs to remove PVCs and associated resources without asserting for use in AfterEach clauses Use AfterEach clauses to check for resource leakage, in basic volume IO and pvc stress tests In uninstall test Delete all pods in default namespace Delete all pvcs Then uninstall mayastor This is more likely to yield a reusable cluster --- scripts/e2e-cluster-dump.sh | 55 +- scripts/e2e-test.sh | 133 ++- scripts/e2e_check_pod_restarts.sh | 2 + test/csi-e2e/go.mod | 62 -- test/csi-e2e/go.sum | 861 ------------------ test/csi-e2e/runtest.sh | 8 - .../basic_volume_io/basic_volume_io_test.go | 25 +- test/e2e/common/test.go | 3 +- test/e2e/common/util.go | 227 ++++- test/{csi-e2e => e2e/csi}/README.md | 4 +- .../csi}/check_driver_pods_restart.sh | 0 test/{csi-e2e => e2e/csi}/driver/driver.go | 0 .../csi}/driver/mayastor_driver.go | 0 .../csi}/dynamic_provisioning_test.go | 54 +- test/{csi-e2e => e2e/csi}/e2e_suite_test.go | 19 +- ...namically_provisioned_cmd_volume_tester.go | 2 +- ...cally_provisioned_collocated_pod_tester.go | 2 +- ...namically_provisioned_delete_pod_tester.go | 2 +- ...ically_provisioned_pod_with_multiple_pv.go | 2 +- ...lly_provisioned_read_only_volume_tester.go | 2 +- ...cally_provisioned_reclaim_policy_tester.go | 2 +- test/{csi-e2e => e2e/csi}/testsuites/specs.go | 2 +- .../csi}/testsuites/testsuites.go | 0 test/e2e/go.mod | 64 +- test/e2e/go.sum | 438 +++++++++ test/e2e/install/install_test.go | 16 +- .../pvc_stress_fio/pvc_stress_fio_test.go | 21 +- .../replica_disconnection_test.go | 9 +- .../replica_pod_remove_test.go | 14 +- .../replica_reassign/replica_reassign_test.go | 9 +- test/e2e/rebuild/basic_rebuild_test.go | 12 
---
 scripts/e2e-cluster-dump.sh                   |  55 +-
 scripts/e2e-test.sh                           | 133 ++-
 scripts/e2e_check_pod_restarts.sh             |   2 +
 test/csi-e2e/go.mod                           |  62 --
 test/csi-e2e/go.sum                           | 861 ------
 test/csi-e2e/runtest.sh                       |   8 -
 .../basic_volume_io/basic_volume_io_test.go   |  25 +-
 test/e2e/common/test.go                       |   3 +-
 test/e2e/common/util.go                       | 227 ++++-
 test/{csi-e2e => e2e/csi}/README.md           |   4 +-
 .../csi}/check_driver_pods_restart.sh         |   0
 test/{csi-e2e => e2e/csi}/driver/driver.go    |   0
 .../csi}/driver/mayastor_driver.go            |   0
 .../csi}/dynamic_provisioning_test.go         |  54 +-
 test/{csi-e2e => e2e/csi}/e2e_suite_test.go   |  19 +-
 ...namically_provisioned_cmd_volume_tester.go |   2 +-
 ...cally_provisioned_collocated_pod_tester.go |   2 +-
 ...namically_provisioned_delete_pod_tester.go |   2 +-
 ...ically_provisioned_pod_with_multiple_pv.go |   2 +-
 ...lly_provisioned_read_only_volume_tester.go |   2 +-
 ...cally_provisioned_reclaim_policy_tester.go |   2 +-
 test/{csi-e2e => e2e/csi}/testsuites/specs.go |   2 +-
 .../csi}/testsuites/testsuites.go             |   0
 test/e2e/go.mod                               |  64 +-
 test/e2e/go.sum                               | 438 +++++++++
 test/e2e/install/install_test.go              |  16 +-
 .../pvc_stress_fio/pvc_stress_fio_test.go     |  21 +-
 .../replica_disconnection_test.go             |   9 +-
 .../replica_pod_remove_test.go                |  14 +-
 .../replica_reassign/replica_reassign_test.go |   9 +-
 test/e2e/rebuild/basic_rebuild_test.go        |  12 +-
 test/e2e/replica/replica_test.go              |  14 +-
 test/e2e/uninstall/uninstall_test.go          | 138 ++-
 33 files changed, 1048 insertions(+), 1154 deletions(-)
 delete mode 100644 test/csi-e2e/go.mod
 delete mode 100644 test/csi-e2e/go.sum
 delete mode 100755 test/csi-e2e/runtest.sh
 rename test/{csi-e2e => e2e/csi}/README.md (92%)
 rename test/{csi-e2e => e2e/csi}/check_driver_pods_restart.sh (100%)
 rename test/{csi-e2e => e2e/csi}/driver/driver.go (100%)
 rename test/{csi-e2e => e2e/csi}/driver/mayastor_driver.go (100%)
 rename test/{csi-e2e => e2e/csi}/dynamic_provisioning_test.go (86%)
 rename test/{csi-e2e => e2e/csi}/e2e_suite_test.go (83%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_cmd_volume_tester.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_collocated_pod_tester.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_delete_pod_tester.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_pod_with_multiple_pv.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_read_only_volume_tester.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/dynamically_provisioned_reclaim_policy_tester.go (98%)
 rename test/{csi-e2e => e2e/csi}/testsuites/specs.go (99%)
 rename test/{csi-e2e => e2e/csi}/testsuites/testsuites.go (100%)

diff --git a/scripts/e2e-cluster-dump.sh b/scripts/e2e-cluster-dump.sh
index 575775254..3a0521a44 100755
--- a/scripts/e2e-cluster-dump.sh
+++ b/scripts/e2e-cluster-dump.sh
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 
-set -e
+# This script makes the best attempt to dump stuff
+# so ignore fails and keep paddling.
+# set -e
 
 help() {
   cat <<EOF
...
 # $1 = podlogs, 0 => do not generate pod logs
 function dump-to-stdout {
+    echo "# Cluster ---------------------------------"
+    cluster-get
+    cluster-describe
+
     if [ "$1" -ne 0 ]; then
         logs-moac
         logs-mayastor
@@ -120,9 +126,7 @@ function dump-to-stdout {
         logs-csi-mayastor -p
         logs-csi-containers -p
     fi
-
-    cluster-get
-    cluster-describe
+    echo "# END ---------------------------------"
 }
 
 # $1 = podlogs, 0 => do not generate pod logs
@@ -131,6 +135,10 @@ function dump-to-dir {
     dest="$2"
     echo "Generating logs in $dest"
     mkdir -p "$dest"
+
+    cluster-get >& "$dest/cluster.get.txt"
+    cluster-describe >& "$dest/cluster.describe.txt"
+
     if [ "$1" -ne 0 ]; then
         logs-moac >& "$dest/moac.log"
         logs-mayastor >& "$dest/mayastor.log"
@@ -142,9 +150,6 @@ function dump-to-dir {
         logs-csi-mayastor -p >& "$dest/csi-mayastor.previous.log"
         logs-csi-containers -p >& "$dest/csi-containers.previous.log"
     fi
-
-    cluster-get >& "$dest/cluster.get.txt"
-    cluster-describe >& "$dest/cluster.describe.txt"
 }
 
 # $1 = podlogs, 0 => do not generate pod logs

diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index c745b06e1..38e744f11 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -1,16 +1,18 @@
 #!/usr/bin/env bash
 
-# e2e tests disabled until we can make them more reliable
-exit 0
-
 set -eux
 
 SCRIPTDIR=$(dirname "$(realpath "$0")")
 
 # new tests should be added before the replica_pod_remove test
-TESTS="install basic_volume_io replica rebuild node_disconnect/replica_pod_remove"
+#TESTS="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall"
+TESTS="install basic_volume_io csi uninstall"
 DEVICE=
 REGISTRY=
 TAG=
+TESTDIR=$(realpath "$SCRIPTDIR/../test/e2e")
+REPORTSDIR=$(realpath "$SCRIPTDIR/..")
+GENERATE_LOGS=0
+ON_FAIL="continue"
 help() {
   cat <<EOF
...
   --device      Device path to use for storage pools.
   --registry    Registry to pull the mayastor images from.
   --tag         Docker image tag of mayastor images (default "ci")
-
+  --tests       List of tests to run, delimited by spaces (default: "$TESTS")
+                Note: the last 2 tests should be (if they are to be run)
+                node_disconnect/replica_pod_remove uninstall
+  --reportsdir  Path to use for junit xml test reports (default: repo root)
+  --logs        Generate logs and cluster state dump at the end of a successful test run.
+  --onfail      On failure, stop immediately or continue (default: $ON_FAIL)
+                Behaviour for "continue" only differs if uninstall is in the list of tests (the default).
 Examples:
   $0 --registry 127.0.0.1:5000 --tag a80ce0c
 EOF
@@ -32,28 +40,51 @@
 while [ "$#" -gt 0 ]; do
   case $1 in
     -d|--device)
       shift
       DEVICE=$1
-      shift
       ;;
     -r|--registry)
       shift
       REGISTRY=$1
-      shift
       ;;
     -t|--tag)
       shift
       TAG=$1
+      ;;
+    -T|--tests)
       shift
+      TESTS="$1"
       ;;
+    -R|--reportsdir)
+      shift
+      REPORTSDIR="$1"
+      ;;
     -h|--help)
       help
       exit 0
       ;;
+    -l|--logs)
+      GENERATE_LOGS=1
+      ;;
+    --onfail)
+      shift
+      case $1 in
+          continue)
+              ON_FAIL=$1
+              ;;
+          stop)
+              ON_FAIL=$1
+              ;;
+          *)
+              help
+              exit 2
+      esac
+      ;;
     *)
       echo "Unknown option: $1"
       help
       exit 1
       ;;
   esac
+  shift
 done
@@ -62,40 +93,94 @@ if [ -z "$DEVICE" ]; then
   exit 1
 fi
 export e2e_pool_device=$DEVICE
+
 if [ -n "$TAG" ]; then
   export e2e_image_tag="$TAG"
 fi
+
 if [ -n "$REGISTRY" ]; then
   export e2e_docker_registry="$REGISTRY"
 fi
-test_failed=
+export e2e_reports_dir="$REPORTSDIR"
+if [ ! -d "$e2e_reports_dir" ] ; then
+  echo "Reports directory $e2e_reports_dir does not exist"
+  exit 1
+fi
+
+test_failed=0
+# Run go test in directory specified as $1 (relative path)
+function runGoTest {
+  cd "$TESTDIR"
+  echo "Running go test in $PWD/\"$1\""
+  if [ -z "$1" ] || [ ! -d "$1" ]; then
+    return 1
+  fi
+
+  cd "$1"
+  if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0; then
+    return 1
+  fi
+
+  return 0
+}
+
+# Check if $2 is in $1
+contains() {
+  [[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && return 0 || return 1
+}
+
+echo "list of tests: $TESTS"
 for dir in $TESTS; do
-  cd "$SCRIPTDIR/../test/e2e/$dir"
-  if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0 ; then
-    test_failed=1
-    break
-  fi
-  if ! ("$SCRIPTDIR"/e2e_check_pod_restarts.sh) ; then
-    test_failed=1
-    break
+  # defer uninstall till after other tests have been run.
+  if [ "$dir" != "uninstall" ] ; then
+    if ! runGoTest "$dir" ; then
+      test_failed=1
+      break
+    fi
+
+    if ! ("$SCRIPTDIR"/e2e_check_pod_restarts.sh) ; then
+      test_failed=1
+      break
+    fi
   fi
 done
 
-if [ -n "$test_failed" ]; then
-  "$SCRIPTDIR"/e2e-cluster-dump.sh
+if [ "$test_failed" -ne 0 ]; then
+  if ! "$SCRIPTDIR"/e2e-cluster-dump.sh ; then
+    # ignore failures in the dump script
+    :
+  fi
+
+  if [ "$ON_FAIL" == "stop" ]; then
+    exit 3
+  fi
 fi
 
-# must always run uninstall test in order to clean up the cluster
-cd "$SCRIPTDIR/../test/e2e/uninstall"
-if ! go test -v . -ginkgo.v -ginkgo.progress -timeout 0 ; then
-  "$SCRIPTDIR"/e2e-cluster-dump.sh --clusteronly
+# Always run uninstall test if specified
+if contains "$TESTS" "uninstall" ; then
+  if ! runGoTest "uninstall" ; then
+    test_failed=1
+    if ! "$SCRIPTDIR"/e2e-cluster-dump.sh --clusteronly ; then
+      # ignore failures in the dump script
+      :
+    fi
+  fi
 fi
 
-if [ -n "$test_failed" ]; then
+if [ "$test_failed" -ne 0 ]; then
+  echo "At least one test has FAILED!"
   exit 1
 fi
 
-echo "All tests have passed"
+if [ "$GENERATE_LOGS" -ne 0 ]; then
+  if ! "$SCRIPTDIR"/e2e-cluster-dump.sh ; then
+    # ignore failures in the dump script
+    :
+  fi
+fi
+
+echo "All tests have PASSED!"
exit 0 diff --git a/scripts/e2e_check_pod_restarts.sh b/scripts/e2e_check_pod_restarts.sh index 6befff3cf..e4962be93 100755 --- a/scripts/e2e_check_pod_restarts.sh +++ b/scripts/e2e_check_pod_restarts.sh @@ -13,6 +13,8 @@ restarts=$(kubectl get pods -n mayastor | grep -e mayastor -e moac | awk '{print for num in $restarts do if [ "$num" -ne "0" ]; then + # Useful to dump what failure was seen + kubectl get pods -n mayastor exit 255 fi done diff --git a/test/csi-e2e/go.mod b/test/csi-e2e/go.mod deleted file mode 100644 index 86a46a848..000000000 --- a/test/csi-e2e/go.mod +++ /dev/null @@ -1,62 +0,0 @@ -module mayastor-csi-e2e - -go 1.15 - -require ( - github.com/container-storage-interface/spec v1.2.0 - github.com/onsi/ginkgo v1.12.1 - github.com/onsi/gomega v1.10.1 - github.com/stretchr/testify v1.5.1 // indirect - google.golang.org/protobuf v1.25.0 // indirect - k8s.io/api v0.19.0 - k8s.io/apimachinery v0.19.0 - k8s.io/client-go v0.19.0 - k8s.io/klog/v2 v2.4.0 - k8s.io/kubernetes v1.19.0 -) - -replace k8s.io/api => k8s.io/api v0.19.0 - -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.0 - -replace k8s.io/apimachinery => k8s.io/apimachinery v0.19.0 - -replace k8s.io/apiserver => k8s.io/apiserver v0.19.0 - -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.0 - -replace k8s.io/client-go => k8s.io/client-go v0.19.0 - -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.0 - -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.0 - -replace k8s.io/code-generator => k8s.io/code-generator v0.19.0 - -replace k8s.io/component-base => k8s.io/component-base v0.19.0 - -replace k8s.io/cri-api => k8s.io/cri-api v0.19.0 - -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.0 - -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.0 - -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.0 - -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.0 - -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0 - -replace k8s.io/kubectl => k8s.io/kubectl v0.19.0 - -replace k8s.io/kubelet => k8s.io/kubelet v0.19.0 - -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0 - -replace k8s.io/metrics => k8s.io/metrics v0.19.0 - -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0 - -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.19.0 - -replace k8s.io/sample-controller => k8s.io/sample-controller v0.19.0 diff --git a/test/csi-e2e/go.sum b/test/csi-e2e/go.sum deleted file mode 100644 index 70b8ff4c2..000000000 --- a/test/csi-e2e/go.sum +++ /dev/null @@ -1,861 +0,0 @@ -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= 
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= -github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= -github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod 
h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= -github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= 
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt 
v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.37.0/go.mod h1:OhDE+goNVel0eGY8mR7Ifq1QUI1in5vJBIgIpcajK/I= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= -github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod 
[... several hundred deleted go.sum dependency hash entries elided ...]
diff --git a/test/csi-e2e/runtest.sh b/test/csi-e2e/runtest.sh
deleted file mode 100755
index 379fcf005..000000000
--- a/test/csi-e2e/runtest.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-GINKGO_FLAGS="-ginkgo.v -ginkgo.progress"
-go test -v -timeout=0 . ${GINKGO_FLAGS}
-
-# Required until CAS-566
-# "Mayastor volumes not destroyed when PV is destroyed if storage class reclaim policy is Retain"
-# is fixed.
-kubectl -n mayastor delete msv --all
diff --git a/test/e2e/basic_volume_io/basic_volume_io_test.go b/test/e2e/basic_volume_io/basic_volume_io_test.go
index afd21c15f..43aab5f48 100644
--- a/test/e2e/basic_volume_io/basic_volume_io_test.go
+++ b/test/e2e/basic_volume_io/basic_volume_io_test.go
@@ -4,15 +4,17 @@ package basic_volume_io_test

 import (
 	"e2e-basic/common"
+	"os"
 	"testing"

 	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/reporters"
 	. "github.com/onsi/gomega"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 )

-var defTimeoutSecs = "90s"
+var defTimeoutSecs = "120s"

 type volSc struct {
 	volName string
@@ -24,7 +26,9 @@ var volNames []volSc

 func TestBasicVolumeIO(t *testing.T) {
	RegisterFailHandler(Fail)
-	RunSpecs(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI")
+	reportDir := os.Getenv("e2e_reports_dir")
+	junitReporter := reporters.NewJUnitReporter(reportDir + "/basic-volume-io-junit.xml")
+	RunSpecsWithDefaultAndCustomReporters(t, "Basic volume IO tests, NVMe-oF TCP and iSCSI", []Reporter{junitReporter})
 }

 func basicVolumeIOTest(scName string) {
@@ -63,6 +67,15 @@ func basicVolumeIOTest(scName string) {
 }

 var _ = Describe("Mayastor Volume IO test", func() {
+
+	AfterEach(func() {
+		logf.Log.Info("AfterEach")
+
+		// Check resource leakage.
+		err := common.AfterEachCheck()
+		Expect(err).ToNot(HaveOccurred())
+	})
+
 	It("should verify an NVMe-oF TCP volume can process IO", func() {
 		basicVolumeIOTest("mayastor-nvmf")
 	})
@@ -79,14 +92,6 @@ var _ = BeforeSuite(func(done Done) {
 }, 60)

 var _ = AfterSuite(func() {
-	// Cleanup resources leftover in the event of failure.
-	for _, pod := range podNames {
-		_ = common.DeletePod(pod)
-	}
-	for _, vol := range volNames {
-		common.RmPVC(vol.volName, vol.scName)
-	}
-
 	// NB This only tears down the local structures for talking to the cluster,
 	// not the kubernetes cluster itself.
 	By("tearing down the test environment")
 	common.TeardownTestEnv()
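A note on the JUnit wiring above: this suite (and the CSI suite later in the patch) builds the report path by string-concatenating the e2e_reports_dir environment variable, so an unset variable silently drops the report at the filesystem root. A minimal defensive sketch, not part of the patch (reportPath is a hypothetical helper), could fall back to the working directory:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// reportPath resolves the JUnit report location from e2e_reports_dir,
// falling back to the current directory when the variable is unset so
// the report never lands at "/".
func reportPath(name string) string {
	dir := os.Getenv("e2e_reports_dir")
	if dir == "" {
		dir = "."
	}
	return filepath.Join(dir, name)
}

func main() {
	fmt.Println(reportPath("basic-volume-io-junit.xml"))
}
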
By("tearing down the test environment") common.TeardownTestEnv() diff --git a/test/e2e/common/test.go b/test/e2e/common/test.go index 5ff09cd77..5b8b73134 100644 --- a/test/e2e/common/test.go +++ b/test/e2e/common/test.go @@ -56,7 +56,7 @@ func SetupTestEnv() { mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) defer mgrSyncCtxCancel() - if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx); !synced { fmt.Println("Failed to sync") } @@ -83,6 +83,7 @@ func SetupTestEnv() { } func TeardownTestEnv() { + AfterSuiteCleanup() err := gTestEnv.TestEnv.Stop() Expect(err).ToNot(HaveOccurred()) } diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 32f4281db..b796b3653 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -26,6 +26,7 @@ import ( "reflect" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) var defTimeoutSecs = "90s" @@ -139,6 +140,17 @@ func IsMSVDeleted(uuid string) bool { return false } +func DeleteMSV(uuid string) error { + msvGVR := schema.GroupVersionResource{ + Group: "openebs.io", + Version: "v1alpha1", + Resource: "mayastorvolumes", + } + + err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Delete(context.TODO(), uuid, metav1.DeleteOptions{}) + return err +} + // Check for a deleted Persistent Volume Claim, // either the object does not exist // or the status phase is invalid. @@ -421,6 +433,31 @@ func DeletePod(podName string) error { return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{}) } +/// Delete all pods in the default namespace +// returns: +// 1) success i.e. true if all pods were deleted or there were no pods to delete. 
diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go
index 32f4281db..b796b3653 100644
--- a/test/e2e/common/util.go
+++ b/test/e2e/common/util.go
@@ -26,6 +26,7 @@ import (
 	"reflect"

 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 )

 var defTimeoutSecs = "90s"
@@ -139,6 +140,17 @@ func IsMSVDeleted(uuid string) bool {
 	return false
 }

+func DeleteMSV(uuid string) error {
+	msvGVR := schema.GroupVersionResource{
+		Group:    "openebs.io",
+		Version:  "v1alpha1",
+		Resource: "mayastorvolumes",
+	}
+
+	err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").Delete(context.TODO(), uuid, metav1.DeleteOptions{})
+	return err
+}
+
 // Check for a deleted Persistent Volume Claim,
 // either the object does not exist
 // or the status phase is invalid.
@@ -421,6 +433,31 @@ func DeletePod(podName string) error {
 	return gTestEnv.KubeInt.CoreV1().Pods("default").Delete(context.TODO(), podName, metav1.DeleteOptions{})
 }

+// Delete all pods in the default namespace.
+// Returns:
+//  1) success, i.e. true if all pods were deleted or there were no pods to delete
+//  2) the number of pods found
+func DeleteAllPods() (bool, int) {
+	logf.Log.Info("DeleteAllPods")
+	success := true
+	numPods := 0
+	pods, err := gTestEnv.KubeInt.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		logf.Log.Error(err, "DeleteAllPods: list pods failed.")
+		success = false
+	}
+	if err == nil && pods != nil {
+		numPods = len(pods.Items)
+		for _, pod := range pods.Items {
+			logf.Log.Info("DeleteAllPods: Deleting", "pod", pod.Name)
+			if err := DeletePod(pod.Name); err != nil {
+				success = false
+			}
+		}
+	}
+	return success, numPods
+}
+
 func CreateFioPod(podName string, volName string) (*corev1.Pod, error) {
 	podDef := CreateFioPodDef(podName, volName)
 	return CreatePod(podDef)
@@ -437,10 +474,9 @@ func CreateFioPodDef(podName string, volName string) *corev1.Pod {
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:    podName,
-					Image:   "nixery.dev/shell/fio/tini",
-					Command: []string{"tini", "--"},
-					Args:    []string{"sleep", "1000000"},
+					Name:  podName,
+					Image: "dmonakhov/alpine-fio",
+					Args:  []string{"sleep", "1000000"},
 					VolumeMounts: []corev1.VolumeMount{
 						{
 							Name: "ms-volume",
@@ -856,3 +892,186 @@ func IsVolumePublished(uuid string) bool {
 	}
 	return true
 }
+
+// Make best attempt to delete PVCs, PVs and MSVs
+func DeleteAllVolumeResources() (bool, bool) {
+	logf.Log.Info("DeleteAllVolumeResources")
+	foundResources := false
+	success := true
+
+	// Delete all PVCs found
+	// Phase 1 to delete dangling resources
+	pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		logf.Log.Error(err, "DeleteAllVolumeResources: list PVCs failed.")
+		success = false
+	}
+	if err == nil && pvcs != nil && len(pvcs.Items) != 0 {
+		foundResources = true
+		logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumeClaims")
+		for _, pvc := range pvcs.Items {
+			if err := DeletePVC(pvc.Name); err != nil {
+				success = false
+			}
+		}
+	}
+
+	// Delete all PVs found
+	pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		logf.Log.Error(err, "DeleteAllVolumeResources: list PVs failed.")
+	}
+	if err == nil && pvs != nil && len(pvs.Items) != 0 {
+		logf.Log.Info("DeleteAllVolumeResources: deleting PersistentVolumes")
+		for _, pv := range pvs.Items {
+			if err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}); err != nil {
+				success = false
+			}
+		}
+	}
+
+	// Wait 2 minutes for resources to be deleted
+	for attempts := 0; attempts < 120; attempts++ {
+		numPvcs := 0
+		pvcs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{})
+		if err == nil && pvcs != nil {
+			numPvcs = len(pvcs.Items)
+		}
+
+		numPvs := 0
+		pvs, err := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
+		if err == nil && pvs != nil {
+			numPvs = len(pvs.Items)
+		}
+
+		if numPvcs == 0 && numPvs == 0 {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	// If after deleting PVCs and PVs Mayastor volumes are leftover
+	// try cleaning them up explicitly
+	msvGVR := schema.GroupVersionResource{
+		Group:    "openebs.io",
+		Version:  "v1alpha1",
+		Resource: "mayastorvolumes",
+	}
+
+	msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		// This function may be called from AfterSuite by the uninstall test, so listing MSVs may legitimately fail
+		logf.Log.Info("DeleteAllVolumeResources: list MSVs failed.", "Error", err)
+	}
+	if err == nil && msvs != nil && len(msvs.Items) != 0 {
+		logf.Log.Info("DeleteAllVolumeResources: deleting MayastorVolumes")
+		for _, msv := range msvs.Items {
+			if err := DeleteMSV(msv.GetName()); err != nil {
+				success = false
+			}
+		}
+	}
+
+	// Wait 2 minutes for resources to be deleted
+	for attempts := 0; attempts < 120; attempts++ {
+		numMsvs := 0
+		msvs, err := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{})
+		if err == nil && msvs != nil {
+			numMsvs = len(msvs.Items)
+		}
+		if numMsvs == 0 {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	return success, foundResources
+}
+
+func AfterSuiteCleanup() {
+	logf.Log.Info("AfterSuiteCleanup")
+	_, _ = DeleteAllVolumeResources()
+}
+
+// Check that no PVs, PVCs and MSVs are still extant.
+// Returns an error if any such resources exist.
+func AfterEachCheck() error {
+	var errorMsg = ""

+	logf.Log.Info("AfterEachCheck")
+
+	// Phase 1 to delete dangling resources
+	pvcs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims("default").List(context.TODO(), metav1.ListOptions{})
+	if len(pvcs.Items) != 0 {
+		errorMsg += " found leftover PersistentVolumeClaims"
+		logf.Log.Info("AfterEachCheck: found leftover PersistentVolumeClaims, test fails.")
+	}
+
+	pvs, _ := gTestEnv.KubeInt.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
+	if len(pvs.Items) != 0 {
+		errorMsg += " found leftover PersistentVolumes"
+		logf.Log.Info("AfterEachCheck: found leftover PersistentVolumes, test fails.")
+	}
+
+	// Mayastor volumes
+	msvGVR := schema.GroupVersionResource{
+		Group:    "openebs.io",
+		Version:  "v1alpha1",
+		Resource: "mayastorvolumes",
+	}
+	msvs, _ := gTestEnv.DynamicClient.Resource(msvGVR).Namespace("mayastor").List(context.TODO(), metav1.ListOptions{})
+	if len(msvs.Items) != 0 {
+		errorMsg += " found leftover MayastorVolumes"
+		logf.Log.Info("AfterEachCheck: found leftover MayastorVolumes, test fails.")
+	}
+
+	if len(errorMsg) != 0 {
+		return errors.New(errorMsg)
+	}
+	return nil
+}
+
+func MayastorUndeletedPodCount() int {
+	pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		logf.Log.Error(err, "MayastorUndeletedPodCount: list pods failed.")
+		return 0
+	}
+	if pods != nil {
+		return len(pods.Items)
+	}
+	logf.Log.Info("MayastorUndeletedPodCount: nil list returned.")
+	return 0
+}
+
+// Force deletion of all existing mayastor pods.
+// Returns true if pods were found and force-deleted, false otherwise.
+func ForceDeleteMayastorPods() bool {
+	logf.Log.Info("EnsureMayastorDeleted")
+	pods, err := gTestEnv.KubeInt.CoreV1().Pods("mayastor").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		logf.Log.Error(err, "EnsureMayastorDeleted: list pods failed.")
+		return false
+	}
+	if pods == nil || len(pods.Items) == 0 {
+		return false
+	}
+
+	logf.Log.Info("EnsureMayastorDeleted: MayastorPods found.", "Count", len(pods.Items))
+	for _, pod := range pods.Items {
+		logf.Log.Info("EnsureMayastorDeleted: Force deleting", "pod", pod.Name)
+		cmd := exec.Command("kubectl", "-n", "mayastor", "delete", "pod", pod.Name, "--grace-period", "0", "--force")
+		_, err := cmd.CombinedOutput()
+		if err != nil {
+			logf.Log.Error(err, "EnsureMayastorDeleted", "podName", pod.Name)
+		}
+	}
+
+	// We have made the best effort to cleanup, give things time to settle.
+	for attempts := 0; attempts < 30 && MayastorUndeletedPodCount() != 0; attempts++ {
+		time.Sleep(2 * time.Second)
+	}
+
+	logf.Log.Info("EnsureMayastorDeleted: lingering Mayastor pods were found!")
+	return true
+}
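The cleanup helpers added above hand-roll the same poll-until-quiescent loop three times (PVC/PV wait, MSV wait, pod-deletion settle). A hypothetical waitFor helper, a sketch only and not part of this patch, shows how that pattern could be factored out:

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every interval until it returns true or the
// timeout elapses, returning the final value of cond.
func waitFor(cond func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for !cond() {
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
	return true
}

func main() {
	start := time.Now()
	// Example: wait up to 2 minutes, polling each second.
	ok := waitFor(func() bool { return time.Since(start) > 3*time.Second }, 2*time.Minute, time.Second)
	fmt.Println("condition met:", ok)
}
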
diff --git a/test/csi-e2e/README.md b/test/e2e/csi/README.md
similarity index 92%
rename from test/csi-e2e/README.md
rename to test/e2e/csi/README.md
index 6668a2392..a61f8132b 100644
--- a/test/csi-e2e/README.md
+++ b/test/e2e/csi/README.md
@@ -10,7 +10,7 @@ These tests have been ported from kubernetes CSI NFS driver at https://github.co
 * `LARGE_CLAIM_SIZE` - Size of large PVCs created by the testsuite, defaults to `500Mi`

 ## Changes for mayastor
-* Location of the test directory within the repo is `test/csi-e2e`
+* Location of the test directory within the repo is `test/e2e/csi`
 * Naming from `csi-nfs` to `csi-mayastor`
 * Claim sizes have been downsized from
   * `10Gi` to `50Mi`
@@ -36,6 +36,6 @@ To run the tests execute `runtests.sh` from this directory.

 ### TODO
 Remove the workaround for the side effect of running this test when CAS-566 is fixed.
-In `test/csi-e2e/runtest.sh` all Mayastor Volumes are deleted after
+In `test/e2e/csi/runtest.sh` all Mayastor Volumes are deleted after
 the test run. Until CAS-566 is fixed this is required, as leftover volumes
 have an impact on tests run subsequently, in particular the uninstall test.
diff --git a/test/csi-e2e/check_driver_pods_restart.sh b/test/e2e/csi/check_driver_pods_restart.sh
similarity index 100%
rename from test/csi-e2e/check_driver_pods_restart.sh
rename to test/e2e/csi/check_driver_pods_restart.sh
diff --git a/test/csi-e2e/driver/driver.go b/test/e2e/csi/driver/driver.go
similarity index 100%
rename from test/csi-e2e/driver/driver.go
rename to test/e2e/csi/driver/driver.go
diff --git a/test/csi-e2e/driver/mayastor_driver.go b/test/e2e/csi/driver/mayastor_driver.go
similarity index 100%
rename from test/csi-e2e/driver/mayastor_driver.go
rename to test/e2e/csi/driver/mayastor_driver.go
diff --git a/test/csi-e2e/dynamic_provisioning_test.go b/test/e2e/csi/dynamic_provisioning_test.go
similarity index 86%
rename from test/csi-e2e/dynamic_provisioning_test.go
rename to test/e2e/csi/dynamic_provisioning_test.go
index 5ef395609..426e0f0c6 100644
--- a/test/csi-e2e/dynamic_provisioning_test.go
+++ b/test/e2e/csi/dynamic_provisioning_test.go
@@ -21,6 +21,8 @@ import (
 	"os"
 	"strings"

+	"e2e-basic/csi/driver"
+	"e2e-basic/csi/testsuites"
 	"github.com/onsi/ginkgo"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -29,8 +31,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	restclientset "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/test/e2e/framework"
-	"mayastor-csi-e2e/driver"
-	"mayastor-csi-e2e/testsuites"
 )

 // TODO: Make configurable
@@ -59,13 +59,18 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 	)

 	ginkgo.BeforeEach(func() {
-		checkPodsRestart := testCmd{
-			command:  "sh",
-			args:     []string{"test/csi-e2e/check_driver_pods_restart.sh"},
-			startLog: "Check driver pods for restarts",
-			endLog:   "Check successful",
-		}
-		execTestCmd([]testCmd{checkPodsRestart})
+		// Disabled: for Mayastor, higher-level scripts check for POD
+		// restarts, and the changing of directories does not work correctly
+		// on the Mayastor e2e test cluster.
+		//
+		// checkPodsRestart := testCmd{
+		//	command:  "sh",
+		//	args:     []string{"test/e2e/csi/check_driver_pods_restart.sh"},
+		//	startLog: "Check driver pods for restarts",
+		//	endLog:   "Check successful",
+		// }
+		// execTestCmd([]testCmd{checkPodsRestart})
+
 		cs = f.ClientSet
 		ns = f.Namespace
@@ -209,21 +214,22 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 		test.Run(cs, ns)
 	})

-	ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [mayastor-csi.openebs.io]", v1.PersistentVolumeReclaimRetain), func() {
-		reclaimPolicy := v1.PersistentVolumeReclaimRetain
-		volumes := []testsuites.VolumeDetails{
-			{
-				ClaimSize:     smallClaimSize,
-				ReclaimPolicy: &reclaimPolicy,
-			},
-		}
-		test := testsuites.DynamicallyProvisionedReclaimPolicyTest{
-			CSIDriver:              testDriver,
-			Volumes:                volumes,
-			StorageClassParameters: defaultStorageClassParameters,
-		}
-		test.Run(cs, ns)
-	})
+	// Disabled for Mayastor until CAS-566 has been resolved.
+	// ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [mayastor-csi.openebs.io]", v1.PersistentVolumeReclaimRetain), func() {
+	//	reclaimPolicy := v1.PersistentVolumeReclaimRetain
+	//	volumes := []testsuites.VolumeDetails{
+	//		{
+	//			ClaimSize:     smallClaimSize,
+	//			ReclaimPolicy: &reclaimPolicy,
+	//		},
+	//	}
+	//	test := testsuites.DynamicallyProvisionedReclaimPolicyTest{
+	//		CSIDriver:              testDriver,
+	//		Volumes:                volumes,
+	//		StorageClassParameters: defaultStorageClassParameters,
+	//	}
+	//	test.Run(cs, ns)
+	// })

 	ginkgo.It("should create a pod with multiple volumes [mayastor-csi.openebs.io]", func() {
 		var cmds []string
diff --git a/test/csi-e2e/e2e_suite_test.go b/test/e2e/csi/e2e_suite_test.go
similarity index 83%
rename from test/csi-e2e/e2e_suite_test.go
rename to test/e2e/csi/e2e_suite_test.go
index ce3eeae0f..619e62fd4 100644
--- a/test/csi-e2e/e2e_suite_test.go
+++ b/test/e2e/csi/e2e_suite_test.go
@@ -18,16 +18,14 @@ package e2e

 import (
 	"flag"
-	"log"
 	"os"
-	"os/exec"
 	"path/filepath"
-	"strings"
 	"testing"

 	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/reporters"
 	"github.com/onsi/gomega"
-//	"github.com/pborman/uuid"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
 )
@@ -38,7 +36,7 @@ const (

 var (
 	defaultStorageClassParameters = map[string]string{
-		"repl": "1",
+		"repl":     "1",
 		"protocol": "nvmf",
 	}
 )
@@ -73,11 +71,13 @@ func handleFlags() {
 	flag.Parse()
 }

+/* Disabled for Mayastor; higher-level scripts check for POD restarts.
+   execTestCmd checks for pod restarts after every test it runs.
 func execTestCmd(cmds []testCmd) {
-	err := os.Chdir("../..")
+	err := os.Chdir("../../..")
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	defer func() {
-		err := os.Chdir("test/csi-e2e")
+		err := os.Chdir("test/e2e/csi")
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	}()
@@ -96,8 +96,11 @@ func execTestCmd(cmds []testCmd) {
 		log.Println(cmd.endLog)
 	}
 }
+*/

 func TestE2E(t *testing.T) {
 	gomega.RegisterFailHandler(ginkgo.Fail)
-	ginkgo.RunSpecs(t, "E2E Suite")
+	reportDir := os.Getenv("e2e_reports_dir")
+	junitReporter := reporters.NewJUnitReporter(reportDir + "/csi-junit.xml")
+	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "CSI E2E Suite", []ginkgo.Reporter{junitReporter})
 }
package testsuites import ( - "mayastor-csi-e2e/driver" + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" diff --git a/test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/test/e2e/csi/testsuites/dynamically_provisioned_collocated_pod_tester.go similarity index 98% rename from test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go rename to test/e2e/csi/testsuites/dynamically_provisioned_collocated_pod_tester.go index dd9415b4f..93c39aac8 100644 --- a/test/csi-e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go +++ b/test/e2e/csi/testsuites/dynamically_provisioned_collocated_pod_tester.go @@ -17,10 +17,10 @@ limitations under the License. package testsuites import ( + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - "mayastor-csi-e2e/driver" ) // DynamicallyProvisionedCollocatedPodTest will provision required StorageClass(es), PVC(s) and Pod(s) diff --git a/test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/test/e2e/csi/testsuites/dynamically_provisioned_delete_pod_tester.go similarity index 98% rename from test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go rename to test/e2e/csi/testsuites/dynamically_provisioned_delete_pod_tester.go index f9c1dcb20..ac75880e6 100644 --- a/test/csi-e2e/testsuites/dynamically_provisioned_delete_pod_tester.go +++ b/test/e2e/csi/testsuites/dynamically_provisioned_delete_pod_tester.go @@ -17,10 +17,10 @@ limitations under the License. package testsuites import ( + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - "mayastor-csi-e2e/driver" ) // DynamicallyProvisionedDeletePodTest will provision required StorageClass and Deployment diff --git a/test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go b/test/e2e/csi/testsuites/dynamically_provisioned_pod_with_multiple_pv.go similarity index 98% rename from test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go rename to test/e2e/csi/testsuites/dynamically_provisioned_pod_with_multiple_pv.go index 6129d0664..396f6a3cf 100644 --- a/test/csi-e2e/testsuites/dynamically_provisioned_pod_with_multiple_pv.go +++ b/test/e2e/csi/testsuites/dynamically_provisioned_pod_with_multiple_pv.go @@ -17,10 +17,10 @@ limitations under the License. 
package testsuites import ( + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - "mayastor-csi-e2e/driver" ) // DynamicallyProvisionedPodWithMultiplePVsTest will provision diff --git a/test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/test/e2e/csi/testsuites/dynamically_provisioned_read_only_volume_tester.go similarity index 98% rename from test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go rename to test/e2e/csi/testsuites/dynamically_provisioned_read_only_volume_tester.go index a40c74666..94718c072 100644 --- a/test/csi-e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go +++ b/test/e2e/csi/testsuites/dynamically_provisioned_read_only_volume_tester.go @@ -19,12 +19,12 @@ package testsuites import ( "fmt" + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - "mayastor-csi-e2e/driver" ) // DynamicallyProvisionedReadOnlyVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) diff --git a/test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/test/e2e/csi/testsuites/dynamically_provisioned_reclaim_policy_tester.go similarity index 98% rename from test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go rename to test/e2e/csi/testsuites/dynamically_provisioned_reclaim_policy_tester.go index 7e8d1074a..4160b6cfd 100644 --- a/test/csi-e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go +++ b/test/e2e/csi/testsuites/dynamically_provisioned_reclaim_policy_tester.go @@ -17,7 +17,7 @@ limitations under the License. package testsuites import ( - "mayastor-csi-e2e/driver" + "e2e-basic/csi/driver" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" diff --git a/test/csi-e2e/testsuites/specs.go b/test/e2e/csi/testsuites/specs.go similarity index 99% rename from test/csi-e2e/testsuites/specs.go rename to test/e2e/csi/testsuites/specs.go index c546fd19a..bdc0678a1 100644 --- a/test/csi-e2e/testsuites/specs.go +++ b/test/e2e/csi/testsuites/specs.go @@ -19,11 +19,11 @@ package testsuites import ( "fmt" + "e2e-basic/csi/driver" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" - "mayastor-csi-e2e/driver" ) const ( diff --git a/test/csi-e2e/testsuites/testsuites.go b/test/e2e/csi/testsuites/testsuites.go similarity index 100% rename from test/csi-e2e/testsuites/testsuites.go rename to test/e2e/csi/testsuites/testsuites.go diff --git a/test/e2e/go.mod b/test/e2e/go.mod index b5a7c00ce..c3df99906 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -3,17 +3,63 @@ module e2e-basic go 1.15 require ( - github.com/onsi/ginkgo v1.12.1 - github.com/onsi/gomega v1.10.1 + github.com/container-storage-interface/spec v1.2.0 + github.com/onsi/ginkgo v1.14.1 + github.com/onsi/gomega v1.10.2 github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.5.1 // indirect github.com/stretchr/testify v1.5.1 // indirect - golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect - google.golang.org/appengine v1.6.5 // indirect google.golang.org/protobuf v1.25.0 // indirect - k8s.io/api v0.18.6 - k8s.io/apimachinery v0.18.6 - k8s.io/client-go v0.18.6 - sigs.k8s.io/controller-runtime v0.6.2 + k8s.io/api v0.19.2 + k8s.io/apimachinery v0.19.2 + 
k8s.io/client-go v0.19.2 + k8s.io/klog/v2 v2.4.0 + k8s.io/kubernetes v1.19.0 + sigs.k8s.io/controller-runtime v0.7.0 ) + +replace k8s.io/api => k8s.io/api v0.19.0 + +replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.0 + +replace k8s.io/apimachinery => k8s.io/apimachinery v0.19.0 + +replace k8s.io/apiserver => k8s.io/apiserver v0.19.0 + +replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.0 + +replace k8s.io/client-go => k8s.io/client-go v0.19.0 + +replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.0 + +replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.0 + +replace k8s.io/code-generator => k8s.io/code-generator v0.19.0 + +replace k8s.io/component-base => k8s.io/component-base v0.19.0 + +replace k8s.io/cri-api => k8s.io/cri-api v0.19.0 + +replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.0 + +replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.0 + +replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.0 + +replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.0 + +replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0 + +replace k8s.io/kubectl => k8s.io/kubectl v0.19.0 + +replace k8s.io/kubelet => k8s.io/kubelet v0.19.0 + +replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0 + +replace k8s.io/metrics => k8s.io/metrics v0.19.0 + +replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0 + +replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.19.0 + +replace k8s.io/sample-controller => k8s.io/sample-controller v0.19.0 diff --git a/test/e2e/go.sum b/test/e2e/go.sum index f019956cb..87079e424 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -1,42 +1,108 @@ +bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod 
h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= +github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= +github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -44,17 +110,32 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -64,10 +145,17 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod 
h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -76,14 +164,24 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= +github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -127,21 +225,30 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -150,8 +257,12 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cadvisor v0.37.0/go.mod 
h1:OhDE+goNVel0eGY8mR7Ifq1QUI1in5vJBIgIpcajK/I= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -159,36 +270,62 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -197,55 +334,110 @@ github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= +github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= +github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= +github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= +github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod 
h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= +github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -258,42 +450,74 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quobyte/api v0.1.2/go.mod 
h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/storageos/go-api 
v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -302,42 +526,97 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -346,24 +625,37 @@ golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -377,20 +669,42 @@ golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -403,39 +717,91 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= 
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -446,6 +812,7 @@ google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -454,15 +821,20 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -471,44 +843,110 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc= +k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo= k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= +k8s.io/apiextensions-apiserver v0.19.0 h1:jlY13lvZp+0p9fRX2khHFdiT9PYzT7zUrANz6R1NKtY= +k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= +k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ= k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= +k8s.io/apiserver v0.19.0 h1:jLhrL06wGAADbLUUQm8glSLnAGP6c7y5R3p19grkBoY= +k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= +k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw= k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= +k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= +k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v1.5.1 h1:XaX/lo2/u3/pmFau8HN+sB5C/b4dc4Dmm2eXjBH4p1E= k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= +k8s.io/cloud-provider v0.19.0 h1:Ae09nHr6BVPEzmAWbZedYC0gjsIPbt7YsIY0V/NHGr0= +k8s.io/cloud-provider v0.19.0/go.mod h1:TYh7b7kQ6wiqF7Ftb+u3lN4IwvgOPbBrcvC3TDAW4cw= +k8s.io/cluster-bootstrap v0.19.0/go.mod h1:kBn1DKyqoM245wzz+AAnGkuysJ+9GqVbPYveTo4KiaA= k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= +k8s.io/component-base v0.19.0 h1:OueXf1q3RW7NlLlUCj2Dimwt7E1ys6ZqRnq53l2YuoE= +k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= +k8s.io/cri-api v0.19.0/go.mod h1:UN/iU9Ua0iYdDREBXNE9vqCJ7MIh/FW3VIL0d8pw7Fw= +k8s.io/csi-translation-lib v0.19.0/go.mod h1:zGS1YqV8U2So/t4Hz8SoRXMx5y5/KSKnA6BXXxGuo4A= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= +k8s.io/kube-controller-manager v0.19.0/go.mod h1:uGZyiHK73NxNEN5EZv/Esm3fbCOzeq4ndttMexVZ1L0= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-proxy v0.19.0/go.mod h1:7NoJCFgsWb7iiMB1F6bW1St5rEXC+ir2aWiJehASmTU= +k8s.io/kube-scheduler v0.19.0/go.mod h1:1XGjJUgstM0/0x8to+bSGSyCs3Dp3dbCEr3Io/mvd4s= +k8s.io/kubectl v0.19.0 h1:t9uxaZzGvqc2jY96mjnPSjFHtaKOxoUegeGZdaGT6aw= +k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= +k8s.io/kubelet v0.19.0/go.mod h1:cGds22piF/LnFzfAaIT+efvOYBHVYdunqka6NVuNw9g= +k8s.io/kubernetes v1.19.0 h1:ir53YuXsfsuVABmtYHCTUa3xjD41Htxv3o+xoQjJdUo= +k8s.io/kubernetes v1.19.0/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A= +k8s.io/legacy-cloud-providers v0.19.0/go.mod h1:Q5czDCPnStdpFohMpcbnqL+MLR75kUhIDIsnmwEm0/o= +k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= +k8s.io/sample-apiserver v0.19.0/go.mod h1:Bq9UulNoKnT72JqlkWF2JS14cXxJqcmvLtb5+EcwiNA= +k8s.io/system-validators v1.1.2/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= +sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/test/e2e/install/install_test.go b/test/e2e/install/install_test.go index aaef6d5db..49128aa3d 100644 --- a/test/e2e/install/install_test.go +++ b/test/e2e/install/install_test.go @@ -13,6 +13,7 @@ import ( "time" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" appsV1 "k8s.io/api/apps/v1" @@ -214,15 +215,15 @@ func installMayastor() { applyDeployYaml("../test-yamls/mayastor-daemonset.yaml") // Given the yamls and the environment described in the test readme, - // we expect mayastor to be running on exactly 2 nodes. + // we expect mayastor to be running on exactly numMayastorInstances nodes. Eventually(mayastorReadyPodCount, "180s", // timeout "1s", // polling interval ).Should(Equal(numMayastorInstances)) Eventually(moacReadyPodCount(), - "180s", // timeout - "1s", // polling interval + "360s", // timeout + "1s", // polling interval ).Should(Equal(1)) // Now create pools on all nodes. 
@@ -231,7 +232,10 @@ func installMayastor() { func TestInstallSuite(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Basic Install Suite") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/install-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Basic Install Suite", + []Reporter{junitReporter}) } var _ = Describe("Mayastor setup", func() { @@ -241,7 +245,7 @@ var _ = Describe("Mayastor setup", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) By("bootstrapping test environment") useCluster := true @@ -267,7 +271,7 @@ var _ = BeforeSuite(func(done Done) { mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) defer mgrSyncCtxCancel() - if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx); !synced { fmt.Println("Failed to sync") } diff --git a/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go b/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go index ede54a57c..8870973c6 100644 --- a/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go +++ b/test/e2e/nightly/pvc_stress_fio/pvc_stress_fio_test.go @@ -17,6 +17,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" ) @@ -202,10 +203,19 @@ func stressTestPVC(iters int, runFio bool) { func TestPVCStress(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "PVC Stress Test Suite") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/pvc-stress-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "PVC Stress Test Suite", + []Reporter{junitReporter}) } var _ = Describe("Mayastor PVC Stress test", func() { + AfterEach(func() { + // Check resource leakage + err := Cmn.AfterEachCheck() + Expect(err).ToNot(HaveOccurred()) + }) + It("should stress test creation and deletion of PVCs provisioned over iSCSI and NVMe-of", func() { stressTestPVC(cdIterations, false) }) @@ -240,15 +250,6 @@ var _ = BeforeSuite(func(done Done) { }, 60) var _ = AfterSuite(func() { - // Cleanup resources leftover in the event of failure. - for _, pod := range podNames { - err := Cmn.DeletePod(pod) - Expect(err).ToNot(HaveOccurred()) - } - for _, vol := range volNames { - Cmn.RmPVC(vol.volName, vol.scName) - } - // NB This only tears down the local structures for talking to the cluster, // not the kubernetes cluster itself. By("tearing down the test environment") diff --git a/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go index 185d3d106..ca58766fb 100644 --- a/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go +++ b/test/e2e/node_disconnect/replica_disconnect/replica_disconnection_test.go @@ -4,9 +4,11 @@ import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" + "os" "testing" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . 
"github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -23,7 +25,10 @@ const run_drop = false func TestNodeLoss(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Replica disconnection tests") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-disconnect-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Replica disconnection tests", + []Reporter{junitReporter}) } var _ = Describe("Mayastor replica disconnection test", func() { @@ -79,7 +84,7 @@ var _ = Describe("Mayastor replica disconnection test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go index 669cdf474..8a8776473 100644 --- a/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go +++ b/test/e2e/node_disconnect/replica_pod_remove/replica_pod_remove_test.go @@ -3,14 +3,15 @@ package replica_pod_remove_test import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "os" "testing" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" - - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var env disconnect_lib.DisconnectEnv @@ -18,7 +19,10 @@ var gStorageClass string = "" func TestMayastorPodLoss(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Replica pod removal tests") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-pod-remove-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Replica pod removal tests", + []Reporter{junitReporter}) } var _ = Describe("Mayastor replica pod removal test", func() { @@ -35,7 +39,7 @@ var _ = Describe("Mayastor replica pod removal test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go index 680bdb289..2a223c826 100644 --- a/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go +++ b/test/e2e/node_disconnect/replica_reassign/replica_reassign_test.go @@ -4,9 +4,11 @@ import ( "e2e-basic/common" disconnect_lib "e2e-basic/node_disconnect/lib" + "os" "testing" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . 
"github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -21,7 +23,10 @@ const reject = "REJECT" func TestReplicaReassign(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Replica reassignment test") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-reassign-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Replica reassignment test", + []Reporter{junitReporter}) } var _ = Describe("Mayastor replica reassignment test", func() { @@ -43,7 +48,7 @@ var _ = Describe("Mayastor replica reassignment test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/rebuild/basic_rebuild_test.go b/test/e2e/rebuild/basic_rebuild_test.go index 93272a276..cdc35efa1 100644 --- a/test/e2e/rebuild/basic_rebuild_test.go +++ b/test/e2e/rebuild/basic_rebuild_test.go @@ -1,10 +1,13 @@ package basic_rebuild_test import ( - "e2e-basic/common" + "os" "testing" + "e2e-basic/common" + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -70,7 +73,10 @@ func basicRebuildTest() { func TestRebuild(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Rebuild Test Suite") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/rebuild-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Rebuild Test Suite", + []Reporter{junitReporter}) } var _ = Describe("Mayastor rebuild test", func() { @@ -80,7 +86,7 @@ var _ = Describe("Mayastor rebuild test", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/replica/replica_test.go b/test/e2e/replica/replica_test.go index 43d41a6a3..b5af4703c 100644 --- a/test/e2e/replica/replica_test.go +++ b/test/e2e/replica/replica_test.go @@ -1,11 +1,15 @@ package replica_test import ( - "e2e-basic/common" + "os" "testing" + "e2e-basic/common" + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) @@ -59,7 +63,11 @@ func addUnpublishedReplicaTest() { func TestReplica(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Replica Test Suite") + reportDir := os.Getenv("e2e_reports_dir") + junitReporter := reporters.NewJUnitReporter(reportDir + "/replica-junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Replica Test Suite", + []Reporter{junitReporter}) + } var _ = Describe("Mayastor replica tests", func() { @@ -69,7 +77,7 @@ var _ = Describe("Mayastor replica tests", func() { }) var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) common.SetupTestEnv() close(done) }, 60) diff --git a/test/e2e/uninstall/uninstall_test.go b/test/e2e/uninstall/uninstall_test.go index b93bce924..fd2ae6977 100644 --- a/test/e2e/uninstall/uninstall_test.go +++ b/test/e2e/uninstall/uninstall_test.go @@ -1,33 +1,21 @@ package basic_test import ( - "context" - "fmt" + "e2e-basic/common" + . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" + . "github.com/onsi/gomega" + "os" "os/exec" "path" "runtime" "testing" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/deprecated/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment - // Encapsulate the logic to find where the deploy yamls are func getDeployYamlDir() string { _, filename, _, _ := runtime.Caller(0) @@ -42,48 +30,73 @@ func deleteDeployYaml(filename string) { Expect(err).ToNot(HaveOccurred()) } -// Encapsulate the logic to find where the templated yamls are -func getTemplateYamlDir() string { - _, filename, _, _ := runtime.Caller(0) - return path.Clean(filename + "/../../install/deploy") -} - -func makeImageName(registryAddress string, registryport string, imagename string, imageversion string) string { - return registryAddress + ":" + registryport + "/mayadata/" + imagename + ":" + imageversion -} - -// We expect this to fail a few times before it succeeds, -// so no throwing errors from here. -func mayastorReadyPodCount() int { - var mayastorDaemonSet appsv1.DaemonSet - if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: "mayastor"}, &mayastorDaemonSet) != nil { - return -1 - } - return int(mayastorDaemonSet.Status.CurrentNumberScheduled) +// Helper for deleting mayastor CRDs +func deleteCRD(crdName string) { + cmd := exec.Command("kubectl", "delete", "crd", crdName) + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) } // Teardown mayastor on the cluster under test. // We deliberately call out to kubectl, rather than constructing the client-go // objects, so that we can verfiy the local deploy yamls are correct. func teardownMayastor() { - deleteDeployYaml("mayastor-daemonset.yaml") - deleteDeployYaml("moac-deployment.yaml") - deleteDeployYaml("csi-daemonset.yaml") - deleteDeployYaml("nats-deployment.yaml") + // The correct sequence for a reusable cluster is + // Delete all pods in the default namespace + // Delete all pvcs + // Then uninstall mayastor + podsDeleted, podCount := common.DeleteAllPods() + pvcsDeleted, pvcsFound := common.DeleteAllVolumeResources() + + logf.Log.Info("Cleanup done, Uninstalling mayastor") + // Deletes can stall indefinitely, try to mitigate this + // by running the deletes in different threads + go deleteDeployYaml("csi-daemonset.yaml") + time.Sleep(10 * time.Second) + go deleteDeployYaml("mayastor-daemonset.yaml") + time.Sleep(5 * time.Second) + go deleteDeployYaml("moac-deployment.yaml") + time.Sleep(5 * time.Second) + go deleteDeployYaml("nats-deployment.yaml") + time.Sleep(5 * time.Second) + + { + iters := 18 + logf.Log.Info("Waiting for Mayastor pods to be deleted", "timeout seconds", iters*10) + numMayastorPods := common.MayastorUndeletedPodCount() + for attempts := 0; attempts < iters && numMayastorPods != 0; attempts++ { + time.Sleep(10 * time.Second) + numMayastorPods = common.MayastorUndeletedPodCount() + logf.Log.Info("", "numMayastorPods", numMayastorPods) + } + } + + // The focus is on trying to make the cluster reusable, so we try to delete everything. 
+	// TODO: When we start using a cluster for a single test run move this set of deletes to after all checks.
 	deleteDeployYaml("mayastorpoolcrd.yaml")
 	deleteDeployYaml("moac-rbac.yaml")
 	deleteDeployYaml("storage-class.yaml")
+	deleteCRD("mayastornodes.openebs.io")
+	deleteCRD("mayastorvolumes.openebs.io")
+	// Attempt to forcefully delete pods
+	// TODO replace this function call when a single cluster is used for a single test run, with a check.
+	forceDeleted := common.ForceDeleteMayastorPods()
 	deleteDeployYaml("namespace.yaml")
+	Expect(forceDeleted).To(BeFalse())

-	Eventually(mayastorReadyPodCount,
-		"120s", // timeout
-		"1s",   // polling interval
-	).Should(Equal(-1))
+	Expect(podsDeleted).To(BeTrue())
+	Expect(podCount).To(BeZero())
+	Expect(pvcsFound).To(BeFalse())
+	Expect(pvcsDeleted).To(BeTrue())
+	Expect(common.MayastorUndeletedPodCount()).To(Equal(0))
 }

 func TestTeardownSuite(t *testing.T) {
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "Basic Teardown Suite")
+	reportDir := os.Getenv("e2e_reports_dir")
+	junitReporter := reporters.NewJUnitReporter(reportDir + "/uninstall-junit.xml")
+	RunSpecsWithDefaultAndCustomReporters(t, "Basic Teardown Suite",
+		[]Reporter{junitReporter})
 }

 var _ = Describe("Mayastor setup", func() {
@@ -93,38 +106,8 @@
 })

 var _ = BeforeSuite(func(done Done) {
-	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
-
-	By("bootstrapping test environment")
-	useCluster := true
-	testEnv = &envtest.Environment{
-		UseExistingCluster:       &useCluster,
-		AttachControlPlaneOutput: true,
-	}
-
-	var err error
-	cfg, err = testEnv.Start()
-	Expect(err).ToNot(HaveOccurred())
-	Expect(cfg).ToNot(BeNil())
-
-	k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
-		Scheme: scheme.Scheme,
-	})
-	Expect(err).ToNot(HaveOccurred())
-
-	go func() {
-		err = k8sManager.Start(ctrl.SetupSignalHandler())
-		Expect(err).ToNot(HaveOccurred())
-	}()
-
-	mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second)
-	defer mgrSyncCtxCancel()
-	if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced {
-		fmt.Println("Failed to sync")
-	}
-
-	k8sClient = k8sManager.GetClient()
-	Expect(k8sClient).ToNot(BeNil())
+	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
+	common.SetupTestEnv()

 	close(done)
 }, 60)

@@ -133,6 +116,5 @@ var _ = AfterSuite(func() {
 	// NB This only tears down the local structures for talking to the cluster,
 	// not the kubernetes cluster itself.
 	By("tearing down the test environment")
-	err := testEnv.Stop()
-	Expect(err).ToNot(HaveOccurred())
+	common.TeardownTestEnv()
 })

From f9eb30531a3005f047689730eee06e703ea4b5f2 Mon Sep 17 00:00:00 2001
From: Jan Kryl
Date: Mon, 18 Jan 2021 18:34:36 +0000
Subject: [PATCH 84/85] fix(moac): distribute nexuses more evenly

We still keep the rule that one replica is always local to the nexus, but
when deciding which replica is chosen as local we try to choose the node
with the smallest number of nexuses.
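For illustration, the selection amounts to sorting the candidate replica
nodes by their current nexus count and taking the first one. A minimal
TypeScript sketch of that rule (the Node and Replica shapes below are
hypothetical stand-ins for the real moac types, not the actual
definitions, and a non-empty replica set is assumed):

    // Hypothetical stand-ins for moac's Node and Replica types.
    interface Node { name: string; nexus: object[] }
    interface Replica { pool: { node: Node } }

    // Pick the replica node currently hosting the fewest nexuses.
    // Assumes replicaSet is non-empty; ties go to whichever node sorts first.
    function pickNexusNode (replicaSet: Replica[]): Node {
      return replicaSet
        .map((r) => r.pool.node)
        .sort((a, b) => a.nexus.length - b.nexus.length)[0];
    }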
---
 csi/moac/test/volumes_test.js | 137 ++++++++++++++++++++++++++++++++++
 csi/moac/volume.ts            |  13 +++-
 2 files changed, 147 insertions(+), 3 deletions(-)

diff --git a/csi/moac/test/volumes_test.js b/csi/moac/test/volumes_test.js
index 98aab5a4c..f126d285f 100644
--- a/csi/moac/test/volumes_test.js
+++ b/csi/moac/test/volumes_test.js
@@ -22,6 +22,7 @@ const enums = require('./grpc_enums');
 const sleep = require('sleep-promise');

 const UUID = 'ba5e39e9-0c0e-4973-8a3a-0dccada09cbb';
+const UUID2 = 'aa5e39e9-0c0e-4973-8a3a-0dccada09cbc';
 const EYE_BLINK_MS = 30;

 module.exports = function () {
@@ -379,6 +380,142 @@
       expect(volume.nexus).to.equal(nexus);
       expect(volEvents).to.have.lengthOf(6);
     });
+
+    it('should distribute nexuses evenly over available nodes', async () => {
+      const replica1 = new Replica({
+        uuid: UUID,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID}`
+      });
+      const replica2 = new Replica({
+        uuid: UUID,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID}`
+      });
+      const replica3 = new Replica({
+        uuid: UUID,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID}`
+      });
+      const replica4 = new Replica({
+        uuid: UUID2,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID2}`
+      });
+      const replica5 = new Replica({
+        uuid: UUID2,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID2}`
+      });
+      const replica6 = new Replica({
+        uuid: UUID2,
+        size: 95,
+        share: 'REPLICA_NONE',
+        uri: `bdev:///${UUID2}`
+      });
+      replica1.pool = pool1;
+      replica2.pool = pool2;
+      replica3.pool = pool3;
+      replica4.pool = pool1;
+      replica5.pool = pool2;
+      replica6.pool = pool3;
+
+      // Fake the volume
+      volume = new Volume(UUID, registry, () => {}, {
+        replicaCount: 3,
+        preferredNodes: [],
+        requiredNodes: [],
+        requiredBytes: 90,
+        limitBytes: 110,
+        protocol: 'nvmf'
+      });
+      volume.newReplica(replica1);
+      volume.newReplica(replica2);
+      volume.newReplica(replica3);
+
+      const volume2 = new Volume(UUID2, registry, () => {}, {
+        replicaCount: 3,
+        preferredNodes: [],
+        requiredNodes: [],
+        requiredBytes: 90,
+        limitBytes: 110,
+        protocol: 'nvmf'
+      });
+      volume2.newReplica(replica4);
+      volume2.newReplica(replica5);
+      volume2.newReplica(replica6);
+      volumes.volumes[UUID] = volume;
+      volumes.volumes[UUID2] = volume2;
+      volume.state = 'healthy';
+      volume2.state = 'healthy';
+
+      volumes.start();
+
+      // set share pcols for replicas of the first volume
+      stub2.onCall(0).resolves({ uri: `nvmf://${UUID}` });
+      stub3.onCall(0).resolves({ uri: `nvmf://${UUID}` });
+      // create first nexus reply
+      stub1.onCall(0).resolves({
+        uuid: UUID,
+        deviceUri: '',
+        size: 95,
+        state: 'NEXUS_ONLINE',
+        children: [{
+          uri: `bdev:///${UUID}`,
+          state: 'CHILD_ONLINE'
+        }, {
+          uri: `nvmf://${UUID}`,
+          state: 'CHILD_ONLINE'
+        }, {
+          uri: `nvmf://${UUID}`,
+          state: 'CHILD_ONLINE'
+        }]
+      });
+      // nexus publish reply
+      stub1.onCall(1).resolves({
+        deviceUri: `nvmf://${UUID}`
+      });
+
+      // publish the first volume
+      let uri = await volume.publish('nvmf');
+      expect(uri).to.equal(`nvmf://${UUID}`);
+      expect(volume.publishedOn).to.equal('node1');
+
+      // set share pcols for replicas of the second volume
+      stub1.onCall(2).resolves({ uri: `nvmf://${UUID2}` });
+      stub3.onCall(1).resolves({ uri: `nvmf://${UUID2}` });
+      // create second nexus reply
+      stub2.onCall(1).resolves({
+        uuid: UUID2,
+        deviceUri: '',
+        size: 95,
+        state: 'NEXUS_ONLINE',
+        children: [{
+          uri: `bdev:///${UUID2}`,
+          state: 'CHILD_ONLINE'
+        }, {
+          uri: `nvmf://${UUID2}`,
+          state: 'CHILD_ONLINE'
+        }, {
+          uri: `nvmf://${UUID2}`,
+          state: 'CHILD_ONLINE'
+        }]
+      });
+      // nexus publish reply
+      stub2.onCall(2).resolves({
+        deviceUri: `nvmf://${UUID2}`
+      });
+
+      // publish the second volume - should be on a different node
+      uri = await volume2.publish('nvmf');
+      expect(uri).to.equal(`nvmf://${UUID2}`);
+      expect(volume2.publishedOn).to.equal('node2');
+    });
   });

   describe('import volume', function () {
diff --git a/csi/moac/volume.ts b/csi/moac/volume.ts
index 7ad4abbca..02128d964 100644
--- a/csi/moac/volume.ts
+++ b/csi/moac/volume.ts
@@ -646,9 +646,16 @@
     }
     replicaSet.splice(this.replicaCount);

-    // If nexus does not exist it will be created on the same node as the most
-    // preferred replica.
-    const nexusNode = this.nexus ? this.nexus.node : replicaSet[0].pool!.node;
+    // If nexus does not exist it will be created on one of the replica nodes
+    // with the least # of nexuses.
+    let nexusNode;
+    if (this.nexus) {
+      nexusNode = this.nexus.node;
+    } else {
+      nexusNode = replicaSet
+        .map((r: Replica) => r.pool!.node)
+        .sort((a: Node, b: Node) => a.nexus.length - b.nexus.length)[0];
+    }

     for (let i = 0; i < replicaSet.length; i++) {
       const replica = replicaSet[i];

From 9a1f07ee0b351a73c226c2f263099e1674e003f3 Mon Sep 17 00:00:00 2001
From: GlennBullingham
Date: Mon, 25 Jan 2021 18:28:55 +0000
Subject: [PATCH 85/85] chore(deployment): Update k8s deployment config files

Update tags of mayastor pod images to reflect semantic version 0.7.0.
Change memory resource request/limit specification for the mayastor
engine daemonset from 500Mi to 512Mi because base-2 is a thing.
---
 deploy/csi-daemonset.yaml             | 2 +-
 deploy/mayastor-daemonset-config.yaml | 6 +++---
 deploy/mayastor-daemonset.yaml        | 6 +++---
 deploy/moac-deployment.yaml           | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/deploy/csi-daemonset.yaml b/deploy/csi-daemonset.yaml
index f7d11ebcd..1db3104bf 100644
--- a/deploy/csi-daemonset.yaml
+++ b/deploy/csi-daemonset.yaml
@@ -30,7 +30,7 @@ spec:
       # the same.
       containers:
       - name: mayastor-csi
-        image: mayadata/mayastor-csi:latest
+        image: mayadata/mayastor-csi:v0.7.0
         imagePullPolicy: Always
         # we need privileged because we mount filesystems and use mknod
         securityContext:
diff --git a/deploy/mayastor-daemonset-config.yaml b/deploy/mayastor-daemonset-config.yaml
index 578a685c4..8c8742ac9 100644
--- a/deploy/mayastor-daemonset-config.yaml
+++ b/deploy/mayastor-daemonset-config.yaml
@@ -35,7 +35,7 @@ spec:
         command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;']
       containers:
       - name: mayastor
-        image: mayadata/mayastor:latest
+        image: mayadata/mayastor:v0.7.0
         imagePullPolicy: Always
         env:
         - name: MY_NODE_NAME
@@ -70,11 +70,11 @@
         resources:
          limits:
            cpu: "1"
-           memory: "500Mi"
+           memory: "512Mi"
            hugepages-2Mi: "1Gi"
          requests:
            cpu: "1"
-           memory: "500Mi"
+           memory: "512Mi"
            hugepages-2Mi: "1Gi"
        ports:
        - containerPort: 10124
diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml
index dc4f54484..d92dee673 100644
--- a/deploy/mayastor-daemonset.yaml
+++ b/deploy/mayastor-daemonset.yaml
@@ -33,7 +33,7 @@ spec:
         command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;']
       containers:
       - name: mayastor
-        image: mayadata/mayastor:latest
+        image: mayadata/mayastor:v0.7.0
         imagePullPolicy: Always
         env:
         - name: MY_NODE_NAME
@@ -81,11 +81,11 @@
        # pressure unless they exceed those limits. limits and requests must be the same.
         limits:
           cpu: "2"
-          memory: "500Mi"
+          memory: "512Mi"
           hugepages-2Mi: "1Gi"
         requests:
           cpu: "2"
-          memory: "500Mi"
+          memory: "512Mi"
           hugepages-2Mi: "1Gi"
       ports:
       - containerPort: 10124
diff --git a/deploy/moac-deployment.yaml b/deploy/moac-deployment.yaml
index 22e24a1bb..ca215367e 100644
--- a/deploy/moac-deployment.yaml
+++ b/deploy/moac-deployment.yaml
@@ -59,7 +59,7 @@ spec:
           mountPath: /var/lib/csi/sockets/pluginproxy/
 
       - name: moac
-        image: mayadata/moac:latest
+        image: mayadata/moac:v0.7.0
        imagePullPolicy: Always
        args:
        - "--csi-address=$(CSI_ENDPOINT)"
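
An aside on the 500Mi to 512Mi change above, as a worked example (plain
TypeScript, not part of any patch): Kubernetes memory quantities with an
"i" suffix (Ki, Mi, Gi) are base-2 units, so 512Mi is an exact power of
two while 500Mi is not.

    // 1Mi = 2^20 bytes; 512Mi = 2^9 * 2^20 = 2^29 bytes exactly, which is
    // why 512 is the natural base-2 counterpart of a round 500.
    const MiB = 2 ** 20; // 1,048,576 bytes
    console.log(500 * MiB);            // 524288000 -> not a power of two
    console.log(512 * MiB);            // 536870912 -> 2^29 bytes
    console.log((512 * MiB) / 10 ** 6); // 536.870912 -> 512Mi in decimal MB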