Skip to content

Commit

Permalink
Merge pull request kubernetes#77627 from draveness/feature/refactor-expect-no-error
Browse files Browse the repository at this point in the history

refactor: use framework.ExpectNoError instead
  • Loading branch information
k8s-ci-robot authored May 14, 2019
2 parents 2926505 + 950f6e8 commit cf8e8e4
Show file tree
Hide file tree
Showing 6 changed files with 14 additions and 19 deletions.
6 changes: 3 additions & 3 deletions test/e2e/common/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -814,19 +814,19 @@ var _ = framework.KubeDescribe("Pods", func() {

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
validatePodReadiness(true)

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
validatePodReadiness(false)

})
Expand Down
7 changes: 3 additions & 4 deletions test/e2e/upgrades/apps/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/test/e2e/upgrades"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

// JobUpgradeTest is a test harness for batch Jobs.
Expand All @@ -44,19 +43,19 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.job = jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := jobutil.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

// Test verifies that the Job's Pods are running after an upgrade
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
ginkgo.By("Ensuring active pods == parallelism")
err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

// Teardown cleans up any remaining resources.
Expand Down
5 changes: 2 additions & 3 deletions test/e2e/upgrades/apps/statefulset.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ package upgrades

import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
Expand Down Expand Up @@ -69,12 +68,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {

ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
Expand Down
1 change: 0 additions & 1 deletion test/e2e/upgrades/storage/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ go_library(
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)

Expand Down
3 changes: 1 addition & 2 deletions test/e2e/upgrades/storage/persistent_volumes.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework/volume"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/upgrades"
)

Expand Down Expand Up @@ -69,7 +68,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {

ginkgo.By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))

ginkgo.By("Consuming the PV before upgrade")
Expand Down
11 changes: 5 additions & 6 deletions test/e2e/upgrades/storage/volume_mode.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/upgrades"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const devicePath = "/mnt/volume1"
Expand Down Expand Up @@ -82,20 +81,20 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
}
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Checking if PV exists as expected volume mode")
utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
Expand Down

0 comments on commit cf8e8e4

Please sign in to comment.