Merge pull request kubernetes#77715 from danielqsj/t2
fix golint errors, make test error checking more readable in test/e2e/node
k8s-ci-robot authored May 14, 2019
2 parents 91ba27e + 124efde commit a60d212
Showing 15 changed files with 209 additions and 209 deletions.
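
The pattern across these files: golint flags dot imports ("should not use dot imports"), so the Ginkgo and Gomega packages are now imported by name and every call site is qualified (By becomes ginkgo.By, Expect becomes gomega.Expect), which also lets test/e2e/node drop out of the golint failure allowlist. A minimal sketch of the resulting style follows; the spec itself is hypothetical and not part of this diff, only the import style mirrors the real changes below.

package node

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Hypothetical spec illustrating qualified Ginkgo/Gomega calls.
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("adds numbers", func() {
		sum := 1 + 2
		// Matchers read as gomega.Expect / gomega.Equal instead of the bare
		// Expect / Equal that dot imports allowed.
		gomega.Expect(sum).To(gomega.Equal(3))
	})
})
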
1 change: 0 additions & 1 deletion hack/.golint_failures
@@ -604,7 +604,6 @@ test/e2e/chaosmonkey
test/e2e/common
test/e2e/framework
test/e2e/lifecycle/bootstrap
test/e2e/node
test/e2e/scalability
test/e2e/scheduling
test/e2e/storage/drivers
14 changes: 7 additions & 7 deletions test/e2e/node/apparmor.go
@@ -21,29 +21,29 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("AppArmor", func() {
f := framework.NewDefaultFramework("apparmor")

Context("load AppArmor profiles", func() {
BeforeEach(func() {
ginkgo.Context("load AppArmor profiles", func() {
ginkgo.BeforeEach(func() {
common.SkipIfAppArmorNotSupported()
common.LoadAppArmorProfiles(f)
})
AfterEach(func() {
if !CurrentGinkgoTestDescription().Failed {
ginkgo.AfterEach(func() {
if !ginkgo.CurrentGinkgoTestDescription().Failed {
return
}
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
})

It("should enforce an AppArmor profile", func() {
ginkgo.It("should enforce an AppArmor profile", func() {
common.CreateAppArmorTestPod(f, false, true)
})

It("can disable an AppArmor profile, using unconfined", func() {
ginkgo.It("can disable an AppArmor profile, using unconfined", func() {
common.CreateAppArmorTestPod(f, true, true)
})
})
10 changes: 5 additions & 5 deletions test/e2e/node/crictl.go
@@ -24,22 +24,22 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("crictl", func() {
f := framework.NewDefaultFramework("crictl")

BeforeEach(func() {
ginkgo.BeforeEach(func() {
// `crictl` is not available on all cloud providers.
framework.SkipUnlessProviderIs("gce", "gke")
// The test requires $HOME/.ssh/id_rsa key to be present.
framework.SkipUnlessSSHKeyPresent()
})

It("should be able to run crictl on the node", func() {
ginkgo.It("should be able to run crictl on the node", func() {
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
framework.Failf("Error getting node hostnames: %v", err)
@@ -55,7 +55,7 @@ var _ = SIGDescribe("crictl", func() {
for _, testCase := range testCases {
// Choose an arbitrary node to test.
host := hosts[0]
By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
ginkgo.By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))

result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
28 changes: 14 additions & 14 deletions test/e2e/node/events.go
@@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("Events", func() {
@@ -45,7 +45,7 @@ var _ = SIGDescribe("Events", func() {

podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)

By("creating the pod")
ginkgo.By("creating the pod")
name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@@ -67,9 +67,9 @@ var _ = SIGDescribe("Events", func() {
},
}

By("submitting the pod to kubernetes")
ginkgo.By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
ginkgo.By("deleting the pod")
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
@@ -78,25 +78,25 @@ var _ = SIGDescribe("Events", func() {

framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

By("verifying the pod is in kubernetes")
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(len(pods.Items)).To(Equal(1))
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
ginkgo.By("retrieving the pod")
podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod: %v", err)
}
e2elog.Logf("%+v\n", podWithUid)
e2elog.Logf("%+v\n", podWithUID)
var events *v1.EventList
// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
ginkgo.By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.uid": string(podWithUID.UID),
"involvedObject.namespace": f.Namespace.Name,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
Expand All @@ -112,10 +112,10 @@ var _ = SIGDescribe("Events", func() {
return false, nil
}))
// Check for kubelet event about the pod.
By("checking for kubelet event about the pod")
ginkgo.By("checking for kubelet event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.uid": string(podWithUID.UID),
"involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name,
"source": "kubelet",
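
Aside from the qualified Ginkgo/Gomega calls, events.go also renames podWithUid to podWithUID: golint expects initialisms such as UID to keep a consistent case. A tiny sketch of the rule; the function and value here are invented, not from the diff.

package node

import "fmt"

// namingSketch illustrates the initialism rule behind the
// podWithUid -> podWithUID rename.
func namingSketch() {
	podWithUID := "example-uid" // golint-preferred: UID fully capitalized
	// A mixed-case name such as podWithUid would be reported by golint,
	// e.g. "var podWithUid should be podWithUID".
	fmt.Println(podWithUID)
}
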
1 change: 1 addition & 0 deletions test/e2e/node/framework.go
@@ -18,6 +18,7 @@ package node

import "k8s.io/kubernetes/test/e2e/framework"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return framework.KubeDescribe("[sig-node] "+text, body)
}
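
The one-line addition above addresses golint's doc-comment rule: an exported identifier needs a comment that begins with its own name. A generic sketch of the rule, using an invented function rather than the real SIGDescribe.

package sketch

// Answer returns a fixed value; a comment starting with the function's name
// is what satisfies golint. Without it, golint reports something like
// "exported function Answer should have comment or be unexported".
func Answer() int {
	return 42
}
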
68 changes: 34 additions & 34 deletions test/e2e/node/kubelet.go
@@ -35,8 +35,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) {
// will execute the passed in shell cmd. Waits for the pod to start.
// Note: the nfs plugin is defined inline, no PV or PVC.
func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
By("create pod using nfs volume")
ginkgo.By("create pod using nfs volume")

isPrivileged := true
cmdLine := []string{"-c", cmd}
@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
},
}
rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

err = f.WaitForPodReady(rtnPod.Name) // running & ready
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return rtnPod
}

@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
// use ip rather than hostname in GCE
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

condMsg := "deleted"
if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
err = wait.Poll(poll, timeout, func() (bool, error) {
result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2essh.LogResult(result)
ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
if expectClean && ok { // keep trying
Expand All @@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
}
return true, nil // done, host is as expected
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
}

if expectClean {
@@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() {
)
f := framework.NewDefaultFramework("kubelet")

BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
@@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() {
{podsPerNode: 10, timeout: 1 * time.Minute},
}

BeforeEach(func() {
ginkgo.BeforeEach(func() {
// Use node labels to restrict the pods to be assigned only to the
// nodes we observe initially.
nodeLabels = make(map[string]string)
nodeLabels["kubelet_cleanup"] = "true"
nodes := framework.GetReadySchedulableNodesOrDie(c)
numNodes = len(nodes.Items)
Expect(numNodes).NotTo(BeZero())
gomega.Expect(numNodes).NotTo(gomega.BeZero())
nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them
// (if there are 1000 nodes in the cluster, starting 10 pods/node
@@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() {
}
})

AfterEach(func() {
ginkgo.AfterEach(func() {
if resourceMonitor != nil {
resourceMonitor.Stop()
}
@@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() {
for _, itArg := range deleteTests {
name := fmt.Sprintf(
"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
It(name, func() {
ginkgo.It(name, func() {
totalPods := itArg.podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))

Expect(framework.RunRC(testutils.RCConfig{
gomega.Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: rcName,
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: totalPods,
NodeSelector: nodeLabels,
})).NotTo(HaveOccurred())
})).NotTo(gomega.HaveOccurred())
// Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to
// only 30 seconds here because framework.RunRC already waited for all pods to
// transition to the running status.
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
time.Second*30)).NotTo(HaveOccurred())
gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
time.Second*30)).NotTo(gomega.HaveOccurred())
if resourceMonitor != nil {
resourceMonitor.LogLatest()
}

By("Deleting the RC")
ginkgo.By("Deleting the RC")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its
@@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() {
// - a bug in graceful termination (if it is enabled)
// - docker slow to delete pods (or resource problems causing slowness)
start := time.Now()
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
itArg.timeout)).NotTo(HaveOccurred())
gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
itArg.timeout)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start))
if resourceMonitor != nil {
@@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() {
// If the nfs-server pod is deleted the client pod's mount can not be unmounted.
// If the nfs-server pod is deleted and re-created, due to having a different ip
// addr, the client pod's mount still cannot be unmounted.
Context("Host cleanup after disrupting NFS volume [NFS]", func() {
ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
// issue #31272
var (
nfsServerPod *v1.Pod
@@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() {
},
}

BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
})

AfterEach(func() {
ginkgo.AfterEach(func() {
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
err = framework.DeletePodWithWait(f, c, nfsServerPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
})

// execute It blocks from above table of tests
for _, t := range testTbl {
It(t.itDescr, func() {
ginkgo.It(t.itDescr, func() {
pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)

By("Stop the NFS server")
ginkgo.By("Stop the NFS server")
stopNfsServer(nfsServerPod)

By("Delete the pod mounted to the NFS volume -- expect failure")
ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).To(HaveOccurred())
framework.ExpectError(err)
// pod object is now stale, but is intentionally not nil

By("Check if pod's host has been cleaned up -- expect not")
ginkgo.By("Check if pod's host has been cleaned up -- expect not")
checkPodCleanup(c, pod, false)

By("Restart the nfs server")
ginkgo.By("Restart the nfs server")
restartNfsServer(nfsServerPod)

By("Verify that the deleted client pod is now cleaned up")
ginkgo.By("Verify that the deleted client pod is now cleaned up")
checkPodCleanup(c, pod, true)
})
}
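
The "more readable error checking" half of the commit message shows up here: where kubelet.go previously asserted through Gomega that an error must occur, it now calls framework.ExpectError, and the remaining Gomega assertions are spelled out with the gomega prefix. A rough sketch contrasting the two styles, reusing helpers that appear in this diff; the surrounding function is hypothetical.

package node

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteClientPodSketch is a hypothetical helper; only the assertion style
// mirrors the real changes in kubelet.go.
func deleteClientPodSketch(f *framework.Framework, c clientset.Interface, pod *v1.Pod) {
	err := framework.DeletePodWithWait(f, c, pod)
	// Before: gomega.Expect(err).To(gomega.HaveOccurred())
	// After: the framework helper states the intent in one call.
	framework.ExpectError(err)
}
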
(Diff for the remaining 9 of the 15 changed files is not shown here.)
