Skip to content

Commit

Permalink
Merge pull request kubevirt#3899 from xpivarc/improve-expose-test
Browse files Browse the repository at this point in the history
Improve expose test
  • Loading branch information
kubevirt-bot authored Aug 16, 2020
2 parents 3a69a73 + 5b58673 commit c794ac0
Show file tree
Hide file tree
Showing 7 changed files with 225 additions and 203 deletions.
3 changes: 3 additions & 0 deletions tests/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"config.go",
"job.go",
"ping.go",
"test.go",
"utils.go",
Expand Down Expand Up @@ -37,6 +38,7 @@ go_library(
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
Expand Down Expand Up @@ -162,6 +164,7 @@ go_test(
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
Expand Down
149 changes: 70 additions & 79 deletions tests/expose_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -37,14 +38,6 @@ func newLabeledVMI(label string, virtClient kubecli.KubevirtClient, createVMI bo
return
}

func waitForJobToCompleteWithStatus(virtClient *kubecli.KubevirtClient, jobPod *k8sv1.Pod, expectedResult k8sv1.PodPhase, timeoutSec time.Duration) {
EventuallyWithOffset(1, func() k8sv1.PodPhase {
pod, err := (*virtClient).CoreV1().Pods(jobPod.Namespace).Get(jobPod.Name, k8smetav1.GetOptions{})
ExpectWithOffset(1, err).ToNot(HaveOccurred())
return pod.Status.Phase
}, timeoutSec*time.Second, 1*time.Second).Should(Equal(expectedResult))
}

var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:component]Expose", func() {

var virtClient kubecli.KubevirtClient
Expand All @@ -56,6 +49,26 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
})
runHelloWorldJob := func(host, port, namespace string) *batchv1.Job {
job := tests.NewHelloWorldJob(host, port)
job, err := virtClient.BatchV1().Jobs(namespace).Create(job)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
return job
}

runHelloWorldJobUDP := func(host, port, namespace string) *batchv1.Job {
job := tests.NewHelloWorldJobUDP(host, port)
job, err := virtClient.BatchV1().Jobs(namespace).Create(job)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
return job
}

runHelloWorldJobHttp := func(host, port, namespace string) *batchv1.Job {
job := tests.NewHelloWorldJobHTTP(host, port)
job, err := virtClient.BatchV1().Jobs(namespace).Create(job)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
return job
}

Context("Expose service on a VM", func() {
var tcpVM *v1.VirtualMachineInstance
Expand All @@ -80,13 +93,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(tcpVM.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJob(serviceIP, servicePort, tcpVM.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 420)
})
})

Expand Down Expand Up @@ -170,13 +181,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(node.Status.Addresses).ToNot(BeEmpty())
nodeIP := node.Status.Addresses[0].Address

By("Starting a pod which tries to reach the VMI via NodePort")
job := tests.NewHelloWorldJob(nodeIP, strconv.Itoa(int(nodePort)))
job, err = virtClient.CoreV1().Pods(tcpVM.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via NodePort")
job := runHelloWorldJob(nodeIP, strconv.Itoa(int(nodePort)), tcpVM.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 420)
}
})
})
Expand Down Expand Up @@ -207,13 +216,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJobUDP(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJobUDP(serviceIP, servicePort, udpVM.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 420)
})
})

Expand All @@ -236,11 +243,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
nodePort := svc.Spec.Ports[0].NodePort
Expect(nodePort).To(BeNumerically(">", 0))

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJobUDP(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJobUDP(serviceIP, servicePort, udpVM.Namespace)

By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 120)

By("Getting the node IP from all nodes")
nodes, err := virtClient.CoreV1().Nodes().List(k8smetav1.ListOptions{})
Expand All @@ -250,13 +257,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(node.Status.Addresses).ToNot(BeEmpty())
nodeIP := node.Status.Addresses[0].Address

By("Starting a pod which tries to reach the VMI via NodePort")
job := tests.NewHelloWorldJobUDP(nodeIP, strconv.Itoa(int(nodePort)))
job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via NodePort")
job := runHelloWorldJobUDP(nodeIP, strconv.Itoa(int(nodePort)), udpVM.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 420)
}
})
})
Expand Down Expand Up @@ -312,13 +317,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vmrs.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJob(serviceIP, servicePort, vmrs.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 420)
})
})
})
Expand Down Expand Up @@ -376,21 +379,17 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJob(serviceIP, servicePort, vm.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 120)

By("Starting a pod which tries to reach the VMI again via the same ClusterIP, this time over HTTP.")
job = tests.NewHelloWorldJobHttp(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI again via the same ClusterIP, this time over HTTP.")
job = runHelloWorldJobHttp(serviceIP, servicePort, vm.Namespace)

By("Waiting for the HTTP job pod to report a successful connection attempt.")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Waiting for the HTTP job to report a successful connection attempt.")
tests.WaitForJobToSucceed(&virtClient, job, 120)
})

It("[test_id:345][label:masquerade_binding_connectivity]Should verify the exposed service is functional before and after VM restart.", func() {
Expand All @@ -401,13 +400,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP.")
job := tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vmObj.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP.")
job := runHelloWorldJob(serviceIP, servicePort, vmObj.Namespace)

By("Waiting for the pod to report a successful connection attempt.")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Waiting for the job to report a successful connection attempt.")
tests.WaitForJobToSucceed(&virtClient, job, 120)

// Retrieve the current VMI UID, to be compared with the new UID after restart.
var vmi *v1.VirtualMachineInstance
Expand Down Expand Up @@ -436,13 +433,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
tests.GenerateHelloWorldServer(vmi, testPort, "tcp")

By("Repeating the sequence as prior to restarting the VM: Connect to exposed ClusterIP service.")
By("Starting a pod which tries to reach the VMI via ClusterIP.")
job = tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vmObj.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP.")
job = runHelloWorldJob(serviceIP, servicePort, vmObj.Namespace)

By("Waiting for the pod to report a successful connection attempt.")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Waiting for the job to report a successful connection attempt.")
tests.WaitForJobToSucceed(&virtClient, job, 120)
})

It("[test_id:343][label:masquerade_binding_connectivity]Should Verify an exposed service of a VM is not functional after VM deletion.", func() {
Expand All @@ -451,13 +446,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
serviceIP := svc.Spec.ClusterIP

By("Starting a pod which tries to reach the VMI via ClusterIP")
job := tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via ClusterIP")
job := runHelloWorldJob(serviceIP, servicePort, vm.Namespace)

By("Waiting for the pod to report a successful connection attempt")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
By("Waiting for the job to report a successful connection attempt")
tests.WaitForJobToSucceed(&virtClient, job, 120)

By("Comparing the service's endpoints IP address to the VM pod IP address.")
// Get the IP address of the VM pod.
Expand Down Expand Up @@ -489,13 +482,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:[email protected]][level:comp
Expect(err).ToNot(HaveOccurred())
Expect(svcEndpoints.Subsets).To(BeNil())

By("Starting a pod which tries to reach the VMI via the ClusterIP service.")
job = tests.NewHelloWorldJob(serviceIP, servicePort)
job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
By("Starting a job which tries to reach the VMI via the ClusterIP service.")
job = runHelloWorldJob(serviceIP, servicePort, vm.Namespace)

By("Waiting for the pod to report a failed connection attempt.")
waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodFailed, 120)
By("Waiting for the job to report a failed connection attempt.")
tests.WaitForJobToFail(&virtClient, job, 120)
})
})
})
Expand Down
90 changes: 90 additions & 0 deletions tests/job.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
package tests

import (
"fmt"
"strconv"
"time"

batchv1 "k8s.io/api/batch/v1"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"kubevirt.io/client-go/kubecli"

. "github.com/onsi/gomega"
)

// WaitForJobToSucceed polls the given job until it reports at least one
// succeeded pod, failing the test if that does not happen within timeoutSec
// seconds. The poll also asserts early that the job has not already burned
// through its backoff limit (i.e. definitively failed).
// Note: timeoutSec is a plain number of seconds despite its time.Duration
// type; it is multiplied by time.Second internally.
func WaitForJobToSucceed(virtClient *kubecli.KubevirtClient, job *batchv1.Job, timeoutSec time.Duration) {
	EventuallyWithOffset(1, func() bool {
		job, err := (*virtClient).BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
		ExpectWithOffset(1, err).ToNot(HaveOccurred())
		// Use the same offset as the error check so a failure points at the caller.
		ExpectWithOffset(1, job.Status.Failed).NotTo(Equal(*job.Spec.BackoffLimit), "Job was expected to succeed but failed")
		return job.Status.Succeeded > 0
	}, timeoutSec*time.Second, 1*time.Second).Should(BeTrue(), "Job should succeed")
}

// WaitForJobToFail polls the given job until it reports at least one failed
// pod, failing the test if that does not happen within timeoutSec seconds.
// The poll also asserts that the job never reports a success while waiting.
// Note: timeoutSec is a plain number of seconds despite its time.Duration
// type; it is multiplied by time.Second internally.
func WaitForJobToFail(virtClient *kubecli.KubevirtClient, job *batchv1.Job, timeoutSec time.Duration) {
	EventuallyWithOffset(1, func() int32 {
		job, err := (*virtClient).BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
		ExpectWithOffset(1, err).ToNot(HaveOccurred())
		// Use the same offset as the error check so a failure points at the caller.
		ExpectWithOffset(1, job.Status.Succeeded).ToNot(BeNumerically(">", 0), "Job should not succeed")
		return job.Status.Failed
	}, timeoutSec*time.Second, 1*time.Second).Should(BeNumerically(">", 0), "Job should fail")
}

// RenderJob wraps the pod spec produced by RenderPod(name, cmd, args) in a
// batch Job: up to 3 retries, a 480s active deadline, and automatic cleanup
// 60s after the job finishes.
func RenderJob(name string, cmd, args []string) *batchv1.Job {
	pod := RenderPod(name, cmd, args)
	return &batchv1.Job{
		ObjectMeta: pod.ObjectMeta,
		Spec: batchv1.JobSpec{
			BackoffLimit:            NewInt32(3),
			TTLSecondsAfterFinished: NewInt32(60),
			ActiveDeadlineSeconds:   NewInt64(480),
			Template: k8sv1.PodTemplateSpec{
				ObjectMeta: pod.ObjectMeta,
				Spec:       pod.Spec,
			},
		},
	}
}

// NewHelloWorldJob takes a DNS entry or an IP and a port which it will use to
// create a job which tries to contact the host on the provided port.
// It expects to receive "Hello World!" to succeed.
func NewHelloWorldJob(host string, port string) *batchv1.Job {
	script := fmt.Sprintf(`set -x; x="$(head -n 1 < <(nc %s %s -i 3 -w 3))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, host, port)
	return RenderJob("netcat", []string{"/bin/bash", "-c"}, []string{script})
}

// NewHelloWorldJobUDP takes a DNS entry or an IP and a port which it will use
// to create a job which tries to contact the host on the provided port over
// UDP. It expects to receive "Hello UDP World!" to succeed.
// Note that in case of UDP the server will not see the connection unless
// something is sent over it. However, netcat does not work well with UDP and
// closes before the answer arrives; for that, another netcat call is needed,
// this time as a UDP listener.
// Returns nil if port does not parse as an integer.
func NewHelloWorldJobUDP(host string, port string) *batchv1.Job {
	targetPort, err := strconv.Atoi(port)
	if err != nil {
		return nil
	}
	// The local port only catches the reply, so any number would do; picking
	// one different from the target port keeps things safe if client and
	// server end up on the same machine.
	localPort := targetPort - 1
	script := fmt.Sprintf(`set -x; trap "kill 0" EXIT; x="$(head -n 1 < <(echo | nc -up %d %s %s -i 3 -w 3 & nc -ul %d))"; echo "$x" ; if [ "$x" = "Hello UDP World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`,
		localPort, host, port, localPort)
	return RenderJob("netcat", []string{"/bin/bash", "-c"}, []string{script})
}

// NewHelloWorldJobHTTP gets an IP address and a port, which it uses to create
// a job. The job tries to contact the host on the provided port over HTTP.
// On success - it expects to receive "Hello World!".
func NewHelloWorldJobHTTP(host string, port string) *batchv1.Job {
	script := fmt.Sprintf(`set -x; x="$(head -n 1 < <(curl %s:%s))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, FormatIPForURL(host), port)
	return RenderJob("curl", []string{"/bin/bash", "-c"}, []string{script})
}
2 changes: 1 addition & 1 deletion tests/storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,7 @@ var _ = Describe("Storage", func() {
diskPath = filepath.Join(mountDir, "disk.img")
srcDir := filepath.Join(tmpDir, "src")
cmd := "mkdir -p " + mountDir + " && mkdir -p " + srcDir + " && chcon -t container_file_t " + srcDir + " && mount --bind " + srcDir + " " + mountDir + " && while true; do sleep 1; done"
pod = tests.RenderHostPathJob("host-path-preparator", tmpDir, k8sv1.HostPathDirectoryOrCreate, k8sv1.MountPropagationBidirectional, []string{"/usr/bin/bash", "-c"}, []string{cmd})
pod = tests.RenderHostPathPod("host-path-preparator", tmpDir, k8sv1.HostPathDirectoryOrCreate, k8sv1.MountPropagationBidirectional, []string{"/usr/bin/bash", "-c"}, []string{cmd})
pod.Spec.Containers[0].Lifecycle = &k8sv1.Lifecycle{
PreStop: &k8sv1.Handler{
Exec: &k8sv1.ExecAction{
Expand Down
Loading

0 comments on commit c794ac0

Please sign in to comment.