diff --git a/tests/BUILD.bazel b/tests/BUILD.bazel index 1b606ee4bea6..909c4128faeb 100644 --- a/tests/BUILD.bazel +++ b/tests/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "config.go", + "job.go", "ping.go", "test.go", "utils.go", @@ -37,6 +38,7 @@ go_library( "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", @@ -162,6 +164,7 @@ go_test( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/tests/expose_test.go b/tests/expose_test.go index 07c9a3525075..9dae2e3c67b6 100644 --- a/tests/expose_test.go +++ b/tests/expose_test.go @@ -6,6 +6,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" k8sv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,14 +38,6 @@ func newLabeledVMI(label string, virtClient kubecli.KubevirtClient, createVMI bo return } -func waitForJobToCompleteWithStatus(virtClient *kubecli.KubevirtClient, jobPod *k8sv1.Pod, expectedResult k8sv1.PodPhase, timeoutSec time.Duration) { - EventuallyWithOffset(1, func() k8sv1.PodPhase { - pod, err := (*virtClient).CoreV1().Pods(jobPod.Namespace).Get(jobPod.Name, k8smetav1.GetOptions{}) - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - return pod.Status.Phase - }, timeoutSec*time.Second, 1*time.Second).Should(Equal(expectedResult)) -} - var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:component]Expose", func() { var virtClient kubecli.KubevirtClient @@ -56,6 +49,26 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp virtClient, err = kubecli.GetKubevirtClient() tests.PanicOnError(err) }) + runHelloWorldJob := func(host, port, namespace string) *batchv1.Job { + job := tests.NewHelloWorldJob(host, port) + job, err := virtClient.BatchV1().Jobs(namespace).Create(job) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + return job + } + + runHelloWorldJobUDP := func(host, port, namespace string) *batchv1.Job { + job := tests.NewHelloWorldJobUDP(host, port) + job, err := virtClient.BatchV1().Jobs(namespace).Create(job) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + return job + } + + runHelloWorldJobHttp := func(host, port, namespace string) *batchv1.Job { + job := tests.NewHelloWorldJobHTTP(host, port) + job, err := virtClient.BatchV1().Jobs(namespace).Create(job) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + return job + } Context("Expose service on a VM", func() { var tcpVM *v1.VirtualMachineInstance @@ -80,13 +93,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(err).ToNot(HaveOccurred()) serviceIP := svc.Spec.ClusterIP - By("Starting a pod which tries to reach the VMI via ClusterIP") - job := tests.NewHelloWorldJob(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(tcpVM.Namespace).Create(job) - 
Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via ClusterIP") + job := runHelloWorldJob(serviceIP, servicePort, tcpVM.Namespace) - By("Waiting for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 420) }) }) @@ -170,13 +181,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(node.Status.Addresses).ToNot(BeEmpty()) nodeIP := node.Status.Addresses[0].Address - By("Starting a pod which tries to reach the VMI via NodePort") - job := tests.NewHelloWorldJob(nodeIP, strconv.Itoa(int(nodePort))) - job, err = virtClient.CoreV1().Pods(tcpVM.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via NodePort") + job := runHelloWorldJob(nodeIP, strconv.Itoa(int(nodePort)), tcpVM.Namespace) - By("Waiting for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 420) } }) }) @@ -207,13 +216,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(err).ToNot(HaveOccurred()) serviceIP := svc.Spec.ClusterIP - By("Starting a pod which tries to reach the VMI via ClusterIP") - job := tests.NewHelloWorldJobUDP(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via ClusterIP") + job := runHelloWorldJobUDP(serviceIP, servicePort, udpVM.Namespace) - By("Waiting for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 420) }) }) @@ -236,11 +243,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp nodePort := svc.Spec.Ports[0].NodePort Expect(nodePort).To(BeNumerically(">", 0)) - By("Starting a pod which tries to reach the VMI via ClusterIP") - job := tests.NewHelloWorldJobUDP(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120) + By("Starting a job which tries to reach the VMI via ClusterIP") + job := runHelloWorldJobUDP(serviceIP, servicePort, udpVM.Namespace) + + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 120) By("Getting the node IP from all nodes") nodes, err := virtClient.CoreV1().Nodes().List(k8smetav1.ListOptions{}) @@ -250,13 +257,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(node.Status.Addresses).ToNot(BeEmpty()) nodeIP := node.Status.Addresses[0].Address - By("Starting a pod which tries to reach the VMI via NodePort") - job := tests.NewHelloWorldJobUDP(nodeIP, strconv.Itoa(int(nodePort))) - job, err = virtClient.CoreV1().Pods(udpVM.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via NodePort") + job := runHelloWorldJobUDP(nodeIP, strconv.Itoa(int(nodePort)), udpVM.Namespace) - By("Waiting 
for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 420) } }) }) @@ -312,13 +317,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(err).ToNot(HaveOccurred()) serviceIP := svc.Spec.ClusterIP - By("Starting a pod which tries to reach the VMI via ClusterIP") - job := tests.NewHelloWorldJob(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(vmrs.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via ClusterIP") + job := runHelloWorldJob(serviceIP, servicePort, vmrs.Namespace) - By("Waiting for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 420) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 420) }) }) }) @@ -376,21 +379,17 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(err).ToNot(HaveOccurred()) serviceIP := svc.Spec.ClusterIP - By("Starting a pod which tries to reach the VMI via ClusterIP") - job := tests.NewHelloWorldJob(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via ClusterIP") + job := runHelloWorldJob(serviceIP, servicePort, vm.Namespace) - By("Waiting for the pod to report a successful connection attempt") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120) + By("Waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 120) - By("Starting a pod which tries to reach the VMI again via the same ClusterIP, this time over HTTP.") - job = tests.NewHelloWorldJobHttp(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI again via the same ClusterIP, this time over HTTP.") + job = runHelloWorldJobHttp(serviceIP, servicePort, vm.Namespace) - By("Waiting for the HTTP job pod to report a successful connection attempt.") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120) + By("Waiting for the HTTP job to report a successful connection attempt.") + tests.WaitForJobToSucceed(&virtClient, job, 120) }) It("[test_id:345][label:masquerade_binding_connectivity]Should verify the exposed service is functional before and after VM restart.", func() { @@ -401,13 +400,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(err).ToNot(HaveOccurred()) serviceIP := svc.Spec.ClusterIP - By("Starting a pod which tries to reach the VMI via ClusterIP.") - job := tests.NewHelloWorldJob(serviceIP, servicePort) - job, err = virtClient.CoreV1().Pods(vmObj.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("Starting a job which tries to reach the VMI via ClusterIP.") + job := runHelloWorldJob(serviceIP, servicePort, vmObj.Namespace) - By("Waiting for the pod to report a successful connection attempt.") - waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120) + By("Waiting for the job to report a successful connection attempt.") + tests.WaitForJobToSucceed(&virtClient, job, 120) // Retrieve the current VMI UID, to be 
compared with the new UID after restart.
			var vmi *v1.VirtualMachineInstance
@@ -436,13 +433,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp
 			tests.GenerateHelloWorldServer(vmi, testPort, "tcp")
 
 			By("Repeating the sequence as prior to restarting the VM: Connect to exposed ClusterIP service.")
-			By("Starting a pod which tries to reach the VMI via ClusterIP.")
-			job = tests.NewHelloWorldJob(serviceIP, servicePort)
-			job, err = virtClient.CoreV1().Pods(vmObj.Namespace).Create(job)
-			Expect(err).ToNot(HaveOccurred())
+			By("Starting a job which tries to reach the VMI via ClusterIP.")
+			job = runHelloWorldJob(serviceIP, servicePort, vmObj.Namespace)
 
-			By("Waiting for the pod to report a successful connection attempt.")
-			waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
+			By("Waiting for the job to report a successful connection attempt.")
+			tests.WaitForJobToSucceed(&virtClient, job, 120)
 		})
 
 		It("[test_id:343][label:masquerade_binding_connectivity]Should Verify an exposed service of a VM is not functional after VM deletion.", func() {
@@ -451,13 +446,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp
 			Expect(err).ToNot(HaveOccurred())
 			serviceIP := svc.Spec.ClusterIP
 
-			By("Starting a pod which tries to reach the VMI via ClusterIP")
-			job := tests.NewHelloWorldJob(serviceIP, servicePort)
-			job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
-			Expect(err).ToNot(HaveOccurred())
+			By("Starting a job which tries to reach the VMI via ClusterIP")
+			job := runHelloWorldJob(serviceIP, servicePort, vm.Namespace)
 
-			By("Waiting for the pod to report a successful connection attempt")
-			waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodSucceeded, 120)
+			By("Waiting for the job to report a successful connection attempt")
+			tests.WaitForJobToSucceed(&virtClient, job, 120)
 
 			By("Comparing the service's endpoints IP address to the VM pod IP address.")
 			// Get the IP address of the VM pod.
@@ -489,13 +482,11 @@ var _ = Describe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:comp
 			Expect(err).ToNot(HaveOccurred())
 			Expect(svcEndpoints.Subsets).To(BeNil())
 
-			By("Starting a pod which tries to reach the VMI via the ClusterIP service.")
-			job = tests.NewHelloWorldJob(serviceIP, servicePort)
-			job, err = virtClient.CoreV1().Pods(vm.Namespace).Create(job)
-			Expect(err).ToNot(HaveOccurred())
+			By("Starting a job which tries to reach the VMI via the ClusterIP service.")
+			job = runHelloWorldJob(serviceIP, servicePort, vm.Namespace)
 
-			By("Waiting for the pod to report a failed connection attempt.")
-			waitForJobToCompleteWithStatus(&virtClient, job, k8sv1.PodFailed, 120)
+			By("Waiting for the job to report a failed connection attempt.")
+			tests.WaitForJobToFail(&virtClient, job, 120)
 		})
 	})
 })
diff --git a/tests/job.go b/tests/job.go
new file mode 100644
index 000000000000..635d1ae7d933
--- /dev/null
+++ b/tests/job.go
@@ -0,0 +1,90 @@
+package tests
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	batchv1 "k8s.io/api/batch/v1"
+	k8sv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"kubevirt.io/client-go/kubecli"
+
+	. "github.com/onsi/gomega"
+)
+
+func WaitForJobToSucceed(virtClient *kubecli.KubevirtClient, job *batchv1.Job, timeoutSec time.Duration) {
+	EventuallyWithOffset(1, func() bool {
+		job, err := (*virtClient).BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
+		ExpectWithOffset(1, err).ToNot(HaveOccurred())
+		Expect(job.Status.Failed).NotTo(Equal(*job.Spec.BackoffLimit), "Job was expected to succeed but failed")
+		return job.Status.Succeeded > 0
+	}, timeoutSec*time.Second, 1*time.Second).Should(BeTrue(), "Job should succeed")
+
+}
+
+func WaitForJobToFail(virtClient *kubecli.KubevirtClient, job *batchv1.Job, timeoutSec time.Duration) {
+	EventuallyWithOffset(1, func() int32 {
+		job, err := (*virtClient).BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
+		ExpectWithOffset(1, err).ToNot(HaveOccurred())
+		Expect(job.Status.Succeeded).ToNot(BeNumerically(">", 0), "Job should not succeed")
+		return job.Status.Failed
+	}, timeoutSec*time.Second, 1*time.Second).Should(BeNumerically(">", 0), "Job should fail")
+}
+
+func RenderJob(name string, cmd, args []string) *batchv1.Job {
+	pod := RenderPod(name, cmd, args)
+	job := batchv1.Job{
+		ObjectMeta: pod.ObjectMeta,
+		Spec: batchv1.JobSpec{
+			BackoffLimit:            NewInt32(3),
+			TTLSecondsAfterFinished: NewInt32(60),
+			ActiveDeadlineSeconds:   NewInt64(480),
+			Template: k8sv1.PodTemplateSpec{
+				ObjectMeta: pod.ObjectMeta,
+				Spec:       pod.Spec,
+			},
+		},
+	}
+	return &job
+}
+
+// NewHelloWorldJob takes a DNS entry or an IP and a port which it will use to create a job
+// which tries to contact the host on the provided port.
+// It expects to receive "Hello World!" to succeed.
+func NewHelloWorldJob(host string, port string) *batchv1.Job {
+	check := []string{fmt.Sprintf(`set -x; x="$(head -n 1 < <(nc %s %s -i 3 -w 3))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, host, port)}
+	job := RenderJob("netcat", []string{"/bin/bash", "-c"}, check)
+	return job
+}
+
+// NewHelloWorldJobUDP takes a DNS entry or an IP and a port which it will use to create a job
+// which tries to contact the host on the provided port. It expects to receive "Hello World!" to succeed.
+// Note that in the case of UDP, the server will not see the connection unless something is sent over it.
+// However, netcat does not work well with UDP and closes before the answer arrives; for that, another netcat call is needed,
+// this time as a UDP listener.
+func NewHelloWorldJobUDP(host string, port string) *batchv1.Job {
+	localPort, err := strconv.Atoi(port)
+	if err != nil {
+		return nil
+	}
+	// local port is used to catch the reply - any number can be used
+	// we make it different than the port to be safe if both are running on the same machine
+	localPort--
+	check := []string{fmt.Sprintf(`set -x; trap "kill 0" EXIT; x="$(head -n 1 < <(echo | nc -up %d %s %s -i 3 -w 3 & nc -ul %d))"; echo "$x" ; if [ "$x" = "Hello UDP World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`,
+		localPort, host, port, localPort)}
+	job := RenderJob("netcat", []string{"/bin/bash", "-c"}, check)
+
+	return job
+}
+
+// NewHelloWorldJobHTTP gets an IP address and a port, which it uses to create a job.
+// This job tries to contact the host on the provided port, over HTTP.
+// On success - it expects to receive "Hello World!".
+func NewHelloWorldJobHTTP(host string, port string) *batchv1.Job {
+	check := []string{fmt.Sprintf(`set -x; x="$(head -n 1 < <(curl %s:%s))"; echo "$x" ; if [ "$x" = "Hello World!" 
]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, FormatIPForURL(host), port)} + job := RenderJob("curl", []string{"/bin/bash", "-c"}, check) + + return job +} diff --git a/tests/storage_test.go b/tests/storage_test.go index 626697f7810e..00aec972f9d4 100644 --- a/tests/storage_test.go +++ b/tests/storage_test.go @@ -621,7 +621,7 @@ var _ = Describe("Storage", func() { diskPath = filepath.Join(mountDir, "disk.img") srcDir := filepath.Join(tmpDir, "src") cmd := "mkdir -p " + mountDir + " && mkdir -p " + srcDir + " && chcon -t container_file_t " + srcDir + " && mount --bind " + srcDir + " " + mountDir + " && while true; do sleep 1; done" - pod = tests.RenderHostPathJob("host-path-preparator", tmpDir, k8sv1.HostPathDirectoryOrCreate, k8sv1.MountPropagationBidirectional, []string{"/usr/bin/bash", "-c"}, []string{cmd}) + pod = tests.RenderHostPathPod("host-path-preparator", tmpDir, k8sv1.HostPathDirectoryOrCreate, k8sv1.MountPropagationBidirectional, []string{"/usr/bin/bash", "-c"}, []string{cmd}) pod.Spec.Containers[0].Lifecycle = &k8sv1.Lifecycle{ PreStop: &k8sv1.Handler{ Exec: &k8sv1.ExecAction{ diff --git a/tests/utils.go b/tests/utils.go index 251ecfc515a9..52a1a7748c3b 100644 --- a/tests/utils.go +++ b/tests/utils.go @@ -2706,6 +2706,10 @@ func NewInt32(x int32) *int32 { return &x } +func NewInt64(x int64) *int64 { + return &x +} + func NewRandomReplicaSetFromVMI(vmi *v1.VirtualMachineInstance, replicas int32) *v1.VirtualMachineInstanceReplicaSet { name := "replicaset" + rand.String(5) rs := &v1.VirtualMachineInstanceReplicaSet{ @@ -2731,8 +2735,8 @@ func NewBool(x bool) *bool { return &x } -func RenderJob(name string, cmd []string, args []string) *k8sv1.Pod { - job := k8sv1.Pod{ +func RenderPod(name string, cmd []string, args []string) *k8sv1.Pod { + pod := k8sv1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: name, Labels: map[string]string{ @@ -2760,7 +2764,7 @@ func RenderJob(name string, cmd []string, args []string) *k8sv1.Pod { }, } - return &job + return &pod } func NewConsoleExpecter(virtCli kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance, timeout time.Duration, opts ...expect.Option) (expect.Expecter, <-chan error, error) { @@ -3578,9 +3582,9 @@ func RemoveHostDiskImage(diskPath string, nodeName string) { virtClient, err := kubecli.GetKubevirtClient() PanicOnError(err) - job := newDeleteHostDisksJob(diskPath) + pod := newDeleteHostDisksPod(diskPath) // remove a disk image from a specific node - job.Spec.Affinity = &k8sv1.Affinity{ + pod.Spec.Affinity = &k8sv1.Affinity{ NodeAffinity: &k8sv1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{ NodeSelectorTerms: []k8sv1.NodeSelectorTerm{ @@ -3597,13 +3601,13 @@ func RemoveHostDiskImage(diskPath string, nodeName string) { }, }, } - job, err = virtClient.CoreV1().Pods(NamespaceTestDefault).Create(job) + pod, err = virtClient.CoreV1().Pods(NamespaceTestDefault).Create(pod) PanicOnError(err) getStatus := func() k8sv1.PodPhase { - pod, err := virtClient.CoreV1().Pods(NamespaceTestDefault).Get(job.Name, metav1.GetOptions{}) + podG, err := virtClient.CoreV1().Pods(NamespaceTestDefault).Get(pod.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - return pod.Status.Phase + return podG.Status.Phase } Eventually(getStatus, 30, 1).Should(Equal(k8sv1.PodSucceeded)) } @@ -3873,28 +3877,28 @@ func CreateHostDiskImage(diskPath string) *k8sv1.Pod { dir := filepath.Dir(diskPath) args := []string{fmt.Sprintf(`dd if=/dev/zero of=%s bs=1 count=0 seek=1G && ls -l %s`, diskPath, dir)} - 
job := RenderHostPathJob("hostdisk-create-job", dir, hostPathType, k8sv1.MountPropagationNone, []string{"/bin/bash", "-c"}, args) + pod := RenderHostPathPod("hostdisk-create-job", dir, hostPathType, k8sv1.MountPropagationNone, []string{"/bin/bash", "-c"}, args) - return job + return pod } -func newDeleteHostDisksJob(diskPath string) *k8sv1.Pod { +func newDeleteHostDisksPod(diskPath string) *k8sv1.Pod { hostPathType := k8sv1.HostPathDirectoryOrCreate args := []string{fmt.Sprintf(`rm -rf %s`, diskPath)} - job := RenderHostPathJob("hostdisk-delete-job", filepath.Dir(diskPath), hostPathType, k8sv1.MountPropagationNone, []string{"/bin/bash", "-c"}, args) + pod := RenderHostPathPod("hostdisk-delete-job", filepath.Dir(diskPath), hostPathType, k8sv1.MountPropagationNone, []string{"/bin/bash", "-c"}, args) - return job + return pod } -func RenderHostPathJob(jobName string, dir string, hostPathType k8sv1.HostPathType, mountPropagation k8sv1.MountPropagationMode, cmd []string, args []string) *k8sv1.Pod { - job := RenderJob(jobName, cmd, args) - job.Spec.Containers[0].VolumeMounts = append(job.Spec.Containers[0].VolumeMounts, k8sv1.VolumeMount{ +func RenderHostPathPod(podName string, dir string, hostPathType k8sv1.HostPathType, mountPropagation k8sv1.MountPropagationMode, cmd []string, args []string) *k8sv1.Pod { + pod := RenderPod(podName, cmd, args) + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, k8sv1.VolumeMount{ Name: "hostpath-mount", MountPropagation: &mountPropagation, MountPath: dir, }) - job.Spec.Volumes = append(job.Spec.Volumes, k8sv1.Volume{ + pod.Spec.Volumes = append(pod.Spec.Volumes, k8sv1.Volume{ Name: "hostpath-mount", VolumeSource: k8sv1.VolumeSource{ HostPath: &k8sv1.HostPathVolumeSource{ @@ -3904,46 +3908,7 @@ func RenderHostPathJob(jobName string, dir string, hostPathType k8sv1.HostPathTy }, }) - return job -} - -// NewHelloWorldJob takes a DNS entry or an IP and a port which it will use create a pod -// which tries to contact the host on the provided port. It expects to receive "Hello World!" to succeed. -func NewHelloWorldJob(host string, port string) *k8sv1.Pod { - check := []string{fmt.Sprintf(`set -x; x="$(head -n 1 < <(nc %s %s -i 3 -w 3))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, host, port)} - job := RenderJob("netcat", []string{"/bin/bash", "-c"}, check) - - return job -} - -// NewHelloWorldJobUDP takes a DNS entry or an IP and a port which it will use create a pod -// which tries to contact the host on the provided port. It expects to receive "Hello World!" to succeed. -// Note that in case of UDP, the server will not see the connection unless something is sent over it -// However, netcat does not work well with UDP and closes before the answer arrives, for that another netcat call is needed, -// this time as a UDP listener -func NewHelloWorldJobUDP(host string, port string) *k8sv1.Pod { - localPort, err := strconv.Atoi(port) - if err != nil { - return nil - } - // local port is used to catch the reply - any number can be used - // we make it different than the port to be safe if both are running on the same machine - localPort-- - check := []string{fmt.Sprintf(`set -x; trap "kill 0" EXIT; x="$(head -n 1 < <(echo | nc -up %d %s %s -i 3 -w 3 & nc -ul %d))"; echo "$x" ; if [ "$x" = "Hello UDP World!" 
]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, - localPort, host, port, localPort)} - job := RenderJob("netcat", []string{"/bin/bash", "-c"}, check) - - return job -} - -// NewHelloWorldJobHttp gets an IP address and a port, which it uses to create a pod. -// This pod tries to contact the host on the provided port, over HTTP. -// On success - it expects to receive "Hello World!". -func NewHelloWorldJobHttp(host string, port string) *k8sv1.Pod { - check := []string{fmt.Sprintf(`set -x; x="$(head -n 1 < <(curl %s:%s))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, FormatIPForURL(host), port)} - job := RenderJob("curl", []string{"/bin/bash", "-c"}, check) - - return job + return pod } func GetNodeWithHugepages(virtClient kubecli.KubevirtClient, hugepages k8sv1.ResourceName) *k8sv1.Node { diff --git a/tests/vmi_lifecycle_test.go b/tests/vmi_lifecycle_test.go index 2e740a074ef1..5623307a2bca 100644 --- a/tests/vmi_lifecycle_test.go +++ b/tests/vmi_lifecycle_test.go @@ -1598,8 +1598,8 @@ func shouldUseEmulation(virtClient kubecli.KubevirtClient) bool { return useEmulation } -func renderPkillAllJob(processName string) *k8sv1.Pod { - return tests.RenderJob("vmi-killer", []string{"pkill"}, []string{"-9", processName}) +func renderPkillAllPod(processName string) *k8sv1.Pod { + return tests.RenderPod("vmi-killer", []string{"pkill"}, []string{"-9", processName}) } func getVirtLauncherLogs(virtCli kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) string { @@ -1632,15 +1632,15 @@ func getVirtLauncherLogs(virtCli kubecli.KubevirtClient, vmi *v1.VirtualMachineI } func pkillHandler(virtCli kubecli.KubevirtClient, node string) error { - job := renderPkillAllJob("virt-handler") - job.Spec.NodeName = node - pod, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job) + pod := renderPkillAllPod("virt-handler") + pod.Spec.NodeName = node + createdPod, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(pod) Expect(err).ToNot(HaveOccurred(), "Should create helper pod") getStatus := func() k8sv1.PodPhase { - pod, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Get(pod.Name, metav1.GetOptions{}) + podG, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Get(createdPod.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred(), "Should return current status") - return pod.Status.Phase + return podG.Status.Phase } Eventually(getStatus, 30, 0.5).Should(Equal(k8sv1.PodSucceeded), "Pod should end itself") @@ -1649,17 +1649,17 @@ func pkillHandler(virtCli kubecli.KubevirtClient, node string) error { } func pkillAllLaunchers(virtCli kubecli.KubevirtClient, node string) error { - job := renderPkillAllJob("virt-launcher") - job.Spec.NodeName = node - _, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job) + pod := renderPkillAllPod("virt-launcher") + pod.Spec.NodeName = node + _, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(pod) return err } func pkillAllVMIs(virtCli kubecli.KubevirtClient, node string) error { - job := renderPkillAllJob("qemu") - job.Spec.NodeName = node - _, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job) + pod := renderPkillAllPod("qemu") + pod.Spec.NodeName = node + _, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(pod) return err } diff --git a/tests/vmi_networking_test.go b/tests/vmi_networking_test.go index a634417cf9dd..b7cba4ba1685 100644 --- a/tests/vmi_networking_test.go +++ 
b/tests/vmi_networking_test.go @@ -30,6 +30,7 @@ import ( . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" k8sv1 "k8s.io/api/core/v1" v12 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -58,38 +59,20 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp var inboundVMIWithCustomMacAddress *v1.VirtualMachineInstance var outboundVMI *v1.VirtualMachineInstance - var logPodLogs func(*v12.Pod) - var waitForPodToFinish func(*v12.Pod) v12.PodPhase - const testPort = 1500 tests.BeforeAll(func() { virtClient, err = kubecli.GetKubevirtClient() tests.PanicOnError(err) - - logPodLogs = func(pod *v12.Pod) { - defer GinkgoRecover() - - var s int64 = 500 - logs := virtClient.CoreV1().Pods(inboundVMI.Namespace).GetLogs(pod.Name, &v12.PodLogOptions{SinceSeconds: &s}) - rawLogs, err := logs.DoRaw() - Expect(err).ToNot(HaveOccurred()) - log.Log.Infof("%s", string(rawLogs)) - } - - waitForPodToFinish = func(pod *v12.Pod) v12.PodPhase { - Eventually(func() v12.PodPhase { - j, err := virtClient.CoreV1().Pods(inboundVMI.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - return j.Status.Phase - }, 90*time.Second, 1*time.Second).Should(Or(Equal(v12.PodSucceeded), Equal(v12.PodFailed))) - j, err := virtClient.CoreV1().Pods(inboundVMI.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - logPodLogs(pod) - return j.Status.Phase - } }) + runHelloWorldJob := func(host, port, namespace string) *batchv1.Job { + job := tests.NewHelloWorldJob(host, port) + job, err := virtClient.BatchV1().Jobs(namespace).Create(job) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + return job + } + checkMacAddress := func(vmi *v1.VirtualMachineInstance, expectedMacAddress string, prompt string) { err := tests.CheckForTextExpecter(vmi, []expect.Batcher{ &expect.BSnd{S: "\n"}, @@ -269,7 +252,7 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp ip := inboundVMI.Status.Interfaces[0].IP - //TODO if node count 1, skip whe nv12.NodeSelectorOpOut + //TODO if node count 1, skip the nv12.NodeSelectorOpOut nodes, err := virtClient.CoreV1().Nodes().List(v13.ListOptions{}) Expect(err).ToNot(HaveOccurred()) Expect(nodes.Items).ToNot(BeEmpty()) @@ -277,9 +260,8 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp Skip("Skip network test that requires multiple nodes when only one node is present.") } - // Run netcat and give it one second to ghet "Hello World!" 
back from the VM job := tests.NewHelloWorldJob(ip, strconv.Itoa(testPort)) - job.Spec.Affinity = &v12.Affinity{ + job.Spec.Template.Spec.Affinity = &v12.Affinity{ NodeAffinity: &v12.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{ NodeSelectorTerms: []v12.NodeSelectorTerm{ @@ -292,12 +274,11 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp }, }, } - job.Spec.HostNetwork = hostNetwork + job.Spec.Template.Spec.HostNetwork = hostNetwork - job, err = virtClient.CoreV1().Pods(inboundVMI.ObjectMeta.Namespace).Create(job) + job, err = virtClient.BatchV1().Jobs(inboundVMI.ObjectMeta.Namespace).Create(job) Expect(err).ToNot(HaveOccurred()) - phase := waitForPodToFinish(job) - Expect(phase).To(Equal(v12.PodSucceeded)) + tests.WaitForJobToSucceed(&virtClient, job, 90) }, table.Entry("[test_id:1543]on the same node from Pod", v12.NodeSelectorOpIn, false), table.Entry("[test_id:1544]on a different node from Pod", v12.NodeSelectorOpNotIn, false), @@ -327,24 +308,19 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp }) It("[test_id:1547] should be able to reach the vmi based on labels specified on the vmi", func() { - By("starting a pod which tries to reach the vmi via the defined service") - job := tests.NewHelloWorldJob(fmt.Sprintf("%s.%s", "myservice", inboundVMI.Namespace), strconv.Itoa(testPort)) - job, err = virtClient.CoreV1().Pods(inboundVMI.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("starting a job which tries to reach the vmi via the defined service") + job := runHelloWorldJob(fmt.Sprintf("%s.%s", "myservice", inboundVMI.Namespace), strconv.Itoa(testPort), inboundVMI.Namespace) - By("waiting for the pod to report a successful connection attempt") - phase := waitForPodToFinish(job) - Expect(phase).To(Equal(v12.PodSucceeded)) + By("waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 90) }) It("[test_id:1548]should fail to reach the vmi if an invalid servicename is used", func() { - By("starting a pod which tries to reach the vmi via a non-existent service") - job := tests.NewHelloWorldJob(fmt.Sprintf("%s.%s", "wrongservice", inboundVMI.Namespace), strconv.Itoa(testPort)) - job, err = virtClient.CoreV1().Pods(inboundVMI.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) - By("waiting for the pod to report an unsuccessful connection attempt") - phase := waitForPodToFinish(job) - Expect(phase).To(Equal(v12.PodFailed)) + By("starting a job which tries to reach the vmi via a non-existent service") + job := runHelloWorldJob(fmt.Sprintf("%s.%s", "wrongservice", inboundVMI.Namespace), strconv.Itoa(testPort), inboundVMI.Namespace) + + By("waiting for the job to report an unsuccessful connection attempt") + tests.WaitForJobToFail(&virtClient, job, 90) }) AfterEach(func() { @@ -376,14 +352,11 @@ var _ = Describe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:comp }) It("[test_id:1549]should be able to reach the vmi via its unique fully qualified domain name", func() { - By("starting a pod which tries to reach the vm via the defined service") - job := tests.NewHelloWorldJob(fmt.Sprintf("%s.%s.%s", inboundVMI.Spec.Hostname, inboundVMI.Spec.Subdomain, inboundVMI.Namespace), strconv.Itoa(testPort)) - job, err = virtClient.CoreV1().Pods(inboundVMI.Namespace).Create(job) - Expect(err).ToNot(HaveOccurred()) + By("starting a job which tries to reach the vm via the defined service") + job := 
runHelloWorldJob(fmt.Sprintf("%s.%s.%s", inboundVMI.Spec.Hostname, inboundVMI.Spec.Subdomain, inboundVMI.Namespace), strconv.Itoa(testPort), inboundVMI.Namespace) - By("waiting for the pod to report a successful connection attempt") - phase := waitForPodToFinish(job) - Expect(phase).To(Equal(v12.PodSucceeded)) + By("waiting for the job to report a successful connection attempt") + tests.WaitForJobToSucceed(&virtClient, job, 90) }) AfterEach(func() {
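Reviewer note: for reference, a minimal sketch of how the helpers introduced in tests/job.go compose in a test. None of this code is part of the diff; the package name tests_sketch, the function checkClusterIPConnectivity, the assumed import path of the tests package, and the serviceIP/servicePort/namespace arguments are invented for illustration. The calls themselves (NewHelloWorldJob, BatchV1().Jobs(...).Create, WaitForJobToSucceed) are exactly the ones this change uses.

package tests_sketch // hypothetical package, illustration only

import (
	. "github.com/onsi/gomega"

	"kubevirt.io/client-go/kubecli"
	"kubevirt.io/kubevirt/tests" // assumed import path of the tests package
)

// checkClusterIPConnectivity mirrors the pattern now used in expose_test.go:
// render a batch/v1 Job that netcats host:port, create it, and poll the Job
// status instead of a single pod phase.
func checkClusterIPConnectivity(virtClient kubecli.KubevirtClient, serviceIP, servicePort, namespace string) {
	// The Job's container exits 0 only if it reads back "Hello World!".
	job := tests.NewHelloWorldJob(serviceIP, servicePort)

	// Retries are now owned by the Job controller: RenderJob sets BackoffLimit=3,
	// ActiveDeadlineSeconds=480 and TTLSecondsAfterFinished=60.
	job, err := virtClient.BatchV1().Jobs(namespace).Create(job)
	Expect(err).ToNot(HaveOccurred())

	// Passes once job.Status.Succeeded > 0, with a 420 second timeout.
	tests.WaitForJobToSucceed(&virtClient, job, 420)
}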
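A note on semantics: the old waitForJobToCompleteWithStatus watched a single pod phase, while the new waiters read the Job's status counters. WaitForJobToSucceed passes once Succeeded > 0 and fails fast if Failed ever reaches *Spec.BackoffLimit; WaitForJobToFail passes as soon as Failed > 0, i.e. after the first failed pod rather than after the backoff limit is exhausted. The sketch below is a hypothetical debugging helper (inspectJobCounters is not part of the change) that only touches fields the diff's waiters already use.

package tests_sketch // hypothetical, illustration only

import (
	"fmt"

	. "github.com/onsi/gomega"
	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"kubevirt.io/client-go/kubecli"
)

// inspectJobCounters prints the counters that WaitForJobToSucceed and
// WaitForJobToFail poll, for a Job previously created via BatchV1().Jobs().Create.
func inspectJobCounters(virtClient kubecli.KubevirtClient, job *batchv1.Job) {
	latest, err := virtClient.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
	Expect(err).ToNot(HaveOccurred())
	fmt.Printf("succeeded=%d failed=%d backoffLimit=%d\n",
		latest.Status.Succeeded, latest.Status.Failed, *latest.Spec.BackoffLimit)
}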
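One mechanical consequence of the Pod-to-Job switch, visible in the vmi_networking_test.go hunks above, is that pod-level fields (Affinity, HostNetwork, NodeName) move under the Job's pod template. A side-by-side sketch under the same assumptions as before; the command and node name are made up:

package tests_sketch // hypothetical, illustration only

import "kubevirt.io/kubevirt/tests" // assumed import path

// placeProbeOnNode shows where pod-level knobs live before and after the switch.
func placeProbeOnNode() {
	check := []string{`echo "Hello World!"`} // stand-in command

	// core/v1 Pod helper: scheduling/networking fields sit directly on Spec.
	pod := tests.RenderPod("netcat", []string{"/bin/bash", "-c"}, check)
	pod.Spec.NodeName = "node01" // hypothetical node
	pod.Spec.HostNetwork = true

	// batch/v1 Job helper: the same fields move under Spec.Template.Spec,
	// which is why the networking test now sets Affinity and HostNetwork there.
	job := tests.RenderJob("netcat", []string{"/bin/bash", "-c"}, check)
	job.Spec.Template.Spec.NodeName = "node01"
	job.Spec.Template.Spec.HostNetwork = true

	_, _ = pod, job
}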