Skip to content

Commit

Permalink
update virtiofs test to WFFC with data volume
Browse files Browse the repository at this point in the history
Signed-off-by: Vladik Romanovsky <[email protected]>
  • Loading branch information
vladikr committed Mar 23, 2021
1 parent 3a59b97 commit 8f77e85
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 24 deletions.
21 changes: 2 additions & 19 deletions tests/storage/datavolume.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,23 +65,6 @@ var _ = SIGDescribe("[Serial]DataVolume Integration", func() {
}
})

// runVMIAndExpectLaunch creates the given VMI (retrying creation until it
// succeeds), waits for the backing DataVolume import to complete, and then
// waits for the VMI itself to start — all bounded by timeout (seconds).
runVMIAndExpectLaunch := func(vmi *v1.VirtualMachineInstance, dv *cdiv1.DataVolume, timeout int) *v1.VirtualMachineInstance {
By("Starting a VirtualMachineInstance with DataVolume")
var createdVMI *v1.VirtualMachineInstance
// Creation can transiently fail (e.g. webhook not ready); poll once a
// second until it succeeds or the timeout elapses.
Eventually(func() error {
var createErr error
createdVMI, createErr = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
return createErr
}, timeout, 1*time.Second).ShouldNot(HaveOccurred())

By("Waiting until the DV is ready")
tests.WaitForSuccessfulDataVolumeImport(dv, timeout)

By("Waiting until the VirtualMachineInstance will start")
tests.WaitForSuccessfulVMIStartWithTimeout(createdVMI, timeout)
return createdVMI
}

Describe("[rfe_id:3188][crit:high][vendor:[email protected]][level:system] Starting a VirtualMachineInstance with a DataVolume as a volume source", func() {

Context("Alpine import", func() {
Expand Down Expand Up @@ -116,7 +99,7 @@ var _ = SIGDescribe("[Serial]DataVolume Integration", func() {
By("Starting and stopping the VirtualMachineInstance a number of times")
for i := 1; i <= num; i++ {
tests.WaitForDataVolumeReadyToStartVMI(vmi, 140)
vmi := runVMIAndExpectLaunch(vmi, dataVolume, 500)
vmi := tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)
// Verify console on last iteration to verify the VirtualMachineInstance is still booting properly
// after being restarted multiple times
if i == num {
Expand Down Expand Up @@ -144,7 +127,7 @@ var _ = SIGDescribe("[Serial]DataVolume Integration", func() {
}
// with WFFC the run actually starts the import and then runs VM, so the timeout has to include both
// import and start
vmi = runVMIAndExpectLaunch(vmi, dataVolume, 500)
vmi = tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)

By("Checking that the VirtualMachineInstance console has expected output")
Expect(console.LoginToAlpine(vmi)).To(Succeed())
Expand Down
22 changes: 17 additions & 5 deletions tests/storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ import (
"k8s.io/apimachinery/pkg/util/rand"

virtv1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
. "kubevirt.io/kubevirt/tests/framework/matcher"

Expand Down Expand Up @@ -356,14 +355,21 @@ var _ = Describe("Storage", func() {
}
dataVolume = tests.NewRandomDataVolumeWithHttpImport(tests.GetUrl(tests.AlpineHttpUrl), tests.NamespaceTestDefault, k8sv1.ReadWriteOnce)
})
AfterEach(func() {
err = virtClient.CdiClient().CdiV1alpha1().DataVolumes(dataVolume.Namespace).Delete(context.Background(), dataVolume.Name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
})

It("[QUARANTINE][owner:@sig-compute]should be successfully started and virtiofs could be accessed", func() {
It("should be successfully started and virtiofs could be accessed", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

vmi := tests.NewRandomVMIWithFSFromDataVolume(dataVolume.Name)
_, err := virtClient.CdiClient().CdiV1alpha1().DataVolumes(dataVolume.Namespace).Create(context.Background(), dataVolume, metav1.CreateOptions{})
Expect(err).To(BeNil())
Eventually(ThisDV(dataVolume), 160, 1).Should(Or(BeInPhase(cdiv1.Succeeded), BeInPhase(v1beta1.WaitForFirstConsumer)), "Timed out waiting for DataVolume to complete")
Expect(err).ToNot(HaveOccurred())
By("Waiting until the DataVolume is ready")
if tests.HasBindingModeWaitForFirstConsumer() {
tests.WaitForDataVolumePhaseWFFC(dataVolume.Namespace, dataVolume.Name, 30)
}
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("512Mi")

vmi.Spec.Domain.Devices.Rng = &v1.Rng{}
Expand All @@ -380,7 +386,9 @@ var _ = Describe("Storage", func() {
userData := fmt.Sprintf("%s\n%s", tests.GetFedoraToolsGuestAgentUserData(), mountVirtiofsCommands)
tests.AddUserData(vmi, "cloud-init", userData)

vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
// with WFFC the run actually starts the import and then runs VM, so the timeout has to include both
// import and start
vmi = tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)

// Wait for cloud init to finish and start the agent inside the vmi.
tests.WaitAgentConnected(virtClient, vmi)
Expand All @@ -405,6 +413,10 @@ var _ = Describe("Storage", func() {
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)

})
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]With ephemeral alpine PVC", func() {
Expand Down
9 changes: 9 additions & 0 deletions tests/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -1445,6 +1445,15 @@ func RunVMIAndExpectLaunch(vmi *v1.VirtualMachineInstance, timeout int) *v1.Virt
return obj
}

// RunVMIAndExpectLaunchWithDataVolume starts the given VMI, waits for its
// backing DataVolume import to finish, and then waits for the VMI to reach
// the running state. The timeout (seconds) applies to each wait stage.
func RunVMIAndExpectLaunchWithDataVolume(vmi *v1.VirtualMachineInstance, dv *cdiv1.DataVolume, timeout int) *v1.VirtualMachineInstance {
	launched := RunVMI(vmi, timeout)

	By("Waiting until the DataVolume is ready")
	WaitForSuccessfulDataVolumeImport(dv, timeout)

	By("Waiting until the VirtualMachineInstance will start")
	WaitForSuccessfulVMIStartWithTimeout(launched, timeout)

	return launched
}

func RunVMIAndExpectLaunchIgnoreWarnings(vmi *v1.VirtualMachineInstance, timeout int) *v1.VirtualMachineInstance {
obj := RunVMI(vmi, timeout)
By("Waiting until the VirtualMachineInstance will start")
Expand Down

0 comments on commit 8f77e85

Please sign in to comment.