Revert "Merge pull request kubevirt#3314 from maya-r/loopdev"
This reverts commit 5c0e98b, reversing
changes made to 7b7e02d.

`losetup` is not namespaced, so it creates a new loop device on the host
every time the `disks-images-provider` container is created. As a result,
our CI is doomed to work only 100 times on each host (the number of
pre-provisioned devices). Once the 'next available' loop device is not a
pre-provisioned one, the `disks-images-provider` container fails to reach
a `Running` state, which causes the functest make target to fail after
5 minutes (the default timeout of the test script).
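
A minimal shell sketch of the failure mode (the image path and device
numbers below are illustrative): `losetup --find` allocates loop devices
globally on the host, no matter which container invokes it.

    # run from any privileged container that bind-mounts the host's /dev
    losetup --find --show /images/alpine/disk.img   # e.g. claims /dev/loop17 on the host
    losetup --list                                  # the attachment is visible host-wide
    # once the ~100 pre-provisioned /dev/loopN nodes are exhausted, the next
    # allocation lands on a device with no node inside the container, and the
    # disks-images-provider container never reaches `Running`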

Signed-off-by: Miguel Duarte Barroso <[email protected]>
maiqueb committed Jun 17, 2020
1 parent 274c340 commit 3f906fc
Showing 6 changed files with 28 additions and 28 deletions.
5 changes: 5 additions & 0 deletions hack/cluster-deploy.sh
@@ -76,6 +76,11 @@ if [[ "$KUBEVIRT_PROVIDER" =~ os-* ]] || [[ "$KUBEVIRT_PROVIDER" =~ (okd|ocp)-*
_kubectl adm policy add-scc-to-user privileged admin
fi

if [[ "$KUBEVIRT_PROVIDER" =~ kind.* ]]; then
#removing it since it's crashing with dind because loopback devices are shared with the host
_kubectl delete -n kubevirt ds disks-images-provider
fi

# Ensure the KubeVirt CRD is created
count=0
until _kubectl get crd kubevirts.kubevirt.io; do
26 changes: 0 additions & 26 deletions manifests/testing/disks-images-provider.yaml.in
@@ -114,29 +114,6 @@ spec:
- /ready
initialDelaySeconds: 10
periodSeconds: 5
- name: loopdev
command:
- sh
- -c
- |
while true; do
for i in $(seq 0 100); do
if ! [ -e /dev/loop$i ]; then
mknod /dev/loop$i b 7 $i
fi
done
# XXX: we can't finish running because we're a DaemonSet
# Switch to being a Pod!
sleep 100000000
done
image: {{.DockerPrefix}}/disks-images-provider:{{.DockerTag}}
imagePullPolicy: IfNotPresent
resources: {}
securityContext:
privileged: true
volumeMounts:
- name: dev
mountPath: /dev
volumes:
- name: images
hostPath:
@@ -146,6 +123,3 @@ spec:
hostPath:
path: /mnt/local-storage
type: DirectoryOrCreate
- name: dev
hostPath:
path: /dev
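
For context, a condensed sketch of what the removed `loopdev` sidecar did
(block major 7 is the Linux loop driver; /dev here is the host's /dev,
bind-mounted through the hostPath volume removed above):

    # pre-create device nodes /dev/loop0 .. /dev/loop100 if they don't exist
    for i in $(seq 0 100); do
        [ -e /dev/loop$i ] || mknod /dev/loop$i b 7 $i   # block device, major 7, minor $i
    done
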
5 changes: 5 additions & 0 deletions tests/config_test.go
@@ -69,6 +69,8 @@ var _ = Describe("[rfe_id:899][crit:medium][vendor:[email protected]][level:comp
})

It("[test_id:782]Should be the fs layout the same for a pod and vmi", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

expectedOutput := "value1value2value3"

By("Running VMI")
@@ -162,6 +164,7 @@ var _ = Describe("[rfe_id:899][crit:medium][vendor:[email protected]][level:comp
})

It("[test_id:779]Should be the fs layout the same for a pod and vmi", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

expectedOutput := "adminredhat"

@@ -240,6 +243,8 @@ var _ = Describe("[rfe_id:899][crit:medium][vendor:[email protected]][level:comp
serviceAccountPath := config.ServiceAccountSourceDir

It("[test_id:998]Should be the namespace and token the same for a pod and vmi", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

By("Running VMI")
vmi := tests.NewRandomVMIWithServiceAccount("default")
tests.RunVMIAndExpectLaunch(vmi, 90)
12 changes: 10 additions & 2 deletions tests/storage_test.go
@@ -104,11 +104,12 @@ var _ = Describe("Storage", func() {
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]with Alpine PVC", func() {
table.DescribeTable("should be successfully started", func(newVMI VMICreationFunc, storageEngine string) {
tests.SkipPVCTestIfRunnigOnKindInfra()

var ignoreWarnings bool
var pvName string
// Start the VirtualMachineInstance with the PVC attached
if storageEngine == "nfs" {
tests.SkipNFSTestIfRunnigOnKindInfra()
pvName = initNFS()
ignoreWarnings = true
} else {
@@ -128,6 +129,8 @@ var _ = Describe("Storage", func() {
)

table.DescribeTable("should be successfully started and stopped multiple times", func(newVMI VMICreationFunc) {
tests.SkipPVCTestIfRunnigOnKindInfra()

vmi = newVMI(tests.DiskAlpineHostPath)

num := 3
@@ -251,11 +254,11 @@ var _ = Describe("Storage", func() {

// The following case is mostly similar to the alpine PVC test above, except using different VirtualMachineInstance.
table.DescribeTable("should be successfully started", func(newVMI VMICreationFunc, storageEngine string) {
tests.SkipPVCTestIfRunnigOnKindInfra()
var ignoreWarnings bool
var pvName string
// Start the VirtualMachineInstance with the PVC attached
if storageEngine == "nfs" {
tests.SkipNFSTestIfRunnigOnKindInfra()
pvName = initNFS()
ignoreWarnings = true
} else {
@@ -275,6 +278,8 @@ var _ = Describe("Storage", func() {

// Not a candidate for testing on NFS because the VMI is restarted and NFS PVC can't be re-used
It("[test_id:3137]should not persist data", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

vmi = tests.NewRandomVMIWithEphemeralPVC(tests.DiskAlpineHostPath)

By("Starting the VirtualMachineInstance")
@@ -340,6 +345,8 @@ var _ = Describe("Storage", func() {

// Not a candidate for testing on NFS because the VMI is restarted and NFS PVC can't be re-used
It("[test_id:3138]should start vmi multiple times", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

vmi = tests.NewRandomVMIWithPVC(tests.DiskAlpineHostPath)
tests.AddPVCDisk(vmi, "disk1", "virtio", tests.DiskCustomHostPath)

@@ -662,6 +669,7 @@ var _ = Describe("Storage", func() {

// Not a candidate for NFS because local volumes are used in test
It("[test_id:1015] should be successfully started", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()
// Start the VirtualMachineInstance with the PVC attached
vmi = tests.NewRandomVMIWithPVC(tests.BlockDiskForTest)
// Without userdata the hostname isn't set correctly and the login expecter fails...
6 changes: 6 additions & 0 deletions tests/utils.go
@@ -4753,6 +4753,12 @@ func SkipStressTestIfRunnigOnKindInfra() {
}
}

func SkipPVCTestIfRunnigOnKindInfra() {
if IsRunningOnKindInfra() {
Skip("Skip PVC tests till PR https://github.com/kubevirt/kubevirt/pull/3171 is merged")
}
}

func SkipNFSTestIfRunnigOnKindInfra() {
if IsRunningOnKindInfra() {
Skip("Skip NFS tests till issue https://github.com/kubevirt/kubevirt/issues/3322 is fixed")
2 changes: 2 additions & 0 deletions tests/vmi_configuration_test.go
@@ -1639,6 +1639,8 @@ var _ = Describe("Configurations", func() {
}, 60)

It("[test_id:1681]should set appropriate cache modes", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()

vmi := tests.NewRandomVMI()
vmi.Spec.Domain.Resources.Requests[kubev1.ResourceMemory] = resource.MustParse("64M")

