Address review comments
Signed-off-by: Alvaro Romero <[email protected]>
alromeros committed Oct 24, 2023
1 parent a460a29 commit e5f5c78
Showing 6 changed files with 71 additions and 38 deletions.
@@ -175,6 +175,14 @@ func verifyHotplugVolumes(newHotplugVolumeMap, oldHotplugVolumeMap map[string]v1
 			})
 		}
 		disk := newDisks[k]
+		if disk.Disk == nil && disk.LUN == nil {
+			return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
+				{
+					Type:    metav1.CauseTypeFieldValueInvalid,
+					Message: fmt.Sprintf("Disk %s requires diskDevice of type 'disk' or 'lun' to be hotplugged.", k),
+				},
+			})
+		}
 		if (disk.Disk == nil || disk.Disk.Bus != "scsi") && (disk.LUN == nil || disk.LUN.Bus != "scsi") {
 			return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
 				{
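The new check runs before the existing bus check, so a hotplug request whose diskDevice is neither a disk nor a LUN (for example a CD-ROM) now fails with a precise message instead of falling through to the misleading scsi-bus error. A minimal sketch of the rejected shape against the kubevirt.io/api types — the helper and the disk name are illustrative, not part of this change:

```go
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

// validateHotplugDiskDevice mirrors the new guard for a single hotplugged disk.
func validateHotplugDiskDevice(disk v1.Disk) error {
	if disk.Disk == nil && disk.LUN == nil {
		return fmt.Errorf("Disk %s requires diskDevice of type 'disk' or 'lun' to be hotplugged.", disk.Name)
	}
	return nil
}

func main() {
	// A CD-ROM target sets neither Disk nor LUN, so hotplug is rejected.
	cdrom := v1.Disk{
		Name:       "hotplug-cdrom",
		DiskDevice: v1.DiskDevice{CDRom: &v1.CDRomTarget{Bus: "scsi"}},
	}
	fmt.Println(validateHotplugDiskDevice(cdrom))
}
```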
@@ -375,6 +375,23 @@ var _ = Describe("Validating VMIUpdate Admitter", func() {
 		return res
 	}
 
+	makeCDRomDisks := func(indexes ...int) []v1.Disk {
+		res := make([]v1.Disk, 0)
+		for _, index := range indexes {
+			bootOrder := uint(index + 1)
+			res = append(res, v1.Disk{
+				Name: fmt.Sprintf("volume-name-%d", index),
+				DiskDevice: v1.DiskDevice{
+					CDRom: &v1.CDRomTarget{
+						Bus: "scsi",
+					},
+				},
+				BootOrder: &bootOrder,
+			})
+		}
+		return res
+	}
+
 	makeDisksInvalidBusLastDisk := func(indexes ...int) []v1.Disk {
 		res := makeDisks(indexes...)
 		for i, index := range indexes {
@@ -550,6 +567,13 @@ var _ = Describe("Validating VMIUpdate Admitter", func() {
 			makeLUNDisks(0),
 			makeStatus(1, 0),
 			makeExpected("hotplugged Disk volume-name-1 does not use a scsi bus", "")),
+		Entry("Should reject if we add disk with neither Disk nor LUN type",
+			makeVolumes(0, 1),
+			makeVolumes(0),
+			makeCDRomDisks(0, 1),
+			makeCDRomDisks(0),
+			makeStatus(1, 0),
+			makeExpected("Disk volume-name-1 requires diskDevice of type 'disk' or 'lun' to be hotplugged.", "")),
 		Entry("Should reject if we add disk with invalid boot order",
 			makeVolumes(0, 1),
 			makeVolumes(0),
36 changes: 14 additions & 22 deletions pkg/virt-launcher/virtwrap/converter/converter.go
@@ -150,24 +150,26 @@ func isARM64(arch string) bool {
 	return false
 }
 
+func assignDiskToSCSIController(disk *api.Disk, unit int) {
+	// Ensure we assign this disk to the correct scsi controller
+	if disk.Address == nil {
+		disk.Address = &api.Address{}
+	}
+	disk.Address.Type = "drive"
+	// This should be the index of the virtio-scsi controller, which is hard coded to 0
+	disk.Address.Controller = "0"
+	disk.Address.Bus = "0"
+	disk.Address.Unit = strconv.Itoa(unit)
+}
+
 func Convert_v1_Disk_To_api_Disk(c *ConverterContext, diskDevice *v1.Disk, disk *api.Disk, prefixMap map[string]deviceNamer, numQueues *uint, volumeStatusMap map[string]v1.VolumeStatus) error {
 	if diskDevice.Disk != nil {
 		var unit int
 		disk.Device = "disk"
 		disk.Target.Bus = diskDevice.Disk.Bus
-		if diskDevice.Disk.Bus == "scsi" {
-			// Ensure we assign this disk to the correct scsi controller
-			if disk.Address == nil {
-				disk.Address = &api.Address{}
-			}
-			disk.Address.Type = "drive"
-			// This should be the index of the virtio-scsi controller, which is hard coded to 0
-			disk.Address.Controller = "0"
-			disk.Address.Bus = "0"
-		}
 		disk.Target.Device, unit = makeDeviceName(diskDevice.Name, diskDevice.Disk.Bus, prefixMap)
 		if diskDevice.Disk.Bus == "scsi" {
-			disk.Address.Unit = strconv.Itoa(unit)
+			assignDiskToSCSIController(disk, unit)
 		}
 		if diskDevice.Disk.PciAddress != "" {
 			if diskDevice.Disk.Bus != v1.DiskBusVirtio {
@@ -199,19 +201,9 @@ func Convert_v1_Disk_To_api_Disk(c *ConverterContext, diskDevice *v1.Disk, disk
 		var unit int
 		disk.Device = "lun"
 		disk.Target.Bus = diskDevice.LUN.Bus
-		if diskDevice.LUN.Bus == "scsi" {
-			// Ensure we assign this disk to the correct scsi controller
-			if disk.Address == nil {
-				disk.Address = &api.Address{}
-			}
-			disk.Address.Type = "drive"
-			// This should be the index of the virtio-scsi controller, which is hard coded to 0
-			disk.Address.Controller = "0"
-			disk.Address.Bus = "0"
-		}
 		disk.Target.Device, unit = makeDeviceName(diskDevice.Name, diskDevice.LUN.Bus, prefixMap)
 		if diskDevice.LUN.Bus == "scsi" {
-			disk.Address.Unit = strconv.Itoa(unit)
+			assignDiskToSCSIController(disk, unit)
 		}
 		disk.ReadOnly = toApiReadOnly(diskDevice.LUN.ReadOnly)
 		if diskDevice.LUN.Reservation {
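The refactor collapses the two hand-rolled copies of the SCSI address wiring (one in the disk branch, one in the LUN branch) into a single helper, so both device types now take an identical path. A self-contained sketch of the helper's effect, with stand-in types in place of the virtwrap api package:

```go
package main

import (
	"fmt"
	"strconv"
)

// Stand-ins for the virtwrap api types, for illustration only.
type Address struct{ Type, Controller, Bus, Unit string }
type Disk struct{ Address *Address }

// Mirrors assignDiskToSCSIController: every SCSI disk is pinned to the
// single virtio-scsi controller (index 0) and distinguished by its unit.
func assignDiskToSCSIController(disk *Disk, unit int) {
	if disk.Address == nil {
		disk.Address = &Address{}
	}
	disk.Address.Type = "drive"
	disk.Address.Controller = "0"
	disk.Address.Bus = "0"
	disk.Address.Unit = strconv.Itoa(unit)
}

func main() {
	d := &Disk{}
	assignDiskToSCSIController(d, 3)
	fmt.Printf("%+v\n", *d.Address) // {Type:drive Controller:0 Bus:0 Unit:3}
}
```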
11 changes: 7 additions & 4 deletions pkg/virtctl/vm/add_volume.go
@@ -64,7 +64,7 @@ func NewAddVolumeCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
 	cmd.Flags().StringVar(&cache, cacheArg, "", "caching options attribute control the cache mechanism")
 	cmd.Flags().BoolVar(&persist, persistArg, false, "if set, the added volume will be persisted in the VM spec (if it exists)")
 	cmd.Flags().BoolVar(&dryRun, dryRunArg, false, dryRunCommandUsage)
-	cmd.Flags().StringVar(&diskType, diskTypeArg, "", "specifies disk type to be hotplugged (disk/lun). Disk by default.")
+	cmd.Flags().StringVar(&diskType, diskTypeArg, "disk", "specifies disk type to be hotplugged (disk/lun). Disk by default.")
 
 	return cmd
 }
@@ -135,17 +135,20 @@ func addVolume(vmiName, volumeName, namespace string, virtClient kubecli.Kubevir
 		VolumeSource: volumeSource,
 		DryRun:       *dryRunOption,
 	}
-	if diskType == "" || diskType == "disk" {
+
+	switch diskType {
+	case "disk":
 		hotplugRequest.Disk.DiskDevice.Disk = &v1.DiskTarget{
 			Bus: "scsi",
 		}
-	} else if diskType == "lun" {
+	case "lun":
 		hotplugRequest.Disk.DiskDevice.LUN = &v1.LunTarget{
 			Bus: "scsi",
 		}
-	} else {
+	default:
 		return fmt.Errorf("Invalid disk type '%s'. Only LUN and Disk are supported.", diskType)
 	}
+
 	if serial != "" {
 		hotplugRequest.Disk.Serial = serial
 	} else {
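With the default moved into the flag definition, an empty diskType can now only mean the user explicitly passed --disk-type="", which falls into the default case and errors out instead of silently meaning "disk" as before. A sketch of the payload the lun case builds — the invocation and volume name are hypothetical:

```go
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

func main() {
	// Hypothetical invocation:
	//   virtctl addvolume my-vm --volume-name=my-volume --disk-type=lun
	// The "lun" case above then builds, roughly:
	hotplugRequest := &v1.AddVolumeOptions{
		Name: "my-volume",
		Disk: &v1.Disk{
			Name: "my-volume",
			DiskDevice: v1.DiskDevice{
				LUN: &v1.LunTarget{Bus: "scsi"},
			},
		},
	}
	fmt.Printf("%+v\n", hotplugRequest.Disk.LUN)
}
```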
5 changes: 5 additions & 0 deletions pkg/virtctl/vm/add_volume_test.go
@@ -178,5 +178,10 @@ var _ = Describe("Add volume command", func() {
Entry("addvolume pvc, no persist with dry-run should call VMI endpoint", "addvolume", "testvmi", "testvolume", false, expectVMIEndpointAddVolume, "--dry-run"),
Entry("addvolume dv, with persist with dry-run should call VM endpoint", "addvolume", "testvmi", "testvolume", true, expectVMEndpointAddVolume, "--persist", "--dry-run"),
Entry("addvolume pvc, with persist with dry-run should call VM endpoint", "addvolume", "testvmi", "testvolume", false, expectVMEndpointAddVolume, "--persist", "--dry-run"),

Entry("addvolume pvc, with persist and LUN-type disk should call VM endpoint", "addvolume", "testvmi", "testvolume", false, expectVMEndpointAddVolume, "--persist", "--disk-type", "lun"),
Entry("addvolume dv, with persist and LUN-type disk should call VM endpoint", "addvolume", "testvmi", "testvolume", true, expectVMEndpointAddVolume, "--persist", "--disk-type", "lun"),
Entry("addvolume pvc, with LUN-type disk should call VMI endpoint", "addvolume", "testvmi", "testvolume", false, expectVMIEndpointAddVolume, "--disk-type", "lun"),
Entry("addvolume dv, with LUN-type disk should call VMI endpoint", "addvolume", "testvmi", "testvolume", true, expectVMIEndpointAddVolume, "--disk-type", "lun"),
)
})
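The four new entries extend the existing matrix: --persist is what routes the request to the VM endpoint (so the added LUN lands in the VM spec), while without it only the running VMI is patched; --disk-type=lun changes the disk payload, not the routing. A sketch of that routing rule as the entries assert it, not the test framework's actual plumbing:

```go
package main

import "fmt"

// endpointFor captures the expectation encoded in the entries above: with
// --persist the request goes through the VM subresource (the spec is
// updated), without it through the VMI subresource (hotplug only).
func endpointFor(persist bool) string {
	if persist {
		return "VirtualMachine addvolume endpoint"
	}
	return "VirtualMachineInstance addvolume endpoint"
}

func main() {
	fmt.Println(endpointFor(true))
	fmt.Println(endpointFor(false))
}
```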
25 changes: 13 additions & 12 deletions tests/storage/hotplug.go
@@ -1747,9 +1747,10 @@ var _ = SIGDescribe("Hotplug", func() {
 		})
 	})
 
-	// Some of the functions used here don't behave well in paralel
+	// Some of the functions used here don't behave well in parallel (CreateSCSIDisk).
+	// The device is created directly on the node and the addition and removal
+	// of the scsi_debug kernel module could create flakiness in parallel.
 	Context("[Serial]Hotplug LUN disk", Serial, func() {
-		const randLen = 8
 		var (
 			nodeName, address, device string
 			pvc                       *corev1.PersistentVolumeClaim
@@ -1775,10 +1776,10 @@ var _ = SIGDescribe("Hotplug", func() {
 
It("on an offline VM", func() {
By("Creating VirtualMachine")
vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(nil)).Create(context.Background(), tests.NewRandomVirtualMachine(libvmi.NewCirros(), false))
vm, err = virtClient.VirtualMachine(util.NamespaceTestDefault).Create(context.Background(), tests.NewRandomVirtualMachine(libvmi.NewCirros(), false))
Expect(err).ToNot(HaveOccurred())
By("Adding test volumes")
pv2, pvc2, err := tests.CreatePVandPVCwithSCSIDisk(nodeName, device, testsuite.GetTestNamespace(nil), "scsi-disks-test2", "scsipv2", "scsipvc2")
pv2, pvc2, err := tests.CreatePVandPVCwithSCSIDisk(nodeName, device, util.NamespaceTestDefault, "scsi-disks-test2", "scsipv2", "scsipvc2")
Expect(err).NotTo(HaveOccurred(), "Failed to create PV and PVC for scsi disk")

addVolumeVMWithSource(vm.Name, vm.Namespace, getAddVolumeOptions(testNewVolume1, v1.DiskBusSCSI, &v1.HotplugVolumeSource{
@@ -1804,20 +1805,20 @@ var _ = SIGDescribe("Hotplug", func() {
 		})
 
 		It("on an online VM", func() {
-			opts := []libvmi.Option{}
-			opts = append(opts, libvmi.WithNodeSelectorFor(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}))
-			vmi := libvmi.NewCirros(opts...)
+			vmi := libvmi.NewCirros(libvmi.WithNodeSelectorFor(&corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: nodeName,
+				},
+			}))
 
-			vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vmi)).Create(context.Background(), tests.NewRandomVirtualMachine(vmi, true))
+			vm, err = virtClient.VirtualMachine(util.NamespaceTestDefault).Create(context.Background(), tests.NewRandomVirtualMachine(vmi, true))
 			Expect(err).ToNot(HaveOccurred())
 			Eventually(func() bool {
-				vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vmi)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+				vm, err := virtClient.VirtualMachine(util.NamespaceTestDefault).Get(context.Background(), vm.Name, &metav1.GetOptions{})
 				Expect(err).ToNot(HaveOccurred())
 				return vm.Status.Ready
 			}, 300*time.Second, 1*time.Second).Should(BeTrue())
 
-			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
-			Expect(err).ToNot(HaveOccurred())
 			By(addingVolumeRunningVM)
 			addVolumeVMWithSource(vm.Name, vm.Namespace, getAddVolumeOptions("testvolume", v1.DiskBusSCSI, &v1.HotplugVolumeSource{
 				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
@@ -1827,7 +1828,7 @@ var _ = SIGDescribe("Hotplug", func() {
 			By(verifyingVolumeDiskInVM)
 			verifyVolumeAndDiskVMAdded(virtClient, vm, "testvolume")
 
-			vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
 			Expect(err).ToNot(HaveOccurred())
 			verifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
 			verifyVolumeStatus(vmi, v1.VolumeReady, "", "testvolume")