Skip to content

Commit

Permalink
Address review comments
Browse files Browse the repository at this point in the history
Signed-off-by: Shelly Kagan <[email protected]>
  • Loading branch information
ShellyKa13 committed Sep 6, 2022
1 parent 6fca435 commit 85519c5
Show file tree
Hide file tree
Showing 6 changed files with 40 additions and 40 deletions.
16 changes: 8 additions & 8 deletions pkg/storage/types/dv.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func HasFailedDataVolumes(dvs []*cdiv1.DataVolume) bool {
return false
}

func GetCloneSourceWithInformers(vm *virtv1.VirtualMachine, dvSpec *cdiv1.DataVolumeSpec, dataSourceInformer cache.SharedIndexInformer) (*CloneSource, error) {
func GetCloneSourceWithInformers(vm *virtv1.VirtualMachine, dvSpec *cdiv1.DataVolumeSpec, dataSourceInformer cache.SharedInformer) (*CloneSource, error) {
var cloneSource *CloneSource
if dvSpec.Source != nil && dvSpec.Source.PVC != nil {
cloneSource = &CloneSource{
Expand Down Expand Up @@ -183,7 +183,7 @@ func GenerateDataVolumeFromTemplate(clientset kubecli.KubevirtClient, dataVolume

return newDataVolume, nil
}
func GetDataVolumeFromCache(namespace, name string, dataVolumeInformer cache.SharedIndexInformer) (*cdiv1.DataVolume, error) {
func GetDataVolumeFromCache(namespace, name string, dataVolumeInformer cache.SharedInformer) (*cdiv1.DataVolume, error) {
key := controller.NamespacedKey(namespace, name)
obj, exists, err := dataVolumeInformer.GetStore().GetByKey(key)

Expand All @@ -202,7 +202,7 @@ func GetDataVolumeFromCache(namespace, name string, dataVolumeInformer cache.Sha
return dv, nil
}

func HasDataVolumeErrors(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedIndexInformer) error {
func HasDataVolumeErrors(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedInformer) error {
for _, volume := range volumes {
if volume.DataVolume == nil {
continue
Expand Down Expand Up @@ -233,7 +233,7 @@ func HasDataVolumeErrors(namespace string, volumes []virtv1.Volume, dataVolumeIn
return nil
}

func HasDataVolumeProvisioning(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedIndexInformer) bool {
func HasDataVolumeProvisioning(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedInformer) bool {
for _, volume := range volumes {
if volume.DataVolume == nil {
continue
Expand Down Expand Up @@ -262,7 +262,7 @@ func HasDataVolumeProvisioning(namespace string, volumes []virtv1.Volume, dataVo
return false
}

func ListDataVolumesFromTemplates(namespace string, dvTemplates []virtv1.DataVolumeTemplateSpec, dataVolumeInformer cache.SharedIndexInformer) ([]*cdiv1.DataVolume, error) {
func ListDataVolumesFromTemplates(namespace string, dvTemplates []virtv1.DataVolumeTemplateSpec, dataVolumeInformer cache.SharedInformer) ([]*cdiv1.DataVolume, error) {
dataVolumes := []*cdiv1.DataVolume{}

for _, template := range dvTemplates {
Expand All @@ -279,7 +279,7 @@ func ListDataVolumesFromTemplates(namespace string, dvTemplates []virtv1.DataVol
return dataVolumes, nil
}

func ListDataVolumesFromVolumes(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedIndexInformer, pvcInformer cache.SharedIndexInformer) ([]*cdiv1.DataVolume, error) {
func ListDataVolumesFromVolumes(namespace string, volumes []virtv1.Volume, dataVolumeInformer cache.SharedInformer, pvcInformer cache.SharedInformer) ([]*cdiv1.DataVolume, error) {
dataVolumes := []*cdiv1.DataVolume{}

for _, volume := range volumes {
Expand All @@ -301,7 +301,7 @@ func ListDataVolumesFromVolumes(namespace string, volumes []virtv1.Volume, dataV
return dataVolumes, nil
}

func getDataVolumeName(namespace string, volume virtv1.Volume, pvcInformer cache.SharedIndexInformer) *string {
func getDataVolumeName(namespace string, volume virtv1.Volume, pvcInformer cache.SharedInformer) *string {
if volume.VolumeSource.PersistentVolumeClaim != nil {
pvcInterface, pvcExists, _ := pvcInformer.GetStore().
GetByKey(fmt.Sprintf("%s/%s", namespace, volume.VolumeSource.PersistentVolumeClaim.ClaimName))
Expand All @@ -318,7 +318,7 @@ func getDataVolumeName(namespace string, volume virtv1.Volume, pvcInformer cache
return nil
}

func DataVolumeByNameFunc(dataVolumeInformer cache.SharedIndexInformer, dataVolumes []*cdiv1.DataVolume) func(name string, namespace string) (*cdiv1.DataVolume, error) {
func DataVolumeByNameFunc(dataVolumeInformer cache.SharedInformer, dataVolumes []*cdiv1.DataVolume) func(name string, namespace string) (*cdiv1.DataVolume, error) {
return func(name, namespace string) (*cdiv1.DataVolume, error) {
for _, dataVolume := range dataVolumes {
if dataVolume.Name == name && dataVolume.Namespace == namespace {
Expand Down
8 changes: 4 additions & 4 deletions pkg/storage/types/pvc.go
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ func VirtVolumesToPVCMap(volumes []*virtv1.Volume, pvcStore cache.Store, namespa
return volumeNamesPVCMap, nil
}

func GetPersistentVolumeClaimFromCache(namespace, name string, pvcInformer cache.SharedIndexInformer) (*k8sv1.PersistentVolumeClaim, error) {
func GetPersistentVolumeClaimFromCache(namespace, name string, pvcInformer cache.SharedInformer) (*k8sv1.PersistentVolumeClaim, error) {
key := controller.NamespacedKey(namespace, name)
obj, exists, err := pvcInformer.GetStore().GetByKey(key)

Expand All @@ -145,7 +145,7 @@ func GetPersistentVolumeClaimFromCache(namespace, name string, pvcInformer cache

return pvc, nil
}
func HasPVCBinding(namespace string, volumes []virtv1.Volume, pvcInformer cache.SharedIndexInformer) bool {
func HasUnboundPVC(namespace string, volumes []virtv1.Volume, pvcInformer cache.SharedInformer) bool {
for _, volume := range volumes {
claimName := PVCNameFromVirtVolume(&volume)
if claimName == "" {
Expand All @@ -169,7 +169,7 @@ func HasPVCBinding(namespace string, volumes []virtv1.Volume, pvcInformer cache.
return false
}

func VolumeReadyToAttachToNode(namespace string, volume virtv1.Volume, dataVolumes []*cdiv1.DataVolume, dataVolumeInformer, pvcInformer cache.SharedIndexInformer) (bool, bool, error) {
func VolumeReadyToAttachToNode(namespace string, volume virtv1.Volume, dataVolumes []*cdiv1.DataVolume, dataVolumeInformer, pvcInformer cache.SharedInformer) (bool, bool, error) {
name := PVCNameFromVirtVolume(&volume)

dataVolumeFunc := DataVolumeByNameFunc(dataVolumeInformer, dataVolumes)
Expand Down Expand Up @@ -199,7 +199,7 @@ func VolumeReadyToAttachToNode(namespace string, volume virtv1.Volume, dataVolum
return ready, wffc, nil
}

func GeneratePVC(size *resource.Quantity, claimName, namespace, storageClass, accessMode string, blockVolume bool) *k8sv1.PersistentVolumeClaim {
func RenderPVC(size *resource.Quantity, claimName, namespace, storageClass, accessMode string, blockVolume bool) *k8sv1.PersistentVolumeClaim {
pvc := &k8sv1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
Expand Down
14 changes: 7 additions & 7 deletions pkg/virt-controller/watch/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ import (

"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/instancetype"
typesutil "kubevirt.io/kubevirt/pkg/storage/types"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/migrations"
"kubevirt.io/kubevirt/pkg/util/status"
Expand Down Expand Up @@ -303,7 +303,7 @@ func (c *VMController) execute(key string) error {
}
}

dataVolumes, err := typesutil.ListDataVolumesFromTemplates(vm.Namespace, vm.Spec.DataVolumeTemplates, c.dataVolumeInformer)
dataVolumes, err := storagetypes.ListDataVolumesFromTemplates(vm.Namespace, vm.Spec.DataVolumeTemplates, c.dataVolumeInformer)
if err != nil {
logger.Reason(err).Error("Failed to fetch dataVolumes for namespace from cache.")
return err
Expand Down Expand Up @@ -386,7 +386,7 @@ func (c *VMController) handleDataVolumes(vm *virtv1.VirtualMachine, dataVolumes
}
if !exists {
// Don't create DV if PVC already exists
pvc, err := typesutil.GetPersistentVolumeClaimFromCache(vm.Namespace, template.Name, c.pvcInformer)
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vm.Namespace, template.Name, c.pvcInformer)
if err != nil {
return false, err
}
Expand Down Expand Up @@ -518,7 +518,7 @@ func needUpdatePVCMemoryDumpAnnotation(pvc *k8score.PersistentVolumeClaim, reque

func (c *VMController) updatePVCMemoryDumpAnnotation(vm *virtv1.VirtualMachine) error {
request := vm.Status.MemoryDumpRequest
pvc, err := typesutil.GetPersistentVolumeClaimFromCache(vm.Namespace, request.ClaimName, c.pvcInformer)
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vm.Namespace, request.ClaimName, c.pvcInformer)
if err != nil {
log.Log.Object(vm).Errorf("Error getting PersistentVolumeClaim to update memory dump annotation: %v", err)
return err
Expand Down Expand Up @@ -1797,7 +1797,7 @@ func (c *VMController) isVirtualMachineStatusStopped(vm *virtv1.VirtualMachine,

// isVirtualMachineStatusProvisioning determines whether the VM status field should be set to "Provisioning".
func (c *VMController) isVirtualMachineStatusProvisioning(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return typesutil.HasDataVolumeProvisioning(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeInformer)
return storagetypes.HasDataVolumeProvisioning(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeInformer)
}

// isVirtualMachineStatusWaitingForVolumeBinding
Expand All @@ -1806,7 +1806,7 @@ func (c *VMController) isVirtualMachineStatusWaitingForVolumeBinding(vm *virtv1.
return false
}

return typesutil.HasPVCBinding(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.pvcInformer)
return storagetypes.HasUnboundPVC(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.pvcInformer)
}

// isVirtualMachineStatusStarting determines whether the VM status field should be set to "Starting".
Expand Down Expand Up @@ -1888,7 +1888,7 @@ func (c *VMController) isVirtualMachineStatusPvcNotFound(vm *virtv1.VirtualMachi

// isVirtualMachineStatusDataVolumeError determines whether the VM status field should be set to "DataVolumeError"
func (c *VMController) isVirtualMachineStatusDataVolumeError(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
err := typesutil.HasDataVolumeErrors(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeInformer)
err := storagetypes.HasDataVolumeErrors(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeInformer)
if err != nil {
log.Log.Object(vm).Errorf("%v", err)
return true
Expand Down
38 changes: 19 additions & 19 deletions pkg/virt-controller/watch/vmi.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ import (
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/network/sriov"
"kubevirt.io/kubevirt/pkg/network/vmispec"
kubevirttypes "kubevirt.io/kubevirt/pkg/storage/types"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
patchtypes "kubevirt.io/kubevirt/pkg/util/types"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
Expand Down Expand Up @@ -310,7 +310,7 @@ func (c *VMIController) execute(key string) error {
}

// Get all dataVolumes associated with this vmi
dataVolumes, err := kubevirttypes.ListDataVolumesFromVolumes(vmi.Namespace, vmi.Spec.Volumes, c.dataVolumeInformer, c.pvcInformer)
dataVolumes, err := storagetypes.ListDataVolumesFromVolumes(vmi.Namespace, vmi.Spec.Volumes, c.dataVolumeInformer, c.pvcInformer)
if err != nil {
logger.Reason(err).Error("Failed to fetch dataVolumes for namespace from cache.")
return err
Expand Down Expand Up @@ -396,12 +396,12 @@ func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8
key := controller.VirtualMachineInstanceKey(vmi)
defer virtControllerVMIWorkQueueTracer.StepTrace(key, "updateStatus", trace.Field{Key: "VMI Name", Value: vmi.Name})

hasFailedDataVolume := kubevirttypes.HasFailedDataVolumes(dataVolumes)
hasFailedDataVolume := storagetypes.HasFailedDataVolumes(dataVolumes)

hasWffcDataVolume := false
// there is no reason to check for waitForFirstConsumer if there are failed DVs
if !hasFailedDataVolume {
hasWffcDataVolume = kubevirttypes.HasWFFCDataVolumes(dataVolumes)
hasWffcDataVolume = storagetypes.HasWFFCDataVolumes(dataVolumes)
}

conditionManager := controller.NewVirtualMachineInstanceConditionManager()
Expand Down Expand Up @@ -1041,7 +1041,7 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod,
} else {
templatePod, err = c.templateService.RenderLaunchManifest(vmi)
}
if _, ok := err.(kubevirttypes.PvcNotFoundError); ok {
if _, ok := err.(storagetypes.PvcNotFoundError); ok {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedPvcNotFoundReason, failedToRenderLaunchManifestErrFormat, err)
return &syncErrorImpl{fmt.Errorf(failedToRenderLaunchManifestErrFormat, err), FailedPvcNotFoundReason}
} else if err != nil {
Expand Down Expand Up @@ -1112,10 +1112,10 @@ func (c *VMIController) handleSyncDataVolumes(vmi *virtv1.VirtualMachineInstance
for _, volume := range vmi.Spec.Volumes {
// Check both DVs and PVCs
if volume.VolumeSource.DataVolume != nil || volume.VolumeSource.PersistentVolumeClaim != nil {
volumeReady, volumeWffc, err := kubevirttypes.VolumeReadyToAttachToNode(vmi.Namespace, volume, dataVolumes, c.dataVolumeInformer, c.pvcInformer)
volumeReady, volumeWffc, err := storagetypes.VolumeReadyToAttachToNode(vmi.Namespace, volume, dataVolumes, c.dataVolumeInformer, c.pvcInformer)
if err != nil {
// Keep existing behavior of missing PVC = ready. This in turn triggers template render, which sets conditions and events, and fails appropriately
if _, ok := err.(kubevirttypes.PvcNotFoundError); ok {
if _, ok := err.(storagetypes.PvcNotFoundError); ok {
continue
} else {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedPvcNotFoundReason, "Error determining if volume is ready: %v", err)
Expand Down Expand Up @@ -1639,7 +1639,7 @@ func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, ho
// Find all ready volumes
for _, volume := range hotplugVolumes {
var err error
ready, wffc, err := kubevirttypes.VolumeReadyToAttachToNode(vmi.Namespace, *volume, dataVolumes, c.dataVolumeInformer, c.pvcInformer)
ready, wffc, err := storagetypes.VolumeReadyToAttachToNode(vmi.Namespace, *volume, dataVolumes, c.dataVolumeInformer, c.pvcInformer)
if err != nil {
return &syncErrorImpl{fmt.Errorf("Error determining volume status %v", err), PVCNotReadyReason}
}
Expand Down Expand Up @@ -1811,7 +1811,7 @@ func (c *VMIController) createAttachmentPodTemplate(vmi *virtv1.VirtualMachineIn
var pod *k8sv1.Pod
var err error

volumeNamesPVCMap, err := kubevirttypes.VirtVolumesToPVCMap(volumes, c.pvcInformer.GetStore(), virtlauncherPod.Namespace)
volumeNamesPVCMap, err := storagetypes.VirtVolumesToPVCMap(volumes, c.pvcInformer.GetStore(), virtlauncherPod.Namespace)
if err != nil {
return nil, fmt.Errorf("failed to get PVC map: %v", err)
}
Expand Down Expand Up @@ -1840,12 +1840,12 @@ func (c *VMIController) createAttachmentPodTemplate(vmi *virtv1.VirtualMachineIn
}

func (c *VMIController) createAttachmentPopulateTriggerPodTemplate(volume *virtv1.Volume, virtlauncherPod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) (*k8sv1.Pod, error) {
claimName := kubevirttypes.PVCNameFromVirtVolume(volume)
claimName := storagetypes.PVCNameFromVirtVolume(volume)
if claimName == "" {
return nil, errors.New("Unable to hotplug, claim not PVC or Datavolume")
}

pvc, exists, isBlock, err := kubevirttypes.IsPVCBlockFromStore(c.pvcInformer.GetStore(), virtlauncherPod.Namespace, claimName)
pvc, exists, isBlock, err := storagetypes.IsPVCBlockFromStore(c.pvcInformer.GetStore(), virtlauncherPod.Namespace, claimName)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -1972,7 +1972,7 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v

if volume.VolumeSource.PersistentVolumeClaim != nil || volume.VolumeSource.DataVolume != nil || volume.VolumeSource.MemoryDump != nil {

pvcName := kubevirttypes.PVCNameFromVirtVolume(&volume)
pvcName := storagetypes.PVCNameFromVirtVolume(&volume)

pvcInterface, pvcExists, _ := c.pvcInformer.GetStore().GetByKey(fmt.Sprintf("%s/%s", vmi.Namespace, pvcName))
if pvcExists {
Expand All @@ -1982,7 +1982,7 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
VolumeMode: pvc.Spec.VolumeMode,
Capacity: pvc.Status.Capacity,
Requests: pvc.Spec.Resources.Requests,
Preallocated: kubevirttypes.IsPreallocated(pvc.ObjectMeta.Annotations),
Preallocated: storagetypes.IsPreallocated(pvc.ObjectMeta.Annotations),
}
filesystemOverhead, err := c.getFilesystemOverhead(pvc)
if err != nil {
Expand Down Expand Up @@ -2024,14 +2024,14 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
// To avoid conflicts, we only allow having one CDI instance
if cdiInstances := len(c.cdiInformer.GetStore().List()); cdiInstances != 1 {
if cdiInstances > 1 {
log.Log.V(3).Object(pvc).Reason(kubevirttypes.ErrMultipleCdiInstances).Infof(kubevirttypes.FSOverheadMsg)
log.Log.V(3).Object(pvc).Reason(storagetypes.ErrMultipleCdiInstances).Infof(storagetypes.FSOverheadMsg)
} else {
log.Log.V(3).Object(pvc).Reason(kubevirttypes.ErrFailedToFindCdi).Infof(kubevirttypes.FSOverheadMsg)
log.Log.V(3).Object(pvc).Reason(storagetypes.ErrFailedToFindCdi).Infof(storagetypes.FSOverheadMsg)
}
return kubevirttypes.DefaultFSOverhead, nil
return storagetypes.DefaultFSOverhead, nil
}

cdiConfigInterface, cdiConfigExists, err := c.cdiConfigInformer.GetStore().GetByKey(kubevirttypes.ConfigName)
cdiConfigInterface, cdiConfigExists, err := c.cdiConfigInformer.GetStore().GetByKey(storagetypes.ConfigName)
if !cdiConfigExists || err != nil {
return "0", fmt.Errorf("Failed to find CDIConfig but CDI exists: %w", err)
}
Expand All @@ -2040,7 +2040,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
return "0", fmt.Errorf("Failed to convert CDIConfig object %v to type CDIConfig", cdiConfigInterface)
}

return kubevirttypes.GetFilesystemOverhead(pvc.Spec.VolumeMode, pvc.Spec.StorageClassName, cdiConfig), nil
return storagetypes.GetFilesystemOverhead(pvc.Spec.VolumeMode, pvc.Spec.StorageClassName, cdiConfig), nil
}

func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
Expand All @@ -2060,7 +2060,7 @@ func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachm
}

func (c *VMIController) getVolumePhaseMessageReason(volume *virtv1.Volume, namespace string) (virtv1.VolumePhase, string, string) {
claimName := kubevirttypes.PVCNameFromVirtVolume(volume)
claimName := storagetypes.PVCNameFromVirtVolume(volume)

pvcInterface, pvcExists, _ := c.pvcInformer.GetStore().GetByKey(fmt.Sprintf("%s/%s", namespace, claimName))
if !pvcExists {
Expand Down
2 changes: 1 addition & 1 deletion pkg/virtctl/imageupload/imageupload.go
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,7 @@ func createUploadPVC(client kubernetes.Interface, namespace, name, size, storage
if err != nil {
return nil, fmt.Errorf("validation failed for size=%s: %s", size, err)
}
pvc := storagetypes.GeneratePVC(&quantity, name, namespace, storageClass, accessMode, blockVolume)
pvc := storagetypes.RenderPVC(&quantity, name, namespace, storageClass, accessMode, blockVolume)

contentType := string(cdiv1.DataVolumeKubeVirt)
if archiveUpload {
Expand Down
2 changes: 1 addition & 1 deletion pkg/virtctl/vm/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -474,7 +474,7 @@ func createPVCforMemoryDump(namespace, vmName, claimName string, virtClient kube
return err
}

pvc := storagetypes.GeneratePVC(neededSize, claimName, namespace, storageClass, accessMode, false)
pvc := storagetypes.RenderPVC(neededSize, claimName, namespace, storageClass, accessMode, false)

_, err = virtClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
if err != nil {
Expand Down

0 comments on commit 85519c5

Please sign in to comment.