Commit

Addressed review comments
divyenpatel committed Feb 14, 2018
1 parent c0490fa commit 8823c22
Showing 9 changed files with 37 additions and 53 deletions.
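In summary, the changes below replace the node-selection boilerplate repeated across the vSphere e2e tests with a single GetReadySchedulableRandomNodeInfo() helper, fold the isDiskAttached wrapper into diskIsAttached, switch bootstrapOnce() to listing all nodes from the API server, and make detachVolume re-read the pod to capture its node name before deleting it.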
5 changes: 1 addition & 4 deletions test/e2e/storage/volumes.go
@@ -504,10 +504,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
nodeInfo := vspheretest.TestContext.NodeMapper.GetNodeInfo(nodeList.Items[0].Name)

nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
9 changes: 6 additions & 3 deletions test/e2e/storage/vsphere/bootstrap.go
@@ -17,6 +17,7 @@ limitations under the License.
package vsphere

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"sync"
)
@@ -43,10 +44,12 @@ func bootstrapOnce() {
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
// 2. Get all ready nodes
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
// 2. Get all nodes
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}

// 3. Get Node to VSphere mapping
err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
if err != nil {
6 changes: 1 addition & 5 deletions test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -51,11 +51,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil
pvc = nil
volumePath = ""
6 changes: 1 addition & 5 deletions test/e2e/storage/vsphere/pvc_label_selector.go
@@ -63,11 +63,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
c = f.ClientSet
ns = f.Namespace.Name
Bootstrap(f)
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
nodeInfo = GetReadySchedulableRandomNodeInfo()
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
36 changes: 20 additions & 16 deletions test/e2e/storage/vsphere/vsphere_utils.go
@@ -18,6 +18,7 @@ package vsphere

import (
"fmt"
"math/rand"
"path/filepath"
"time"

@@ -513,18 +514,6 @@ func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
return vDiskPath
}

// isDiskAttached checks if disk is attached to the VM.
func isDiskAttached(ctx context.Context, vm *object.VirtualMachine, diskPath string) (bool, error) {
device, err := getVirtualDeviceByPath(ctx, vm, diskPath)
if err != nil {
return false, err
}
if device != nil {
return true, nil
}
return false, nil
}

// getVirtualDeviceByPath gets the virtual device by path
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
@@ -541,6 +530,8 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
} else {
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
}
}
@@ -715,21 +706,34 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Connect(ctx, nodeInfo.VSphere)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
attached, err := isDiskAttached(ctx, vm, volPath)
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
if err != nil {
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
nodeName)
return false, err
}
if device != nil {
framework.Logf("diskIsAttached found the disk %q attached on node %q",
volPath,
nodeName)
}
framework.Logf("diskIsAttached result: %v and error: %v, for volume: %s", attached, err, volPath)
return attached, err
return true, nil
}

func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}

func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
rand.Seed(time.Now().Unix())
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)
Expect(nodeInfo).NotTo(BeNil())
return nodeInfo
}
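
For reference, a minimal sketch of how the BeforeEach blocks changed in this commit now call the new helper (mirroring pv_reclaimpolicy.go above; nodeInfo is the test-scoped variable each spec already declares, and Bootstrap(f) must run first so that the framework handle used by the helper and TestContext.NodeMapper are initialized):

BeforeEach(func() {
	framework.SkipUnlessProviderIs("vsphere")
	Bootstrap(f)
	// Pick one ready, schedulable node at random and resolve its vSphere mapping.
	nodeInfo = GetReadySchedulableRandomNodeInfo()
})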
9 changes: 1 addition & 8 deletions test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go
@@ -18,9 +18,6 @@ package vsphere

import (
"fmt"
"math/rand"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,11 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
rand.Seed(time.Now().Unix())
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)

nodeInfo = GetReadySchedulableRandomNodeInfo()
scParameters = make(map[string]string)
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
})
10 changes: 1 addition & 9 deletions test/e2e/storage/vsphere/vsphere_volume_diskformat.go
@@ -17,9 +17,7 @@ limitations under the License.
package vsphere

import (
"math/rand"
"path/filepath"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -70,14 +68,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
rand.Seed(time.Now().Unix())
nodeName = nodeList.Items[rand.Int()%len(nodeList.Items)].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
5 changes: 4 additions & 1 deletion test/e2e/storage/vsphere/vsphere_volume_fstype.go
@@ -175,11 +175,14 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
}

func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil())
nodeName := pod.Spec.NodeName
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)

By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(volPath, pod.Spec.NodeName)
waitForVSphereDiskToDetach(volPath, nodeName)
}

func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
4 changes: 2 additions & 2 deletions test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -56,9 +56,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
