Skip to content

Commit

Permalink
tests: make the vmlifecycle test pick up a custom hostname/docker tag
Browse files Browse the repository at this point in the history
The vmlifecycle test is currently written to assume the Vagrant
setup, with hostname "master" and Docker tag "devel". Allow these to
be overridden via environment variables populated from hack/config-local.sh.

The tests can be run by doing 'make functest', which takes care of
setting the right environment variables.

Signed-off-by: Daniel P. Berrange <[email protected]>
  • Loading branch information
berrange committed Jul 13, 2017
1 parent c5485da commit 031a5a1
Show file tree
Hide file tree
Showing 7 changed files with 35 additions and 40 deletions.
8 changes: 4 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ fmt:
test: build
./hack/build-go.sh test ${WHAT}

functest:
./hack/build-go.sh functest ${WHAT}

clean:
./hack/build-go.sh clean ${WHAT}
rm ./bin -rf
Expand Down Expand Up @@ -56,7 +59,4 @@ vagrant-sync-build: build
vagrant-deploy: vagrant-sync-config vagrant-sync-build
export KUBECTL="cluster/kubectl.sh --core" && ./cluster/deploy.sh

vagrant-test:
./cluster/run_tests.sh

.PHONY: build fmt test clean distclean sync docker manifests vet publish vagrant-sync-config vagrant-sync-build vagrant-deploy vagrant-test
.PHONY: build fmt test clean distclean sync docker manifests vet publish vagrant-sync-config vagrant-sync-build vagrant-deploy functest
2 changes: 1 addition & 1 deletion automation/check-merged.sh
Original file line number Diff line number Diff line change
Expand Up @@ -120,4 +120,4 @@ kubectl get pods
cluster/kubectl.sh version

# Run functional tests
cluster/run_tests.sh --ginkgo.noColor
FUNC_TEST_ARGS="--ginkgo.noColor" hack/build-go.sh functest
23 changes: 0 additions & 23 deletions cluster/run_tests.sh

This file was deleted.

2 changes: 1 addition & 1 deletion docs/getting-started.md
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ up [Vagrant](#vagrant). Then run

```bash
make vagrant-deploy # synchronize with your code, if necessary
make vagrant-test # run the functional tests against the Vagrant VMs
make functest # run the functional tests against the Vagrant VMs
```

## Use
Expand Down
2 changes: 2 additions & 0 deletions hack/build-go.sh
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ fi
if [ $# -eq 0 ]; then
if [ "${target}" = "test" ]; then
(cd pkg; go ${target} -v ./...)
elif [ "${target}" = "functest" ]; then
(cd tests; go test -master=http://${master_ip}:${master_port} -v ./... ${FUNC_TEST_ARGS})
else
(cd pkg; go $target ./...)
(cd tests; go $target ./...)
Expand Down
8 changes: 7 additions & 1 deletion tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,5 +19,11 @@ There is a helper script to run this:

```
# from the git repo root folder
cluster/run_tests.sh
hack/build-go.sh functest
```

Or simply

```
make functest
```
30 changes: 20 additions & 10 deletions tests/vmlifecycle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ package tests_test
import (
"flag"
"net/http"
"os"
"time"

. "github.com/onsi/ginkgo"
Expand All @@ -41,6 +42,15 @@ import (

var _ = Describe("Vmlifecycle", func() {

primaryNodeName := os.Getenv("primary_node_name")
if primaryNodeName == "" {
primaryNodeName = "master"
}
dockerTag := os.Getenv("docker_tag")
if dockerTag == "" {
dockerTag = "latest"
}

flag.Parse()

restClient, err := kubecli.GetRESTClient()
Expand Down Expand Up @@ -91,8 +101,8 @@ var _ = Describe("Vmlifecycle", func() {
}, 30)

It("Should log libvirt start and stop lifecycle events of the domain", func(done Done) {
// Get the pod name of virt-handler running on the master node to inspect its logs later on
handlerNodeSelector := fields.ParseSelectorOrDie("spec.nodeName=master")
// Get the pod name of virt-handler running on the primary node to inspect its logs later on
handlerNodeSelector := fields.ParseSelectorOrDie("spec.nodeName=" + primaryNodeName)
labelSelector, err := labels.Parse("daemon in (virt-handler)")
Expect(err).NotTo(HaveOccurred())
pods, err := coreCli.CoreV1().Pods(api.NamespaceDefault).List(metav1.ListOptions{FieldSelector: handlerNodeSelector.String(), LabelSelector: labelSelector.String()})
Expand All @@ -103,8 +113,8 @@ var _ = Describe("Vmlifecycle", func() {
seconds := int64(30)
logsQuery := coreCli.Pods(api.NamespaceDefault).GetLogs(handlerName, &kubev1.PodLogOptions{SinceSeconds: &seconds})

// Make sure we schedule the VM to master
vm.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": "master"}
// Make sure we schedule the VM to primary node
vm.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": primaryNodeName}

// Start the VM and wait for the confirmation of the start
obj, err := restClient.Post().Resource("vms").Namespace(api.NamespaceDefault).Body(vm).Do().Get()
Expand Down Expand Up @@ -186,7 +196,7 @@ var _ = Describe("Vmlifecycle", func() {
Expect(err).ToNot(HaveOccurred())

time.Sleep(10 * time.Second)
err = pkillAllVms(coreCli, nodeName)
err = pkillAllVms(coreCli, nodeName, dockerTag)
Expect(err).To(BeNil())

tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.WarningEvent, v1.Stopped)
Expand All @@ -209,7 +219,7 @@ var _ = Describe("Vmlifecycle", func() {
Expect(ok).To(BeTrue(), "Object is not of type *v1.VM")
Expect(err).ToNot(HaveOccurred())

err = pkillAllVms(coreCli, nodeName)
err = pkillAllVms(coreCli, nodeName, dockerTag)
Expect(err).To(BeNil())

// Wait for stop event of the VM
Expand All @@ -229,7 +239,7 @@ var _ = Describe("Vmlifecycle", func() {
})
})

func renderPkillAllVmsJob() *kubev1.Pod {
func renderPkillAllVmsJob(dockerTag string) *kubev1.Pod {
job := kubev1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vm-killer",
Expand All @@ -242,7 +252,7 @@ func renderPkillAllVmsJob() *kubev1.Pod {
Containers: []kubev1.Container{
{
Name: "vm-killer",
Image: "kubevirt/vm-killer:devel",
Image: "kubevirt/vm-killer:" + dockerTag,
Command: []string{
"pkill",
"-9",
Expand All @@ -264,8 +274,8 @@ func renderPkillAllVmsJob() *kubev1.Pod {
return &job
}

func pkillAllVms(core *kubernetes.Clientset, node string) error {
job := renderPkillAllVmsJob()
func pkillAllVms(core *kubernetes.Clientset, node, dockerTag string) error {
job := renderPkillAllVmsJob(dockerTag)
job.Spec.NodeName = node
_, err := core.Pods(kubev1.NamespaceDefault).Create(job)

Expand Down

0 comments on commit 031a5a1

Please sign in to comment.