From e18f6cb591cc6d16fc13c9659deba6525ffa0d3e Mon Sep 17 00:00:00 2001
From: Michail Kargakis
Date: Mon, 7 Nov 2016 18:23:09 +0100
Subject: [PATCH] test: set failure traps for all deployment e2e tests

---
 test/e2e/deployment.go     | 38 +++++++++++++++-
 test/e2e/framework/util.go | 91 +++++++++++++-------------------
 test/utils/BUILD           |  1 +
 test/utils/deployment.go   | 15 ++++--
 4 files changed, 75 insertions(+), 70 deletions(-)

diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go
index 822d18d9f6e14..91a78627c321d 100644
--- a/test/e2e/deployment.go
+++ b/test/e2e/deployment.go
@@ -43,6 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/test/e2e/framework"
+	testutil "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -60,10 +61,19 @@ var (
 )
 
 var _ = framework.KubeDescribe("Deployment", func() {
+	var ns string
+	var c clientset.Interface
+
+	AfterEach(func() {
+		failureTrap(c, ns)
+	})
+
 	f := framework.NewDefaultFramework("deployment")
 
-	// TODO: Add failure traps once we have JustAfterEach
-	// See https://github.com/onsi/ginkgo/issues/303
+	BeforeEach(func() {
+		c = f.ClientSet
+		ns = f.Namespace.Name
+	})
 
 	It("deployment reaping should cascade to its replica sets and pods", func() {
 		testDeleteDeployment(f)
@@ -117,6 +127,30 @@ var _ = framework.KubeDescribe("Deployment", func() {
 	// See https://github.com/kubernetes/kubernetes/issues/29229
 })
 
+func failureTrap(c clientset.Interface, ns string) {
+	deployments, err := c.Extensions().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
+	if err != nil {
+		framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
+		return
+	}
+	for i := range deployments.Items {
+		d := deployments.Items[i]
+
+		framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
+		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c)
+		if err != nil {
+			framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
+			return
+		}
+		testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
+		rsList := allOldRSs
+		if newRS != nil {
+			rsList = append(rsList, newRS)
+		}
+		testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
+	}
+}
+
 func intOrStrP(num int) *intstr.IntOrString {
 	intstr := intstr.FromInt(num)
 	return &intstr
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 55986ae2f3dd4..5877b792d28aa 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -63,7 +63,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
-	testutil "k8s.io/kubernetes/test/utils"
 
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
@@ -99,7 +98,7 @@ import (
 	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
-	testutils "k8s.io/kubernetes/test/utils"
+	testutil "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -585,7 +584,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
 				continue
 			}
-			res, err := testutils.PodRunningReady(&pod)
+			res, err := testutil.PodRunningReady(&pod)
 			switch {
 			case res && err == nil:
 				nOk++
@@ -653,7 +652,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
 	}
 	logFunc("Running kubectl logs on non-ready containers in %v", ns)
 	for _, pod := range podList.Items {
-		if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
+		if res, err := testutil.PodRunningReady(&pod); !res || err != nil {
 			kubectlLogPod(c, pod, "", Logf)
 		}
 	}
@@ -776,7 +775,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
 		}
 		Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
 			"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
-			podName, ns, desc, pod.Status.Phase, testutils.PodReady(pod), time.Since(start))
+			podName, ns, desc, pod.Status.Phase, testutil.PodReady(pod), time.Since(start))
 	}
 	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
 }
@@ -2280,25 +2279,25 @@ func (f *Framework) MatchContainerOutput(
 	return nil
 }
 
-func RunDeployment(config testutils.DeploymentConfig) error {
+func RunDeployment(config testutil.DeploymentConfig) error {
 	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunDeployment(config)
+	return testutil.RunDeployment(config)
 }
 
-func RunReplicaSet(config testutils.ReplicaSetConfig) error {
+func RunReplicaSet(config testutil.ReplicaSetConfig) error {
 	By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunReplicaSet(config)
+	return testutil.RunReplicaSet(config)
 }
 
-func RunRC(config testutils.RCConfig) error {
+func RunRC(config testutil.RCConfig) error {
 	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunRC(config)
+	return testutil.RunRC(config)
 }
 
 type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
@@ -2572,7 +2571,7 @@ func GetTTLAnnotationFromNode(node *v1.Node) (time.Duration, bool) {
 }
 
 func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
-	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
+	ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
 }
 
 func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
@@ -2595,10 +2594,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Tai
 // won't fail if target label doesn't exist or has been removed.
 func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
 	By("removing the label " + labelKey + " off the node " + nodeName)
-	ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
 
 	By("verifying the node doesn't have the label " + labelKey)
-	ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
 }
 
 func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
@@ -2719,7 +2718,7 @@ func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind s
 	if err != nil {
 		return err
 	}
-	err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
+	err = testutil.WaitForPodsWithLabelRunning(c, ns, selector)
 	if err != nil {
 		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
 	}
@@ -2736,7 +2735,7 @@ func ScaleDeployment(clientset clientset.Interface, internalClientset internalcl
 
 // Returns true if all the specified pods are scheduled, else returns false.
 func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
-	PodStore := testutils.NewPodStore(c, ns, label, fields.Everything())
+	PodStore := testutil.NewPodStore(c, ns, label, fields.Everything())
 	defer PodStore.Stop()
 	pods := PodStore.List()
 	if len(pods) == 0 {
@@ -2798,7 +2797,7 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 		}
 		current = 0
 		for _, pod := range pods.Items {
-			if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
+			if flag, err := testutil.PodRunningReady(&pod); err == nil && flag == true {
 				current++
 			}
 		}
@@ -3032,8 +3031,8 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
 
 // podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
 // It waits until the reflector does a List() before returning.
-func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
-	ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
+func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
+	ps := testutil.NewPodStore(c, ns, selector, fields.Everything())
 	err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
 		if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
 			return true, nil
@@ -3047,7 +3046,7 @@ func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selec
 // This is to make a fair comparison of deletion time between DeleteRCAndPods
 // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
 // when the pod is inactvie.
-func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
+func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		pods := ps.List()
 		for _, pod := range pods {
@@ -3060,7 +3059,7 @@ func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration
 }
 
 // waitForPodsGone waits until there are no pods left in the PodStore.
-func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
+func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		if pods := ps.List(); len(pods) == 0 {
 			return true, nil
@@ -3243,25 +3242,16 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
 		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
 		maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
 		if totalCreated > maxCreated {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
 			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
 		}
 		minAvailable := deploymentutil.MinAvailable(deployment)
 		if deployment.Status.AvailableReplicas < minAvailable {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
 			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
 		}
 
 		// When the deployment status and its underlying resources reach the desired state, we're done
 		return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
 	})
-
-	if err == wait.ErrWaitTimeout {
-		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-		logPodsOfDeployment(c, deployment, allRSs)
-	}
 	if err != nil {
 		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
 	}
@@ -3325,13 +3315,6 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
 		status = d.Status
 
 		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
-			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c)
-			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c)
-			if err == nil && nerr == nil {
-				Logf("%+v", d)
-				logReplicaSetsOfDeployment(d, allOldRSs, newRS)
-				logPodsOfDeployment(c, d, append(allOldRSs, newRS))
-			}
 			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
 		}
 
@@ -3410,15 +3393,10 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
 	})
 	if pollErr == wait.ErrWaitTimeout {
 		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
-		logReplicaSetsOfDeployment(d, oldRSs, nil)
 	}
 	return pollErr
 }
 
-func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
-	testutil.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
-}
-
 func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
 	return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
 		return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
@@ -3438,19 +3416,10 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
 	})
 	if pollErr == wait.ErrWaitTimeout {
 		pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
-		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
-		if err == nil {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
-		}
 	}
 	return pollErr
 }
 
-func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
-	testutil.LogPodsOfDeployment(c, deployment, rsList, Logf)
-}
-
 // Waits for the number of events on the given object to reach a desired count.
 func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
 	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
@@ -3905,14 +3874,14 @@ func GetSigner(provider string) (ssh.Signer, error) {
 // podNames in namespace ns are running and ready, using c and waiting at most
 // timeout.
 func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReady, "running and ready")
 }
 
 // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
 // listed in podNames in namespace ns are running and ready, or succeeded; use
 // c and waiting at most timeout.
 func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
 }
 
 // CheckPodsCondition returns whether all pods whose names are listed in podNames
@@ -4603,7 +4572,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
 			return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 		}
 	} else {
-		if err := testutils.WaitForPodsWithLabelRunning(
+		if err := testutil.WaitForPodsWithLabelRunning(
 			clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
 			return err
 		}
@@ -5097,22 +5066,22 @@ func ListNamespaceEvents(c clientset.Interface, ns string) error {
 	return nil
 }
 
-// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
+// E2ETestNodePreparer implements testutil.TestNodePreparer interface, which is used
 // to create/modify Nodes before running a test.
 type E2ETestNodePreparer struct {
 	client clientset.Interface
 	// Specifies how many nodes should be modified using the given strategy.
 	// Only one strategy can be applied to a single Node, so there needs to
 	// be at least Nodes in the cluster.
-	countToStrategy       []testutils.CountToStrategy
-	nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
+	countToStrategy       []testutil.CountToStrategy
+	nodeToAppliedStrategy map[string]testutil.PrepareNodeStrategy
 }
 
-func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
+func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutil.CountToStrategy) testutil.TestNodePreparer {
 	return &E2ETestNodePreparer{
 		client:                client,
 		countToStrategy:       countToStrategy,
-		nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
+		nodeToAppliedStrategy: make(map[string]testutil.PrepareNodeStrategy),
 	}
 }
 
@@ -5130,7 +5099,7 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
 	for _, v := range p.countToStrategy {
 		sum += v.Count
 		for ; index < sum; index++ {
-			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
+			if err := testutil.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
 				glog.Errorf("Aborting node preparation: %v", err)
 				return err
 			}
@@ -5148,7 +5117,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
 		name := nodes.Items[i].Name
 		strategy, found := p.nodeToAppliedStrategy[name]
 		if found {
-			if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
+			if err = testutil.DoCleanupNode(p.client, name, strategy); err != nil {
 				glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
 				encounteredError = err
 			}
diff --git a/test/utils/BUILD b/test/utils/BUILD
index 3c94c47188736..79e22e59114f8 100644
--- a/test/utils/BUILD
+++ b/test/utils/BUILD
@@ -30,6 +30,7 @@ go_library(
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/util/labels:go_default_library",
+        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
diff --git a/test/utils/deployment.go b/test/utils/deployment.go
index 152685ad4ccb0..f457369b608e4 100644
--- a/test/utils/deployment.go
+++ b/test/utils/deployment.go
@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/davecgh/go-spew/spew"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api/v1"
@@ -34,15 +36,15 @@ type LogfFn func(format string, args ...interface{})
 
 func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
 	if newRS != nil {
-		logf("New ReplicaSet of Deployment %s:\n%+v", deployment.Name, *newRS)
+		logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
 	} else {
-		logf("New ReplicaSet of Deployment %s is nil.", deployment.Name)
+		logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
 	}
 	if len(allOldRSs) > 0 {
-		logf("All old ReplicaSets of Deployment %s:", deployment.Name)
+		logf("All old ReplicaSets of Deployment %q:", deployment.Name)
 	}
 	for i := range allOldRSs {
-		logf("%+v", *allOldRSs[i])
+		logf(spew.Sprintf("%+v", *allOldRSs[i]))
 	}
 }
 
@@ -53,9 +55,8 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen
 	}
 
 	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
-
 	if err != nil {
-		logf("Failed to list Pods of Deployment %s: %v", deployment.Name, err)
+		logf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
 		return
 	}
 	for _, pod := range podList.Items {
@@ -63,7 +64,7 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen
 		if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
 			availability = "available"
 		}
-		logf("Pod %s is %s:\n%+v", pod.Name, availability, pod)
+		logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
 	}
 }
 
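Note on the pattern, separate from the patch itself: failureTrap is wired in as a plain Ginkgo AfterEach declared before framework.NewDefaultFramework, which registers the framework's own per-spec setup and cleanup; the TODO it replaces pointed at onsi/ginkgo#303 (JustAfterEach) as the eventual cleaner hook. Below is a minimal, self-contained sketch of the same shape using only Ginkgo v1 and Gomega; dumpState, frameworkCleanup, and the namespace value are illustrative placeholders, not code from the patch.

package failuretrap_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// dumpState stands in for failureTrap: in the patch it lists the Deployments
// in the namespace and logs their ReplicaSets and Pods via the test/utils helpers.
func dumpState(ns string) {
	fmt.Printf("dumping diagnostics for namespace %q\n", ns)
}

// frameworkCleanup stands in for the per-spec cleanup that
// framework.NewDefaultFramework registers (namespace deletion and the like).
func frameworkCleanup(ns string) {
	fmt.Printf("cleaning up namespace %q\n", ns)
}

func TestFailureTrap(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "failure trap pattern")
}

var _ = Describe("Deployment-style failure trap", func() {
	var ns string

	// Declared first, mirroring how the patch registers failureTrap before
	// the framework constructor adds its own AfterEach.
	AfterEach(func() {
		dumpState(ns)
	})

	// Stands in for the cleanup the framework constructor would register.
	AfterEach(func() {
		frameworkCleanup(ns)
	})

	BeforeEach(func() {
		ns = "e2e-tests-deployment-0000" // placeholder namespace name
	})

	It("has a namespace to report on", func() {
		Expect(ns).NotTo(BeEmpty())
	})
})

In the real tests the trap body is the failureTrap function added in test/e2e/deployment.go above, which walks every Deployment in the namespace and dumps it, its ReplicaSets, and its Pods through the new test/utils helpers.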