
Commit

move funs of framework/deployment to e2e/apps
SataQiu committed Dec 9, 2019
1 parent 2fbe432 commit 10b1738
Showing 5 changed files with 73 additions and 110 deletions.
75 changes: 73 additions & 2 deletions test/e2e/apps/deployment.go
@@ -17,6 +17,7 @@ limitations under the License.
 package apps
 
 import (
+    "context"
     "fmt"
     "math/rand"
     "time"
@@ -36,6 +37,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
     clientset "k8s.io/client-go/kubernetes"
+    watchtools "k8s.io/client-go/tools/watch"
     appsinternal "k8s.io/kubernetes/pkg/apis/apps"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -48,6 +50,7 @@ import (
 )
 
 const (
+    poll          = 2 * time.Second
     dRetryPeriod  = 2 * time.Second
     dRetryTimeout = 5 * time.Minute
 )
@@ -333,7 +336,7 @@ func testRecreateDeployment(f *framework.Framework) {
     framework.ExpectNoError(err)
 
     framework.Logf("Watching deployment %q to verify that new pods will not run with old pods", deploymentName)
-    err = e2edeploy.WatchRecreateDeployment(c, deployment)
+    err = watchRecreateDeployment(c, deployment)
     framework.ExpectNoError(err)
 }

@@ -403,7 +406,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
     framework.ExpectNoError(err)
 
     ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
-    err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
+    err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
     framework.ExpectNoError(err)
 }

@@ -1018,3 +1021,71 @@ func setAffinities(d *appsv1.Deployment, setAffinity bool) {
     }
     d.Spec.Template.Spec.Affinity = affinity
 }
+
+// watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time as
+// old pods.
+func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
+    if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
+        return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
+    }
+
+    w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
+    if err != nil {
+        return err
+    }
+
+    status := d.Status
+
+    condition := func(event watch.Event) (bool, error) {
+        d := event.Object.(*appsv1.Deployment)
+        status = d.Status
+
+        if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
+            _, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
+            newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
+            if err == nil && nerr == nil {
+                framework.Logf("%+v", d)
+                testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
+                testutil.LogPodsOfDeployment(c, d, append(allOldRSs, newRS), framework.Logf)
+            }
+            return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
+        }
+
+        return *(d.Spec.Replicas) == d.Status.Replicas &&
+            *(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
+            d.Generation <= d.Status.ObservedGeneration, nil
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+    defer cancel()
+    _, err = watchtools.UntilWithoutRetry(ctx, w, condition)
+    if err == wait.ErrWaitTimeout {
+        err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
+    }
+    return err
+}
+
+// waitForDeploymentOldRSsNum waits for the deployment to clean up old ReplicaSets.
+func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
+    var oldRSs []*appsv1.ReplicaSet
+    var d *appsv1.Deployment
+
+    pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
+        deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        d = deployment
+
+        _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
+        if err != nil {
+            return false, err
+        }
+        return len(oldRSs) == desiredRSNum, nil
+    })
+    if pollErr == wait.ErrWaitTimeout {
+        pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
+        testutil.LogReplicaSetsOfDeployment(d, oldRSs, nil, framework.Logf)
+    }
+    return pollErr
+}
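
The two helpers moved above combine two idioms: a SingleObject watch driven by watchtools.UntilWithoutRetry, and a bounded wait.PollImmediate loop. For readers reusing the watch idiom elsewhere, here is a minimal, self-contained sketch against the client-go API of this era; waitForObservedGeneration is a hypothetical helper name, not part of this commit:

package apps

import (
    "context"
    "fmt"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    clientset "k8s.io/client-go/kubernetes"
    watchtools "k8s.io/client-go/tools/watch"
)

// waitForObservedGeneration is a hypothetical helper showing the same
// SingleObject + UntilWithoutRetry pattern as watchRecreateDeployment above.
func waitForObservedGeneration(c clientset.Interface, d *appsv1.Deployment) error {
    // Watch only this Deployment, resuming from the ResourceVersion we already hold.
    w, err := c.AppsV1().Deployments(d.Namespace).Watch(
        metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
    if err != nil {
        return err
    }
    // Bound the wait so a stuck controller fails the test instead of hanging it.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel()
    _, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
        dep, ok := event.Object.(*appsv1.Deployment)
        if !ok {
            return false, fmt.Errorf("unexpected object type %T", event.Object)
        }
        // Done once the deployment controller has observed the current spec.
        return dep.Generation <= dep.Status.ObservedGeneration, nil
    })
    return err
}
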
3 changes: 0 additions & 3 deletions test/e2e/framework/deployment/BUILD
@@ -6,7 +6,6 @@ go_library(
     name = "go_default_library",
     srcs = [
         "fixtures.go",
-        "logging.go",
         "wait.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/framework/deployment",
@@ -17,9 +16,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
48 changes: 0 additions & 48 deletions test/e2e/framework/deployment/fixtures.go
@@ -17,18 +17,13 @@ limitations under the License.
 package deployment
 
 import (
-    "context"
     "fmt"
-    "time"
 
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
-    "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/apimachinery/pkg/watch"
     clientset "k8s.io/client-go/kubernetes"
-    watchtools "k8s.io/client-go/tools/watch"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
@@ -45,49 +40,6 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
     return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
 }
 
-// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
-// old pods.
-func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
-    if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
-        return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
-    }
-
-    w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
-    if err != nil {
-        return err
-    }
-
-    status := d.Status
-
-    condition := func(event watch.Event) (bool, error) {
-        d := event.Object.(*appsv1.Deployment)
-        status = d.Status
-
-        if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
-            _, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
-            newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
-            if err == nil && nerr == nil {
-                framework.Logf("%+v", d)
-                logReplicaSetsOfDeployment(d, allOldRSs, newRS)
-                logPodsOfDeployment(c, d, append(allOldRSs, newRS))
-            }
-            return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
-        }
-
-        return *(d.Spec.Replicas) == d.Status.Replicas &&
-            *(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
-            d.Generation <= d.Status.ObservedGeneration, nil
-    }
-
-    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-    defer cancel()
-    _, err = watchtools.UntilWithoutRetry(ctx, w, condition)
-    if err == wait.ErrWaitTimeout {
-        err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
-    }
-    return err
-}
-
 // NewDeployment returns a deployment spec with the specified argument.
 func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {
     zero := int64(0)
32 changes: 0 additions & 32 deletions test/e2e/framework/deployment/logging.go

This file was deleted.

25 changes: 0 additions & 25 deletions test/e2e/framework/deployment/wait.go
@@ -77,31 +77,6 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
     return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, poll, pollShortTimeout)
 }
 
-// WaitForDeploymentOldRSsNum waits for the deployment to clean up old rcs.
-func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
-    var oldRSs []*appsv1.ReplicaSet
-    var d *appsv1.Deployment
-
-    pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
-        deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        d = deployment
-
-        _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
-        if err != nil {
-            return false, err
-        }
-        return len(oldRSs) == desiredRSNum, nil
-    })
-    if pollErr == wait.ErrWaitTimeout {
-        pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
-        logReplicaSetsOfDeployment(d, oldRSs, nil)
-    }
-    return pollErr
-}
-
 // WaitForDeploymentRevision waits for the deployment to reach the target revision.
 func WaitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
     err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
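
The polling half of the moved code follows the standard wait.PollImmediate shape: check immediately, then poll on an interval up to a timeout, until the condition function reports done. A minimal sketch of that shape, assuming wait.go's existing imports (time, metav1, wait, clientset); waitForAvailableReplicas is a hypothetical helper, not part of this commit:

// waitForAvailableReplicas polls every two seconds, for up to five minutes,
// until the deployment reports at least `want` available replicas.
func waitForAvailableReplicas(c clientset.Interface, ns, name string, want int32) error {
    return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
        d, err := c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err // a hard API error aborts the poll
        }
        return d.Status.AvailableReplicas >= want, nil
    })
}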
