Fix deployment lifecycle and ReplicaSet test issue
The pause image should not be run with a sleep command; doing so causes
the pod to fail to start correctly.

See details in issue kubernetes#100047. We observed some unexpected
deployment behavior in certain cases, such as new pods failing to start
correctly; this will be investigated further.

Change-Id: I9761bbefa694f6fe51a6f1e7561fa7e566ce4d8f
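
For context, a minimal sketch of why the sleep override breaks pod startup, assuming the distroless pause image (pinned to k8s.gcr.io/pause:3.4.1 here purely for illustration; the e2e tests resolve it via imageutils.GetE2EImage(imageutils.Pause)), which ships only the /pause binary and no shell:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// pauseContainer returns a container spec that lets the pause image run its
// default entrypoint (/pause). Overriding Command with something like
// []string{"/bin/sleep", "100000"} fails at runtime: the image contains no
// /bin/sleep, so the container never starts and the pod never becomes ready.
func pauseContainer(name string) v1.Container {
    return v1.Container{
        Name:  name,
        Image: "k8s.gcr.io/pause:3.4.1", // assumed tag, for illustration only
        // No Command override: the default entrypoint is the only executable present.
    }
}

func main() {
    fmt.Printf("%+v\n", pauseContainer("test-deployment"))
}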
jingxu97 committed Mar 25, 2021
1 parent fcee7a0 commit 7875595
Showing 2 changed files with 50 additions and 13 deletions.
16 changes: 12 additions & 4 deletions test/e2e/apps/deployment.go
@@ -164,6 +164,9 @@ var _ = SIGDescribe("Deployment", func() {
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
// See https://github.com/kubernetes/kubernetes/issues/29229
+ // Add UnavailableReplicas check because ReadyReplicas or UpdatedReplicas might not represent
+ // the actual number of pods running successfully if some pods failed to start after update or patch.
+ // See issue #100192

/*
Release: v1.20
@@ -271,9 +274,8 @@ var _ = SIGDescribe("Deployment", func() {
"spec": map[string]interface{}{
"TerminationGracePeriodSeconds": &zero,
"containers": [1]map[string]interface{}{{
"name": testDeploymentName,
"image": testDeploymentPatchImage,
"command": []string{"/bin/sleep", "100000"},
"name": testDeploymentName,
"image": testDeploymentPatchImage,
}},
},
},
@@ -310,6 +312,8 @@ var _ = SIGDescribe("Deployment", func() {
found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
deployment.Status.ReadyReplicas == testDeploymentMinimumReplicas &&
+ deployment.Status.UpdatedReplicas == testDeploymentMinimumReplicas &&
+ deployment.Status.UnavailableReplicas == 0 &&
deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentPatchImage
if !found {
framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
@@ -386,7 +390,9 @@ var _ = SIGDescribe("Deployment", func() {
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
- deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas
+ deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas &&
+ deployment.Status.UpdatedReplicas == testDeploymentDefaultReplicas &&
+ deployment.Status.UnavailableReplicas == 0
if !found {
framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v and labels %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas, deployment.ObjectMeta.Labels)
}
@@ -439,6 +445,8 @@ var _ = SIGDescribe("Deployment", func() {
found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas &&
+ deployment.Status.UpdatedReplicas == testDeploymentDefaultReplicas &&
+ deployment.Status.UnavailableReplicas == 0 &&
deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentUpdateImage
if !found {
framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
47 changes: 38 additions & 9 deletions test/e2e/apps/replica_set.go
@@ -25,6 +25,9 @@ import (
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -445,13 +448,23 @@ func testRSLifeCycle(f *framework.Framework) {
}

rsName := "test-rs"
+ label := "test-rs=patched"
+ labelMap := map[string]string{"test-rs": "patched"}
replicas := int32(1)
rsPatchReplicas := int32(3)
rsPatchImage := imageutils.GetE2EImage(imageutils.Pause)

+ w := &cache.ListWatch{
+     WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+         options.LabelSelector = label
+         return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(context.TODO(), options)
+     },
+ }
+ rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: label})
+ framework.ExpectNoError(err, "failed to list rsList")
// Create a ReplicaSet
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
- _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
+ _, err = c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
framework.ExpectNoError(err)

// Verify that the required pods have come up.
@@ -470,17 +483,16 @@ func testRSLifeCycle(f *framework.Framework) {
ginkgo.By("patching the ReplicaSet")
rsPatch, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"labels": map[string]string{"test-rs": "patched"},
"labels": labelMap,
},
"spec": map[string]interface{}{
"replicas": rsPatchReplicas,
"template": map[string]interface{}{
"spec": map[string]interface{}{
"TerminationGracePeriodSeconds": &zero,
"containers": [1]map[string]interface{}{{
"name": rsName,
"image": rsPatchImage,
"command": []string{"/bin/sleep", "100000"},
"name": rsName,
"image": rsPatchImage,
}},
},
},
@@ -490,8 +502,25 @@ func testRSLifeCycle(f *framework.Framework) {
_, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ReplicaSet")

- rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
- framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
- framework.ExpectEqual(*(rs.Spec.Replicas), rsPatchReplicas, "replicaset should have 3 replicas")
- framework.ExpectEqual(rs.Spec.Template.Spec.Containers[0].Image, rsPatchImage, "replicaset not using rsPatchImage. Is using %v", rs.Spec.Template.Spec.Containers[0].Image)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ _, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
+     if rset, ok := event.Object.(*appsv1.ReplicaSet); ok {
+         found := rset.ObjectMeta.Name == rsName &&
+             rset.ObjectMeta.Labels["test-rs"] == "patched" &&
+             rset.Status.ReadyReplicas == rsPatchReplicas &&
+             rset.Status.AvailableReplicas == rsPatchReplicas &&
+             rset.Spec.Template.Spec.Containers[0].Image == rsPatchImage
+         if !found {
+             framework.Logf("observed ReplicaSet %v in namespace %v with ReadyReplicas %v, AvailableReplicas %v", rset.ObjectMeta.Name, rset.ObjectMeta.Namespace, rset.Status.ReadyReplicas, rset.Status.AvailableReplicas)
+         } else {
+             framework.Logf("observed ReplicaSet %v in namespace %v with ReadyReplicas %v, found %v", rset.ObjectMeta.Name, rset.ObjectMeta.Namespace, rset.Status.ReadyReplicas, found)
+         }
+         return found, nil
+     }
+     return false, nil
+ })
+
+ framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", rs.Name, ns, rsPatchReplicas)
}
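
The replica_set.go change above swaps a one-shot Get plus ExpectEqual assertion for a watch, so the test tolerates pods becoming ready asynchronously after the patch. A self-contained sketch of that list-then-watch pattern outside the e2e framework (waitForReplicaSet, the kubeconfig wiring, and the hard-coded selector are illustrative assumptions, not part of the commit):

package main

import (
    "context"
    "fmt"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
    watchtools "k8s.io/client-go/tools/watch"
)

// waitForReplicaSet blocks until a ReplicaSet matching labelSelector reports
// the desired number of ready and available replicas, or the timeout elapses.
// It mirrors the structure of the e2e change: list once to capture a
// ResourceVersion, then watch from that version so no update is missed.
func waitForReplicaSet(cs kubernetes.Interface, ns, labelSelector string, want int32, timeout time.Duration) error {
    lw := &cache.ListWatch{
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            options.LabelSelector = labelSelector
            return cs.AppsV1().ReplicaSets(ns).Watch(context.TODO(), options)
        },
    }
    list, err := cs.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
    if err != nil {
        return err
    }
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()
    _, err = watchtools.Until(ctx, list.ResourceVersion, lw, func(event watch.Event) (bool, error) {
        rs, ok := event.Object.(*appsv1.ReplicaSet)
        if !ok {
            return false, nil
        }
        // Done once both ready and available counts match the desired replicas.
        return rs.Status.ReadyReplicas == want && rs.Status.AvailableReplicas == want, nil
    })
    return err
}

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    cs := kubernetes.NewForConfigOrDie(config)
    if err := waitForReplicaSet(cs, "default", "test-rs=patched", 3, 30*time.Second); err != nil {
        panic(err)
    }
    fmt.Println("ReplicaSet scaled and ready")
}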
