#!/usr/bin/env bash
# This script runs e2e tests for the case where the canary analysis is skipped
# Prerequisites: Kubernetes Kind and Istio
set -o errexit
echo '>>> Initialising canary'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    portDiscovery: true
  skipAnalysis: true
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 30
    stepWeight: 10
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/"
          logCmdOutput: "true"
EOF
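# Flagger should pick up the canary resource, create the podinfo-primary
# deployment, and set the canary status to Initialized once the primary is ready.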
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary initialization test passed'
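# Bumping the container image triggers a new canary run. Because skipAnalysis
# is set, Flagger should promote the new version without executing the
# analysis webhooks.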
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
    sleep 10
    kubectl -n istio-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
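# After promotion, Flagger routes all traffic back to the primary, scales the
# canary workload down, and sets the canary status to Succeeded.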
echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary promotion test passed'
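# When called with the "canary" argument, stop after the promotion test and
# skip the failure-handling checks below.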
if [[ "$1" = "canary" ]]; then
exit 0
fi
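# A non-existent image keeps the canary pods from ever becoming ready, so the
# progress deadline should trip and Flagger should mark the canary as Failed.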
echo '>>> Triggering canary deployment with a bad release (non-existent Docker image)'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/potato:1.0.0
echo '>>> Waiting for canary to fail'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl get canary/podinfo -n test -o=jsonpath='{.status.phase}' | grep 'Failed' && ok=true || ok=false
    sleep 10
    kubectl -n istio-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
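# The failed rollout must not touch the primary: it should still run one
# replica of the last promoted version.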
echo '>>> Confirm primary pod is still running and with correct version'
retries=50
count=0
okImage=false
okRunning=false
until ${okImage} && ${okRunning}; do
    kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && okRunning=true || okRunning=false
    kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && okImage=true || okImage=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
kubectl -n istio-system logs deployment/flagger
echo '✔ All tests passed'