#!/bin/bash
. /functions.sh
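# helper functions used below (file_env, uri_parser, wait_for_cron, do_dump,
# backup_target) come from functions.sh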
if [[ -n "$DB_DUMP_DEBUG" ]]; then
  set -x
fi
# get all variables from environment variables or files (e.g. VARIABLE_NAME_FILE)
# (setting defaults happens here, too)
file_env "DB_SERVER"
file_env "DB_PORT"
file_env "DB_USER"
file_env "DB_PASS"
file_env "DB_NAMES"
file_env "DB_NAMES_EXCLUDE"
file_env "DB_DUMP_FREQ" "1440"
file_env "DB_DUMP_BEGIN" "+0"
file_env "DB_DUMP_DEBUG"
file_env "DB_DUMP_TARGET" "/backup"
file_env "DB_DUMP_BY_SCHEMA"
file_env "DB_DUMP_KEEP_PERMISSIONS" "true"
file_env "DB_RESTORE_TARGET"
file_env "AWS_ENDPOINT_URL"
file_env "AWS_ENDPOINT_OPT"
file_env "AWS_CLI_OPTS"
file_env "AWS_CLI_S3_CP_OPTS"
file_env "AWS_ACCESS_KEY_ID"
file_env "AWS_SECRET_ACCESS_KEY"
file_env "AWS_DEFAULT_REGION"
file_env "OSS_REGION"
file_env "OSS_ENDPOINT_URL"
file_env "OSS_ACCESS_KEY_ID"
file_env "OSS_ACCESS_KEY_SECRET"
file_env "SMB_USER"
file_env "SMB_PASS"
file_env "TMP_PATH" "/tmp"
file_env "COMPRESSION" "gzip"
if [[ -n "$DB_DUMP_DEBUG" ]]; then
  set -x
fi
# ensure it is defined
MYSQLDUMP_OPTS=${MYSQLDUMP_OPTS:-}
# login credentials
if [ -n "${DB_USER}" ]; then
DBUSER="-u${DB_USER}"
else
DBUSER=
fi
if [ -n "${DB_PASS}" ]; then
DBPASS="-p${DB_PASS}"
else
DBPASS=
fi
# database server
if [ -z "${DB_SERVER}" ]; then
echo "DB_SERVER variable is required. Exiting."
exit 1
fi
# database port
if [ -z "${DB_PORT}" ]; then
echo "DB_PORT not provided, defaulting to 3306"
DB_PORT=3306
fi
#
# set compress and decompress commands
COMPRESS=
UNCOMPRESS=
case $COMPRESSION in
  gzip)
    COMPRESS="gzip"
    UNCOMPRESS="gunzip"
    EXTENSION="tgz"
    ;;
  bzip2)
    COMPRESS="bzip2"
    UNCOMPRESS="bzip2 -d"
    EXTENSION="tbz2"
    ;;
  *)
    echo "Unknown compression requested: $COMPRESSION" >&2
    exit 1
    ;;
esac
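# UNCOMPRESS is used for restores below; COMPRESS and EXTENSION are presumably
# consumed by the dump helpers in functions.sh (they are not referenced again
# in this file)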
# temporary dump dir
TMPDIR="${TMP_PATH}/backups"
TMPRESTORE="${TMP_PATH}/restorefile"
# this is global, so has to be set outside
declare -A uri
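# uri_parser (from functions.sh) populates this array with fields such as
# schema, host, user, password, share and sharepath, which are read below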
# if OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET are set, add oss profile
if [[ -n "$OSS_ACCESS_KEY_ID" && -n "$OSS_ACCESS_KEY_SECRET" ]]; then
aws configure set aws_access_key_id $OSS_ACCESS_KEY_ID --profile oss
aws configure set aws_secret_access_key $OSS_ACCESS_KEY_SECRET --profile oss
aws configure set s3.addressing_style virtual --profile oss
fi
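# Alibaba Cloud OSS targets are handled via the AWS CLI: oss:// URLs are
# rewritten to s3:// below and use this "oss" profile with virtual-hosted
# addressing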
if [[ -n "$DB_RESTORE_TARGET" ]]; then
# Execute additional scripts for pre backup restore processing. For example,
# uncompress a tarball that contains the tarballs for the sql dump and a
# wordpress installation.
if [ -d /scripts.d/pre-restore/ ]; then
for i in $(ls /scripts.d/pre-restore/*.sh); do
if [ -x $i ]; then
DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
fi
done
fi
  uri_parser ${DB_RESTORE_TARGET}
  if [[ "${uri[schema]}" == "file" ]]; then
    cp $DB_RESTORE_TARGET $TMPRESTORE 2>/dev/null
  elif [[ "${uri[schema]}" == "s3" ]]; then
    [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL"
    aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE
  elif [[ "${uri[schema]}" == "oss" ]]; then
    DB_RESTORE_TARGET=${DB_RESTORE_TARGET/oss:\/\//s3:\/\/}
    aws --profile oss --region $OSS_REGION --endpoint-url $OSS_ENDPOINT_URL s3 cp "${DB_RESTORE_TARGET}" $TMPRESTORE
  elif [[ "${uri[schema]}" == "smb" ]]; then
    if [[ -n "$SMB_USER" ]]; then
      UPASSARG="-U"
      UPASS="${SMB_USER}%${SMB_PASS}"
    elif [[ -n "${uri[user]}" ]]; then
      UPASSARG="-U"
      UPASS="${uri[user]}%${uri[password]}"
    else
      UPASSARG=
      UPASS=
    fi
    if [[ -n "${uri[userdomain]}" ]]; then
      UDOM="-W ${uri[userdomain]}"
    else
      UDOM=
    fi
    smbclient -N "//${uri[host]}/${uri[share]}" ${UPASSARG} "${UPASS}" ${UDOM} -c "get ${uri[sharepath]} ${TMPRESTORE}"
  fi
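  # at this point $TMPRESTORE should hold the dump fetched from the
  # file/s3/oss/smb source above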
  # did we get a file?
  if [[ -f "$TMPRESTORE" ]]; then
    if [ "$SINGLE_DATABASE" = "true" ]; then
      DBDATABASE="-D$DB_NAMES"
    else
      DBDATABASE=
    fi
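    # SINGLE_DATABASE=true is assumed to mean the dump holds a single schema
    # without CREATE DATABASE/USE statements, so mysql needs -D with the
    # database named in DB_NAMES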
workdir="${TMP_PATH}/restore.$$"
rm -rf $workdir
mkdir -p $workdir
$UNCOMPRESS < $TMPRESTORE | tar -C $workdir -xvf -
RESTORE_OPTS=${RESTORE_OPTS:-}
cat $workdir/* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE
rm -rf $workdir
/bin/rm -f $TMPRESTORE
else
echo "Could not find restore file $DB_RESTORE_TARGET"
exit 1
fi
  # Execute additional scripts for post-restore processing. For example,
  # uncompress a tarball that contains the files of a wordpress installation.
  if [ -d /scripts.d/post-restore/ ]; then
    for i in $(ls /scripts.d/post-restore/*.sh); do
      if [ -x "$i" ]; then
        DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
      fi
    done
  fi
else
  # wait for the next time to start a backup
  # for debugging
  echo "Starting at $(date)"
  last_run=0
  current_time=$(date +"%s")
  freq_time=$(($DB_DUMP_FREQ*60))
  # get the begin time on our date
  # REMEMBER: we are using the basic date package in alpine
  # DB_DUMP_BEGIN can be a delay in minutes (+N) or an absolute time of day;
  # DB_DUMP_CRON, if set, takes precedence
  if [ -n "$DB_DUMP_CRON" ]; then
    # calculate how long until the next cron instance is met
    waittime=$(wait_for_cron "$DB_DUMP_CRON" "$current_time" $last_run)
  elif [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
    target_time=$(($current_time + $waittime))
  else
    today=$(date +"%Y-%m-%d")
    target_time=$(date --date="${today} ${DB_DUMP_BEGIN}" +"%s")
    if [[ "$target_time" -lt "$current_time" ]]; then
      target_time=$(($target_time + 24*60*60))
    fi
    waittime=$(($target_time - $current_time))
  fi
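  # waittime now holds the number of seconds until the first scheduled backup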
  # If RUN_ONCE is set, don't wait
  if [ -z "${RUN_ONCE}" ]; then
    sleep $waittime
    last_run=$(date +"%s")
  fi
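  # RUN_ONCE also makes the loop below exit after a single backup cycle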
  # enter the loop
  exit_code=0
  while true; do
    # make sure the directory exists
    mkdir -p $TMPDIR
    do_dump
    [ $? -ne 0 ] && exit_code=1
    # we can have multiple targets
    for target in ${DB_DUMP_TARGET}; do
      backup_target ${target}
      [ $? -ne 0 ] && exit_code=1
    done
    # remove lingering file
    /bin/rm ${TMPDIR}/${SOURCE}
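    # SOURCE is expected to be set by do_dump (in functions.sh) to the name of
    # the dump file it created; it is not set anywhere in this script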
    # wait, unless RUN_ONCE is set
    current_time=$(date +"%s")
    if [ -n "${RUN_ONCE}" ]; then
      exit $exit_code
    elif [ -n "${DB_DUMP_CRON}" ]; then
      waittime=$(wait_for_cron "${DB_DUMP_CRON}" "$current_time" $last_run)
    else
      current_time=$(date +"%s")
      # Calculate how long the previous backup took
      backup_time=$(($current_time - $target_time))
      # Calculate how many times the frequency interval passed during the previous backup
      freq_time_count=$(($backup_time / $freq_time))
      # Increment the count by one because we want to wait at least the frequency time once
      freq_time_count_to_add=$(($freq_time_count + 1))
      # Calculate the extra time to add to the previous target time
      extra_time=$(($freq_time_count_to_add*$freq_time))
      # Calculate the new target time needed for the next calculation
      target_time=$(($target_time + $extra_time))
      # Calculate the wait time
      waittime=$(($target_time - $current_time))
    fi
    sleep $waittime
    last_run=$(date +"%s")
  done
fi