#cloud-config
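# Common node configuration: installs Docker and supporting packages (CloudWatch
# agent, dnf-automatic), tunes kernel and ulimit settings for containers, and
# writes helper scripts for Docker Swarm membership and SSH-based Docker access.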
repo_update: true
repo_upgrade: all
packages:
- docker
- amazon-cloudwatch-agent
- python3-pip
- dnf-automatic.noarch
output:
all: "| tee -a /var/log/cloud-init-output.log"
manage_resolv_conf: true
resolv_conf:
  nameservers: [ '169.254.169.253' ]
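# Apply container-friendly kernel settings immediately at boot; the sysctl.d and
# limits.d files written below keep them in effect after a reboot. The ulimit
# calls only affect the shell cloud-init uses to run these commands.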
runcmd:
- [ sysctl, -w, vm.max_map_count=262144 ]
- [ sysctl, -w, fs.file-max=65536 ]
- [ sysctl, -w, vm.overcommit_memory=1 ]
- [ ulimit, -n, '65536' ]
- [ ulimit, -u, '4096' ]
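# Files laid down at first boot: persistent tuning, Docker cleanup cron jobs,
# and swarm/SSH management helpers.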
write_files:
- content: |
vm.max_map_count=262144
fs.file-max=65536
vm.overcommit_memory=1
path: /etc/sysctl.d/01-docker.conf
owner: root:root
permissions: "0444"
- content: |
* soft nproc 4096
* soft nofile 65536
path: /etc/security/limits.d/01-docker.conf
owner: root:root
permissions: "0444"
- content: |
#!/bin/sh
docker image prune -f > /dev/null
docker container prune -f > /dev/null
path: /etc/cron.hourly/docker-prune.sh
owner: root:root
permissions: "0700"
- content: |
#!/bin/sh
docker image prune --all -f > /dev/null
docker volume prune -f > /dev/null
path: /etc/cron.daily/docker-prune.sh
owner: root:root
permissions: "0700"
- content: |
#!/bin/sh
docker node update --availability drain $(docker info -f '{{.Swarm.NodeID}}')
sleep 10
docker node demote $(docker info -f '{{.Swarm.NodeID}}')
sleep 10
docker swarm leave
path: /root/bin/leave-swarm.sh
owner: root:root
permissions: "0700"
- content: |
#!/bin/sh
docker node demote $(docker node ls --format "{{.ID}} {{.Status}} {{.Availability}}" --filter 'role=manager' | grep " Down Drain" | awk '{ print $1 }')
docker node rm $(docker node ls --format "{{.ID}} {{.Status}} {{.Availability}}" | grep " Down Drain" | awk '{ print $1 }')
path: /root/bin/prune-nodes.sh
owner: root:root
permissions: "0700"
- content: |
#!/bin/bash
USER="$1"
shift
SSH_PUBLIC_KEY="command=\\\"docker system dial-stdio\\\" $*"
useradd -s /bin/sh -G docker ${USER}
su - ${USER} -c "umask 077 ; mkdir .ssh ; echo $SSH_PUBLIC_KEY >> .ssh/authorized_keys"
path: /root/bin/add-docker-user.sh
owner: root:root
permissions: "0700"
- content: |
#!/usr/bin/env python3
import boto3
import grp
import sys
iam = boto3.client('iam')
public_keys = iam.list_ssh_public_keys(UserName=sys.argv[1])['SSHPublicKeys']
for public_key in public_keys:
public_key_id = public_key['SSHPublicKeyId']
ssh_public_key = iam.get_ssh_public_key(UserName=sys.argv[1], SSHPublicKeyId=public_key_id, Encoding="SSH")['SSHPublicKey']
if sys.argv[1] in grp.getgrnam("wheel").gr_mem:
print (ssh_public_key['SSHPublicKeyBody'])
else:
print ('command="docker system dial-stdio" ' + ssh_public_key['SSHPublicKeyBody'])
path: /opt/iam-authorized-keys-command
owner: root:root
permissions: "0755"
- content: |
#!/bin/sh
for node_name in $*
do
docker node update --availability drain ${node_name}
done
sleep 10
docker node rm --force $*
path: /root/bin/rm-workers.sh
owner: root:root
permissions: "0700"
groups:
- docker