forked from gurubamal/success_taste
-
Notifications
You must be signed in to change notification settings - Fork 0
/
02ceph.sh
125 lines (113 loc) · 3.62 KB
/
02ceph.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
#!/usr/bin/env bash
# 02ceph.sh — provision a node for the Ceph cluster: kernel networking
# prerequisite, root SSH access between nodes, ceph packages, and (on the
# monitor node) /etc/ceph/ceph.conf.
#curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
#add-apt-repository \
#"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
#$(lsb_release -cs) \
#stable"
#
#curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
#cat << EOF | tee /etc/apt/sources.list.d/kubernetes.list
#deb https://apt.kubernetes.io/ kubernetes-xenial main
#EOF
sudo apt-get update
#apt-get install -y docker-ce kubelet kubeadm kubectl
#
#
# Append the bridge-netfilter sysctl only once — the original unconditional
# 'tee -a' duplicated this line on every provisioning run.
# NOTE(review): this setting only takes effect once br_netfilter is loaded;
# the modprobe step appears to have been handled elsewhere — confirm.
if ! grep -q '^net\.bridge\.bridge-nf-call-iptables=1$' /etc/sysctl.conf; then
  echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee -a /etc/sysctl.conf
fi
sudo sysctl -p
#usermod -aG docker vagrant
#sudo swapoff -a
#sudo sed -i 's/\/swap.img/#\/swap.img/g' /etc/fstab
#echo overlay br_netfilter|sudo tee -a /etc/modules-load.d/containerd.conf
#FILE=/etc/docker/daemon.json
#if test -f "$FILE"; then
# echo "$FILE exists."
#else echo '{
# "exec-opts": ["native.cgroupdriver=systemd"]
#}'|sudo tee /etc/docker/daemon.json
#sudo systemctl daemon-reload && sudo systemctl restart docker && sudo systemctl restart kubelet
#fi
#FILE=/etc/docker/daemon.json
#if test -f "$FILE"; then
# echo "$FILE exists."
#else echo '{
# "exec-opts": ["native.cgroupdriver=systemd"]
#}'|sudo tee /etc/docker/daemon.json
#sudo systemctl daemon-reload && sudo systemctl restart docker && sudo systemctl restart kubelet
#fi
#if ! grep native.cgroupdriver=systemd /etc/systemd/system/multi-user.target.wants/docker.service
#then
# sudo systemctl stop docker
# sudo sed -i 's/containerd.sock/containerd.sock --exec-opt native.cgroupdriver=systemd/g' /etc/systemd/system/multi-user.target.wants/docker.service
# sudo systemctl daemon-reload
# sudo systemctl daemon-reload && sudo systemctl restart docker && sudo systemctl restart kubelet
#fi
#echo -e "vagrant\nvagrant" | sudo passwd root
# On the admin node (node6) only: map the logical node names (node01..node03)
# to the actual hostnames so root can ssh by alias. Ensure /root/.ssh exists
# first — this block runs before the key-generation step and the directory
# may not have been created yet. A quoted heredoc replaces the fragile
# multi-line echo; the config text itself is unchanged.
if [ "$HOSTNAME" = node6 ]; then
  sudo mkdir -p /root/.ssh
  sudo tee /root/.ssh/config >/dev/null <<'EOF'
Host node01
Hostname node6
User root
Host node02
Hostname node7
User root
Host node03
Hostname node8
User root
EOF
  # ssh refuses a config file that is group/world readable
  sudo chmod 600 /root/.ssh/config
fi
# Generate a passwordless SSH key for root (used for node-to-node access).
# The existence check must run under sudo: /root/.ssh is not readable by an
# unprivileged user, so a plain 'test -f' always failed and ssh-keygen would
# hit its interactive "Overwrite?" prompt on every re-run.
if ! sudo test -f /root/.ssh/id_rsa; then
  sudo ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa
else
  echo "All Set"
fi
# Install ceph and sshpass. Use sudo (every other privileged command in this
# script does — it is not guaranteed to run as root, and unprivileged 'apt'
# simply fails) and apt-get, whose CLI is stable for scripted use.
sudo apt-get update
sudo apt-get install -y ceph sshpass
#if ! 192.168.58 /etc/hosts
# then
# echo 192.168.58.7 node7 node02| sudo tee -a /etc/hosts
# echo 192.168.58.8 node8 node03| sudo tee -a /etc/hosts
# echo 192.168.58.6 node6 node01| sudo tee -a /etc/hosts
# echo 192.168.58.5 node5 controller| sudo tee -a /etc/hosts
#fi
#echo "127.0.0.1 localhost
#127.0.1.1 vagrant
# The following lines are desirable for IPv6 capable hosts
#::1 ip6-localhost ip6-loopback
#fe00::0 ip6-localnet
#ff00::0 ip6-mcastprefix
#ff02::1 ip6-allnodes
#ff02::2 ip6-allrouters
#192.168.58.7 node7 node02
#192.168.58.8 node8 node03
#192.168.58.6 node6 node01" |sudo tee /etc/hosts
#FILEX=/home/vagrant/x.txt
#if test -f "$FILEX"; then
# echo "Password was reset already"
#else
# echo -e "vagrant\nvagrant" | sudo passwd root ; touch $FILEX
#fi
# On the monitor node (node6) only: write the cluster-wide ceph.conf.
# Guard against re-provisioning: the original regenerated the fsid and
# overwrote the file on every run, which would orphan an already-bootstrapped
# cluster (the fsid is the cluster's identity and must never change).
if [ "$HOSTNAME" = node6 ]; then
  if sudo test -s /etc/ceph/ceph.conf; then
    echo "/etc/ceph/ceph.conf already present; keeping existing fsid"
  else
    # uuidgen lives in uuid-runtime and may not be installed; fall back to
    # the kernel's random-uuid interface.
    UUID=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid)
    sudo mkdir -p /etc/ceph
    sudo tee /etc/ceph/ceph.conf >/dev/null <<EOF
[global]
# specify cluster network for monitoring
cluster network = 192.168.58.0/24
# specify public network
public network = 192.168.58.0/24
# specify UUID generated above
fsid = $UUID
# specify IP address of Monitor Daemon
mon host = 192.168.58.6
# specify Hostname of Monitor Daemon
mon initial members = node01
osd pool default crush rule = -1
# mon.(Node name)
[mon.node01]
# specify Hostname of Monitor Daemon
host = node01
# specify IP address of Monitor Daemon
mon addr = 192.168.58.6
# allow to delete pools
mon allow pool delete = true
EOF
  fi
fi
#for NODE in node6 node7 node8
# do sshpass -pvagrant ssh-copy-id root@$NODE
#done