- master节点
- node1节点
- node2节点
- node3节点
容器运行时三选一(Docker / containerd / CRI-O),推荐 Docker
所有节点运行
# Install Docker CE
## Set up the repository
### Install required packages.
yum install yum-utils device-mapper-persistent-data lvm2 -y
### Add Docker repository.
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
## Install Docker CE (version pinned to 18.06.2 for kubeadm compatibility).
yum update -y && yum install docker-ce-18.06.2.ce -y
## Create /etc/docker directory.
### -p: don't fail if the directory already exists, so this script is re-runnable.
mkdir -p /etc/docker
# Setup daemon: systemd cgroup driver (must match the kubelet), json-file
# logging with rotation, a registry mirror, and the in-house insecure registry.
cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://uvioyo5q.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"insecure-registries":["http://172.16.50.96:6543"],
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart Docker so the daemon.json takes effect, and enable it at boot.
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
所有节点
# Kubernetes manages its own iptables rules; stop firewalld immediately
# and keep it disabled across reboots (--now = disable + stop in one call).
systemctl disable --now firewalld
所有节点运行
# Map cluster node names to IPs.
# Append (>>) instead of truncating (>): overwriting /etc/hosts destroys
# the default "127.0.0.1 localhost" entries and breaks local resolution.
# NOTE(review): 192.168.40.131 is named "admin" here but is used as the
# master/apiserver address below — confirm the naming is intentional.
cat << EOM >> /etc/hosts
192.168.40.128 node1
192.168.40.129 node2
192.168.40.130 node3
192.168.40.131 admin
EOM
所有节点运行
# Kubernetes requires swap to be off. Turn it off immediately:
swapoff -a
# Comment out the swap entry in /etc/fstab so it stays off after reboot.
# (Replaces the manual "vi /etc/fstab" edit so the step is scriptable;
# s/^#*/#/ leaves already-commented lines with a single leading '#'.)
sed -i '/\sswap\s/ s/^#*/#/' /etc/fstab
# NOTE: the stray "sysctl -p" that used to sit here was removed — it has
# nothing to do with swap, and "sysctl --system" is run later anyway.
所有节点
# Add the upstream Kubernetes yum repository.
# exclude= pins kubelet/kubeadm/kubectl so a routine "yum update" cannot
# silently upgrade them; the install below opts in explicitly with
# --disableexcludes=kubernetes (this matches the official kubeadm docs).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
# Set SELinux in permissive mode (effectively disabling it)
# Required so containers can access the host filesystem (e.g. for pod
# networking); permissive keeps audit logging without enforcement.
setenforce 0
# Persist the change across reboots.
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Let iptables see bridged traffic (required by flannel and most CNI plugins).
# The net.bridge.* sysctl keys only exist once br_netfilter is loaded,
# so load it now and make the load persistent across reboots.
modprobe br_netfilter
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
# Install the Kubernetes components (--disableexcludes lifts the repo's
# version pin for this one transaction).
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# Enable at boot and start immediately.
# The former "systemctl restart --now kubelet" was removed: --now is only
# valid for enable/disable/mask, and a restart right after "enable --now"
# is redundant anyway.
systemctl enable --now kubelet
master节点运行
# Initialize the control plane. 10.244.0.0/16 is the pod CIDR expected by
# flannel's default manifest; the advertise address is this master's IP.
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.40.131
将/etc/kubernetes/admin.conf拷贝到其他节点相同位置
所有节点
# Make kubectl usable for the current (non-root) user.
# Expansions are quoted so the commands survive unusual values (SC2086).
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# The join command below is printed by "kubeadm init" when the master
# finishes initializing; the token and CA hash are per-cluster examples —
# use the values your own init printed.
kubeadm join 192.168.40.131:6443 --token fcvcgl.b9d5qewsvalomg3t \
--discovery-token-ca-cert-hash sha256:920cc8c1222ebfff37b7b0a49779b8f79f8040a6c75c3dc7c8b1aec00875d511
master节点运行(flannel 以 DaemonSet 形式部署到全部节点,kubectl apply 只需执行一次)
# Deploy the flannel CNI. This is cluster-wide (a DaemonSet); apply it
# once from any machine with a working kubeconfig.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Tail the kubelet logs (troubleshooting).
journalctl -f -u kubelet
# List cluster nodes.
kubectl get nodes
# Remove a node from the cluster.
kubectl delete node node1
# Undo everything kubeadm init/join did on this host.
kubeadm reset
# Start an API proxy reachable from other machines.
# NOTE(review): binding 0.0.0.0 with permissive --accept-hosts exposes the
# unauthenticated API proxy to the network — use with care.
kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'
# Pull secret for the private registry, in namespace "fortest".
# NOTE(review): the email argument had lost its flag ("[email protected]"
# residue); kubectl requires it as --docker-email=<addr>, otherwise it is
# treated as a positional argument and the command fails — confirm the
# original address. Also consider supplying the password via a file/env
# rather than argv (argv leaks through shell history and `ps`).
kubectl -n fortest create secret docker-registry devsecret --docker-server=172.16.50.96:6543 --docker-username=docker-dev --docker-password=NP123456 --docker-email=dev@example.com
master节点
# Deploy the Kubernetes dashboard (v2.0.0-beta1).
# NOTE: the v2 manifest installs into the "kubernetes-dashboard" namespace,
# not kube-system (that was only true for dashboard v1.x) — the follow-up
# commands below were corrected to target the right namespace.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta1/aio/deploy/recommended.yaml
kubectl get pods -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard
# Patch the dashboard service to NodePort so it is reachable from outside.
kubectl patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}' -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard
# Browse to https://<node-host>:<node-port>
# Create a service account for dashboard login and grant it cluster-admin.
kubectl create serviceaccount cluster-admin-dashboard-sa
# NOTE(review): the binding name "etcd-certs" is misleading — it has
# nothing to do with etcd certificates.
kubectl create clusterrolebinding etcd-certs \
--clusterrole=cluster-admin \
--serviceaccount=default:cluster-admin-dashboard-sa
# Find the token secret generated for the service account.
kubectl get secret | grep cluster-admin-dashboard-sa
# Show the bearer token for dashboard login (the "-6thzn" suffix is
# random per cluster; use the name printed by the previous command).
kubectl describe secrets/cluster-admin-dashboard-sa-token-6thzn
# Work in a scratch directory (-p makes this re-runnable).
mkdir -p key && cd key
# Generate a self-signed certificate for the dashboard.
openssl genrsa -out dashboard.key 2048
# The CN should be the IP of the host serving the dashboard.
openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.246.200'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Replace the certificate secret. Dashboard v2 keeps it in the
# "kubernetes-dashboard" namespace (kube-system was only correct for v1.x).
kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
# List dashboard pods (v2 lives in the kubernetes-dashboard namespace,
# not kube-system).
kubectl get pod -n kubernetes-dashboard
# "Restart" the dashboard by deleting its pod; the Deployment recreates it.
kubectl delete pod <pod name> -n kubernetes-dashboard
# Re-apply the manifest if needed.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta1/aio/deploy/recommended.yaml
# Pull kubeadm's k8s.gcr.io images from mirror registries (k8s.gcr.io is
# unreachable from some networks) and re-tag them under the names kubeadm
# expects. Each line of the here-doc is "<mirror image> <target tag>";
# pulls and tags happen in the same order as the original command list.
while read -r src dst; do
  docker pull "$src"
  docker tag "$src" "$dst"
done <<'EOF'
mirrorgooglecontainers/kube-apiserver-amd64:v1.15.3 k8s.gcr.io/kube-apiserver:v1.15.3
mirrorgooglecontainers/kube-controller-manager:v1.15.3 k8s.gcr.io/kube-controller-manager:v1.15.3
mirrorgooglecontainers/kube-scheduler:v1.15.3 k8s.gcr.io/kube-scheduler:v1.15.3
mirrorgooglecontainers/kube-proxy:v1.15.3 k8s.gcr.io/kube-proxy:v1.15.3
mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
mirrorgooglecontainers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
vbouchaud/nfs-client-provisioner-arm64 quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
vbouchaud/nfs-client-provisioner quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
EOF