install microk8s
sudo snap install microk8s --classic
sudo snap install microk8s --classic --edge
start microk8s and enable addons
microk8s.start
microk8s.enable dns dashboard
check installation
microk8s.inspect
check journal logs for the microk8s services
journalctl -u snap.microk8s.daemon-docker
- snap.microk8s.daemon-apiserver
- snap.microk8s.daemon-controller-manager
- snap.microk8s.daemon-scheduler
- snap.microk8s.daemon-kubelet
- snap.microk8s.daemon-proxy
- snap.microk8s.daemon-docker
- snap.microk8s.daemon-etcd
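to tail a short slice of every microk8s daemon's journal in one go, a small loop over the unit names above can help (a sketch; adjust the list to the daemons that actually exist on your revision)
for unit in apiserver controller-manager scheduler kubelet proxy docker etcd; do
  echo "=== snap.microk8s.daemon-$unit ==="
  journalctl -u "snap.microk8s.daemon-$unit" --no-pager -n 20
done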
install minikube (via snap, or download the binaries directly)
sudo snap install minikube
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
mkdir $HOME/.kube || true
touch $HOME/.kube/config
export KUBECONFIG=$HOME/.kube/config
sudo -E ./minikube start --vm-driver=none
# wait until Minikube's apiserver answers kubectl requests
for i in {1..150}; do # timeout after 5 minutes (150 * 2s)
  if ./kubectl get po &> /dev/null; then
    break
  fi
  sleep 2
done
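an alternative to the polling loop, assuming a kubectl recent enough to have the 'wait' subcommand
./kubectl wait --for=condition=Ready node --all --timeout=300s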
minikube completion bash
minikube start
full cleanup: remove minikube and kubernetes completely
kubectl delete node --all
kubectl delete pods --all
minikube stop
minikube delete
sudo systemctl stop '*kubelet*.mount'
sudo systemctl stop localkube.service
sudo systemctl disable localkube.service
sudo kubeadm reset
rm -rf ~/.kube ~/.minikube
sudo rm -rf /usr/local/bin/localkube /usr/local/bin/minikube
sudo rm -rf /etc/kubernetes/
# sudo apt-get purge kubeadm kubectl kubelet kubernetes-cni kube*
sudo apt-get purge kube*
sudo apt-get autoremove
docker system prune -af --volumes
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
export KUBECONFIG=$HOME/.kube/config
sudo -E minikube start --vm-driver=none
switch context permanently
kubectl config use-context minikube
use a context for a single command only
kubectl get pods --context=minikube
kubectl get namespaces
at least three namespaces are present by default
default Active 15m
kube-public Active 15m
kube-system Active 15m
kubectl create namespace my-own-namespace
or via a yaml file
kubectl apply -f {filename}
kind: Namespace
apiVersion: v1
metadata:
  name: test
kubectl delete namespace {name of namespace}
kubectl get configmap
kubectl get configmap --namespace kube-system
kubectl get configmap --namespace kube-system kube-proxy --output json
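creating and inspecting your own configmap (a sketch; the name my-config and its keys are invented for the example)
kubectl create configmap my-config --from-literal=log_level=debug --from-literal=greeting=hello
kubectl describe configmap my-config
kubectl get configmap my-config --output yaml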
kubectl cluster-info dump
kubectl get node
minikube dashboard
minikube addons list
minikube addons enable ingress
kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.4 --port=8080
kubectl run http --image=katacoda/docker-http-server:latest --replicas=1
kubectl scale --replicas=3 deployment {name of the deployment}
kubectl apply -f /path/to/controller.yml
kubectl create -f /path/to/controller.yml
kubectl expose deployment helloworld-deployment --type=NodePort --name=helloworld-service
kubectl expose deployment helloworld-deployment --external-ip="172.17.0.13" --port=8000 --target-port=80
minikube service helloworld-service
minikube service helloworld-service --url
kube-apiserver --service-node-port-range=30000-40000
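to pin the NodePort yourself instead of letting kubernetes pick one, the service can be declared in yaml (a sketch; the selector app=helloworld and port 30080 are assumptions, and 30080 must fall inside the configured range)
apiVersion: v1
kind: Service
metadata:
  name: helloworld-service
spec:
  type: NodePort
  selector:
    app: helloworld
  ports:
  - port: 8000
    targetPort: 80
    nodePort: 30080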
kubectl describe deployment {name of deployment}
kubectl describe service {name of service}
kubectl --namespace kube-system describe secret admin-user
kubectl get all --all-namespaces
kubectl get pods
kubectl get pods --namespace kube-system
kubectl get pods --show-labels
kubectl get pods --output=wide --selector="run=load-balancer-example"
kubectl get service --output=wide
kubectl get service --output=wide --selector="app=helloworld"
kubectl get deployments
kubectl get replicasets
kubectl get nodes
kubectl get cronjobs
kubectl get daemonsets
kubectl get pods,deployments,services,rs,cm,pv,pvc -n demo
minikube ip
check the hostIP of the 'kube-dns-....' and 'kube-proxy-....' pods
kubectl edit pod hello-minikube-{some random hash}
kubectl edit deploy hello-minikube
kubectl edit ReplicationControllers helloworld-controller
kubectl set image deployment/helloworld-deployment {container name}={new image}
kubectl rollout status deployment/helloworld-deployment
kubectl rollout history deployment/helloworld-deployment
kubectl rollout undo deployment/helloworld-deployment
kubectl rollout undo deployment/helloworld-deployment --to-revision={number of revision from 'history'}
kubectl delete pod hello-minikube-6c47c66d8-td9p2
kubectl delete deploy hello-minikube
kubectl delete rc helloworld-controller
the next recipes redirect 127.0.0.1:8080 to port 6379 of the pod
kubectl port-forward redis-master-765d459796-258hz 8080:6379
kubectl port-forward pods/redis-master-765d459796-258hz 8080:6379
kubectl port-forward deployment/redis-master 8080:6379
kubectl port-forward rs/redis-master 8080:6379
kubectl port-forward svc/redis-master 8080:6379
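for example, running one of the forwards in the background and testing it locally (a sketch; assumes redis-cli is installed on the workstation)
kubectl port-forward svc/redis-master 8080:6379 &
redis-cli -p 8080 ping # expect PONG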
pin a pod to a specific node by hostname
spec:
  nodeSelector:
    kubernetes.io/hostname: gtxmachine1-ev
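a complete minimal pod around that snippet (a sketch; the pod name and image are placeholders)
apiVersion: v1
kind: Pod
metadata:
  name: pinned-pod
spec:
  nodeSelector:
    kubernetes.io/hostname: gtxmachine1-ev
  containers:
  - name: app
    image: nginx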
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-volume3
  labels:
    type: local
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data3"
to access the created volume on the node
ls /mnt/data3
list of existing volumes
kubectl get pv
kubectl get pvc
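a PersistentVolumeClaim that can bind to the hostPath volume above (a sketch; the claim name pv-claim3 is invented)
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pv-claim3
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi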
serverless frameworks for kubernetes:
- OpenFaas
- Kubeless
- Fission
- OpenWhisk
kubectl get nodes --show-labels
kubectl label nodes {node name} my_label=my_value
kubectl label nodes {node name} my_label- # the trailing '-' removes the label
apiVersion: v1
kind: Pod
metadata:
  ...
spec:
  ...
  nodeSelector:
    my_label: my_value
apiVersion: apps/v1
kind: Deployment
metadata:
  ...
spec:
  ...
  template:
    ...
    spec:
      nodeSelector:
        my_label: my_value
- nodeAffinity
  - preferred - deploy in any case, preferring nodes with my_label=my_value:
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: my_label
            operator: In
            values:
            - my_value
  - required - deploy only onto nodes where my_label=my_value matches:
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: my_label
            operator: In
            values:
            - my_value
- node anti-affinity - there is no nodeAntiAffinity field; use nodeAffinity with the NotIn / DoesNotExist operators:
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: my_label
            operator: NotIn
            values: ["my_value"]
- podAffinity
  - preferred - spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution
  - required - spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution
- podAntiAffinity (see the sketch below)
  - preferred - spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
  - required - spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
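for example, a podAntiAffinity rule that keeps replicas of the same app on different nodes (a sketch; the app=helloworld label is reused from the examples above)
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - helloworld
        topologyKey: kubernetes.io/hostname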
kubectl get nodes
kubectl delete node {node name}
ssh {master node}
kubeadm token create --print-join-command --ttl 0
expected result from previous command
kubeadm join 10.14.26.210:6443 --token 7h0dmx.2v5oe1jwed --discovery-token-ca-cert-hash sha256:1d28ebf950316b8f3fdf680af5619ea2682707f2e966fc0
go to node, clean up and apply token
ssh {node address}
rm -rf /etc/kubernetes
# apply token from previous step with additional flag: --ignore-preflight-errors=all
kubeadm join 10.14.26.210:6443 --token 7h0dmx.2v5oe1jwed --discovery-token-ca-cert-hash sha256:1d28ebf950316b8f3fdf680af5619ea2682707f2e966fc0 --ignore-preflight-errors=all
expected result from previous command
...
This node has joined the cluster:
* Certificate signing request was sent to master and a response
was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
the next step is not mandatory in most cases
systemctl restart kubelet
kubectl logs <name of pod>
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
kubectl -n kube-system describe secret admin-user
kubectl proxy
# then open:
http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/overview?namespace=default
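the admin-user secret referenced above exists only if the ServiceAccount was created beforehand; the commonly used manifest looks roughly like this (a sketch of the dashboard's documented recipe)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system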
kubectl exec -it {name of a pod} -- bash -c "echo hi > /path/to/output/test.txt"
restart nodes
# remove dead pods
kubectl delete pods --namespace kube-system kube-flannel-ds-amd64-zsfz --grace-period=0 --force
kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
read logs
kubectl logs --namespace kube-system kube-flannel-ds-amd64-j4frw -c kube-flannel
read logs from all flannel pods
for each_node in $(kubectl get pods --namespace kube-system | grep flannel | awk '{print $1}');do echo $each_node;kubectl logs --namespace kube-system $each_node -c kube-flannel;done
read settings
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp ls /etc/kube-flannel/
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp cat /etc/kube-flannel/cni-conf.json
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp cat /etc/kube-flannel/net-conf.json
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp ls /run/flannel/
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp cat /run/flannel/subnet.env
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp ls /etc/cni/net.d
kubectl --namespace kube-system exec kube-flannel-ds-amd64-wc4zp cat /etc/cni/net.d/10-flannel.conflist
read DNS logs
kubectl get svc --namespace=kube-system | grep kube-dns
kubectl logs --namespace=kube-system coredns-78fcd94-7tlpw | tail
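a quick check that cluster DNS actually resolves (busybox 1.28 is used because nslookup is broken in later busybox images)
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default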
sudo snap install helm --classic
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
helm reset
helm init
# sync latest available packages
helm repo update
* $HELM_HOME: the location of Helm's configuration
* $TILLER_HOST: the host and port that Tiller is listening on
* $HELM_BIN: the path to the helm command on your system
* $HELM_PLUGIN_DIR: the full path to this plugin
helm inspect { folder }
helm lint { folder }
helm search
helm inspect {full name of the package}
helm status {name of the release}
helm create {name of new chart}
ls -la ~/.helm/starters/
helm install { full name of the package }
helm install --name {my name for new package} { full name of the package }
helm install --name {my name for new package} --namespace {namespace} -f values.yml --debug --dry-run { full name of the package }
helm plugin install https://github.com/hypnoglow/helm-s3.git
helm list
helm list --all
helm ls
upgrade a local package (chart in the current folder)
helm upgrade {release name} . --set replicas=2,maria.db.password="new password"
upgrade a package by name
helm upgrade {release name} {folder with helm chart} --set replicas=2
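the keys passed via --set correspond to entries in the chart's values.yml; for the overrides above it would contain roughly this (a sketch; the real structure depends on the chart)
replicas: 1
maria:
  db:
    password: "default password"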
check the upgrade
helm history {release name}
helm rollback {release name} {revision number from 'history'}
helm delete --purge {name of package}
E1209 22:25:57.285192 5149 portforward.go:331] an error occurred forwarding 40679 -> 44134: error forwarding port 44134 to pod de4963c7380948763c96bdda35e44ad8299477b41b5c4958f0902eb821565b19, uid : unable to do port forwarding: socat not found.
Error: transport is closing
solution
sudo apt install socat
Error: incompatible versions client[v2.12.3] server[v2.11.0]
solution
helm init --upgrade
kubectl get pods --namespace kube-system # wait for Tiller to start
helm version