- completion
source <(oc completion bash)
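# persist completion across shell sessions (assuming bash and a writable ~/.bashrc)
echo 'source <(oc completion bash)' >> ~/.bashrc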
- trace logging
rm -rf ~/.kube/cache
oc get pods -v=6   # log requested URLs
oc get pods -v=7   # + HTTP request headers
oc get pods -v=8   # + HTTP request/response contents
- explain yaml schema
oc explain pods
oc explain pods --recursive
oc explain pods --recursive --api-version=autoscaling/v2beta1
- get resource as yaml/json (the source of the resource)
oc get -o yaml pod {name of the pod}
oc get -o json pod {name of the pod}
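# extract a single field via jsonpath, e.g. the pod IP (path is an example)
oc get -o jsonpath='{.status.podIP}' pod {name of the pod}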
https://docs.openshift.org/latest/minishift/using/index.html
oc CLI installation on Debian
sudo apt install oc
download the appropriate release
https://api.github.com/repos/openshift/origin/releases/latest
retrieve "browser_download_url"; example download link (from the previous API response):
https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz
tar -xvf openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz
mv openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit /home/soft/openshift-tool
export PATH=/home/soft/openshift-tool:$PATH
oc login --username=admin --password=admin
echo "my_password" | oc login -u my_user
oc login -u developer -p developer
oc login {url}
check login
oc whoami
oc whoami -t
https://oauth-openshift.stg.zxxp.zur/oauth/token/display
oc login --token=sha256~xxxxxxxxxxxxx --server=https://api.stg.zxxp.zur:6443
oc whoami -v=8
TOKEN=$(oc whoami -t)
ENDPOINT=$(oc status | head --lines=1 | awk '{print $6}')
NAMESPACE=$(oc status | head --lines=1 | awk '{print $3}')
echo $TOKEN
echo $ENDPOINT
echo $NAMESPACE
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/pods
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/namespaces/$NAMESPACE/pods
# watch for changes
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/watch/namespaces/$NAMESPACE/pods
~/.kube/config
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:6440
  name: docker-for-desktop-cluster
- cluster:
    insecure-skip-tls-verify: true
    server: https://openshift-master-sim.myprovider.org:8443
  name: openshift-master-sim-myprovider-org:8443
kubectl config use-context kubernetes-admin@docker-for-desktop-cluster
maprlogin password -user {mapruser}
# ticket-file will be created
check the ticket expiration date
maprlogin print -ticketfile /tmp/maprticket_1000 # or another filename
use the ticket file created by the previous command
cat /tmp/maprticket_1000
# create secret from file ( default name )
oc create secret generic {name of secret/token} --from-file=/tmp/maprticket_1000 -n {project name}
# create secret from file, specifying the key name CONTAINER_TICKET (visible via: oc describe secret {name of secret})
oc create secret generic {name of secret/token} --from-file=CONTAINER_TICKET=/tmp/maprticket_1000 -n {project name}
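# verify the secret content (key CONTAINER_TICKET as created above)
oc get secret {name of secret/token} -n {project name} -o jsonpath='{.data.CONTAINER_TICKET}' | base64 --decode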
automation for re-creating the ticket secret in different namespaces (usage example after the function)
function openshift-replace-maprticket(){
    MAPR_TICKET_PATH="${1}"
    if [[ "$MAPR_TICKET_PATH" == "" ]]; then
        echo "first parameter should be the filepath to the MapR PROD ticket!"
        return 1
    fi
    if [ ! -f "$MAPR_TICKET_PATH" ]; then
        echo "can't access file: ${MAPR_TICKET_PATH}"
        return 2
    fi
    oc login -u "$TECH_USER" -p "$TECH_PASSWORD" "$OPEN_SHIFT_URL"
    PROJECTS=("portal-pre-prod" "portal-production")
    SECRET_NAME="mapr-ticket"
    for OC_PROJECT in "${PROJECTS[@]}"
    do
        echo "$OC_PROJECT"
        oc project "$OC_PROJECT"
        oc delete secret "$SECRET_NAME"
        oc create secret generic "$SECRET_NAME" --from-file=CONTAINER_TICKET="$MAPR_TICKET_PATH" -n "$OC_PROJECT"
        oc get secret "$SECRET_NAME" -n "$OC_PROJECT"
    done
}
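# usage example (assumes TECH_USER, TECH_PASSWORD and OPEN_SHIFT_URL are already exported)
openshift-replace-maprticket /tmp/maprticket_1000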
or create the secret from the file content as a literal
oc create secret generic {name of secret/token} --from-literal=CONTAINER_TICKET='dp.prod.ubs qEnHLE7UaW81NJaDehSH4HX+m9kcSg1UC5AzLO8HJTjhfJKrQWdHd82Aj0swwb3AsxLg==' -n {project name}
check creation
oc get secrets
example of mapping a secret into a pod
...
volumeMounts:
- name: mapr-ticket
  mountPath: "/path/inside/container"
  readOnly: true
...
volumes:
- name: mapr-ticket
  secret:
    secretName: my-ticket
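# check the mounted secret inside the running pod (pod name is a placeholder)
oc exec {name of pod} -- ls -la /path/inside/container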
kubectl cluster-info
oc describe {[object type:](https://docs.openshift.com/enterprise/3.0/cli_reference/basic_cli_operations.html#object-types)}
- buildconfigs
- services
- routes
- ...
# follow events
oc get --watch events
# print events sorted by time, filter warnings
oc get events --sort-by='.lastTimestamp' | grep " Warning "
oc status
oc get routes {app name / service name}
FILE_NAME=route-data-api-mdf4download-service.yaml
echo "vim $FILE_NAME" | clipboard
yq 'del(.metadata.managedFields,.status,.metadata.uid,.metadata.resourceVersion,.metadata.creationTimestamp,.metadata.labels."template.openshift.io/template-instance-owner"),(.metadata.namespace="my_namespace")' $FILE_NAME
oc get all
oc get deployment,pod,service,route,dc,pvc,secret -l deployment_name=name-of-my-deployment
oc get route/name-of-route --output json
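# extract only the hostname of the route
oc get route/name-of-route --output jsonpath='{.spec.host}'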
oc rollout latest "deploy-config-example"
oc get services
curl http://${SERVICE_NAME}:${SERVICE_PORT}/data-api/v1/health/
FILE_NAME=service-data-portal.yaml
oc get service/my_service --output yaml > $FILE_NAME
echo "vim $FILE_NAME" | clipboard
yq 'del(.metadata.managedFields,.status,.metadata.uid,.metadata.resourceVersion,.metadata.creationTimestamp,.spec.clusterIP,.spec.clusterIPs),(.metadata.namespace="my_new_namespace")' $FILE_NAME | clipboard
oc get serviceaccounts
oc get rolebindings
oc project
oc policy add-role-to-user admin cherkavi
# oc policy remove-role-from-user admin cherkavi
oc get rolebindings
oc get projects
oc new-project {project name}
oc describe project {project name}
oc get images
oc get images.image.openshift.io
IMAGE_OCP=image-registry.openshift-registry.svc:5000/portal-test-env/openjdk-8-slim-enhanced:202110
IMAGE_EXTERNAL=nexus-shared.com/repository/uploadimages/openjdk-8-slim-enhanced:202110
oc import-image $IMAGE_OCP --from=$IMAGE_EXTERNAL --confirm
oc import-image approved-apache:2.4 --from=bitnami/apache:2.4 --confirm
oc import-image my-python --from=my-external.com/tdonohue/python-hello-world:latest --confirm
# if you have credential restrictions
# oc create secret docker-registry my-mars-secret --docker-server=registry.marsrover.space --docker-username="[email protected]" --docker-password=thepasswordishere
oc tag my-external.com/tdonohue/python-hello-world:latest my-python:latest
oc project
oc project {project name}
oc create -f {description file}
# oc replace -f {description file}
example of a job
apiVersion: batch/v1
kind: Job
metadata:
  name: scenario-description
spec:
  template:
    spec:
      nodeSelector:
        composer: "true"
      containers:
      - name: scenario-description
        image: cc-artifactory.myserver.net/add-docker/scenario_description:0.23.3
        command: ["python", "-c", "'import scenario_description'"]
        env:
        - name: MAPR_TICKETFILE_LOCATION
          value: "/tmp/maprticket_202208"
        # set environment variable from metadata
        - name: PROJECT
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      restartPolicy: Never
  backoffLimit: 4
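# create the job and follow the logs (file name is an assumption)
oc create -f scenario-description-job.yaml
oc logs -f job/scenario-description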
oc set resources dc/{app-name} --limits=cpu=400m,memory=512Mi --requests=cpu=200m,memory=256Mi
oc autoscale dc/{app-name} --min 1 --max 5 --cpu-percent=40
oc debug pods/{name of the pod}
oc get pods --field-selector=status.phase=Running
oc rsh {name of pod}
# connect to container inside the pod with multi container
POD_NAME=data-portal-67-dx
CONTAINER_NAME=data-portal-apache
oc exec -it $POD_NAME -c $CONTAINER_NAME -- /bin/bash
# example of executing a program on the pod: kafka-test-app
oc exec kafka-test-app -- /usr/bin/java
# copy file from pod to local folder
oc cp api-server-256-txa8n:/usr/src/cert/keystore_server /my/local/path
# copy files from local folder to pod
oc rsync /my/local/folder/ test01-mz2rf:/opt/app-root/src/
oc port-forward <pod-name> <ext-port>:<int-port>
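# example: forward local port 8080 to pod port 8080 and test it (pod name and URL path are placeholders)
oc port-forward {name of pod} 8080:8080 &
curl http://localhost:8080/health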
oc new-app {/local/folder/to_source}
oc new-app https://github.com/openshift/ruby-ex.git
oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
new app with a specific builder image (centos/ruby-22-centos7) from Git, with a specific sub-folder and name
oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git --context-dir=sub-project --name myruby
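# follow the build triggered by new-app
oc logs -f bc/myruby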
oc import-image jenkins:v3.7 --from='registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.7' --confirm -n openshift
oc logs pod/{name of pod}
oc logs --follow bc/{name of app}
oc describe job {job name}
oc describe pod {pod name}
# list of config maps
oc get configmap
# describe one of the config map
oc get configmaps "httpd-config" -o yaml
oc describe configmap data-api-config
oc describe configmap gatekeeper-config
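# create a config map from a local file (file name is an assumption)
oc create configmap httpd-config --from-file=httpd.conf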
oc policy add-role-to-user view -n {name of application/namespace} -z default
oc config view
the same as
cat ~/.kube/config
oc describe routes
Requested Host:
oc delete {type} {type name}
- buildconfigs
- services
- routes
- ...
oc get svc istio-ingressgateway -n istio-system
if your service looks like svc/web - 172.30.20.243:8080 instead of an external link like http://gateway-myproject.192.168.42.43.nip.io to pod port 8080 (svc/gateway), then you can "expose" it to the external world (see the route check after this list):
- oc expose services/{app name}
- oc expose service/{app name}
- oc expose svc/{app name}
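# after exposing, check the generated route
oc get route {app name}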
# set readiness/liveness
oc set probe dc/{app-name} --liveness --readiness --get-url=http://:8080/health
# remove readiness/liveness
oc set probe dc/{app-name} --remove --liveness --readiness --get-url=http://:8080/health
# oc set probe dc/{app-name} --remove --liveness --readiness --get-url=http://:8080/health --initial-delay-seconds=30
# Set a readiness probe to try to open a TCP socket on 3306
oc set probe rc/mysql --readiness --open-tcp=3306
minishift ip
minishift console
kubectl config get-contexts
kubectl config current-context
kubectl api-versions
--> Success
    Build scheduled, use 'oc logs -f bc/web' to track its progress.
    Application is not exposed. You can expose services to the outside world by executing one or more of the commands below:
      'oc expose svc/web'
    Run 'oc status' to view your app.
!!! an OpenShift Job executes only `command` - the image entrypoint will be skipped
apiVersion: batch/v1
kind: Job
metadata:
  name: scenario-description
spec:
  template:
    spec:
      containers:
      - name: scenario-description
        image: scenario_description:0.2.3
        command: ["python", "-c", "'import scenario_description'"]
      restartPolicy: Never
apiVersion: v1
kind: Pod
metadata:
  name: test01
spec:
  containers:
  - name: test01
    image: busybox
    command: ["sleep", "36000"]
  restartPolicy: Never
apiVersion: v1
kind: Pod
metadata:
  name: test01
spec:
  containers:
  - name: test01
    image: busybox
    command: ["sleep", "36000"]
  - name: test02
    image: busybox
    command: ["sleep", "36000"]
  restartPolicy: Never
apiVersion: v1
kind: Pod
metadata:
  name: connect-to-me
spec:
  containers:
  - name: just-a-example
    image: busybox
    command: ["sleep", "36000"]
    volumeMounts:
    - mountPath: /source
      name: maprvolume-source
    - mountPath: /destination
      name: maprvolume-destination
    - name: httpd-config-volume
      mountPath: /usr/local/apache2/conf/httpd.conf
  volumes:
  - name: maprvolume-source
    persistentVolumeClaim:
      claimName: pvc-scenario-input-prod
  - name: maprvolume-destination
    persistentVolumeClaim:
      claimName: pvc-scenario-output-prod
  - name: httpd-config-volume
    configMap:
      name: httpd-config
      defaultMode: 420
  restartPolicy: Never
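# validate the manifest without creating it (recent oc; file name is an assumption)
oc create -f pod.yaml --dry-run=client -o yaml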
For a MapR cluster, be aware of the dependency chain: MapR ticket-file --> Secret --> PV --> PVC
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-workloads-staging-01
spec:
  capacity:
    storage: 50Gi
  csi:
    driver: com.mapr.csi-kdf
    volumeHandle: pv-workloads-staging-01
    volumeAttributes:
      cldbHosts: >-
        dpmtjp0001.swiss.com dpmtjp0002.swiss.com
        dpmtjp0003.swiss.com dpmtjp0004.swiss.com
      cluster: dp.stg.swiss
      platinum: 'false'
      securityType: secure
      volumePath: /data/reprocessed/sensor
    nodePublishSecretRef:
      name: hil-supplier-01
      namespace: workloads-staging
  accessModes:
  - ReadWriteMany
  claimRef:
    kind: PersistentVolumeClaim
    namespace: workloads-staging
    name: pvc-supplier-01
  persistentVolumeReclaimPolicy: Retain
  volumeMode: Filesystem
status:
  phase: Bound
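# check that the volume is bound to its claim (names taken from the manifest above)
oc get pv pv-workloads-staging-01
oc get pvc pvc-supplier-01 -n workloads-staging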
creating a secret
- log in to MapR
echo $CLUSTER_PASSWORD | maprlogin password -user $CLUSTER_USER
- check whether the secret already exists
oc get secrets -n $OPENSHIFT_NAMESPACE
- re-create secret
# delete secret
oc delete secret/volume-token-ground-truth
cat /tmp/maprticket_1000
# create secret from file
ticket_name="cluster-user--mapr-prd-ticket-1536064"
file_name=$ticket_name".txt"
project_name="tsa"
## copy file from cluster to local folder
scp -r [email protected]:/full/path/to/$file_name .
oc create secret generic $ticket_name --from-file=$file_name -n $OPENSHIFT_NAMESPACE
oc create secret generic volume-token-ground-truth --from-file=CONTAINER_TICKET=/tmp/maprticket_1000 -n $OPENSHIFT_NAMESPACE
oc create secret generic volume-token-ground-truth --from-literal=CONTAINER_TICKET='dp.prod.zurich qEnHLE7UaW81NJaDehSH4HX+m9kcSg1UC5AzLO8HJTjhfJKrQWdHd82Aj0swwb3AsxLg==' -n $OPENSHIFT_NAMESPACE
- check created ticket
maprlogin print -ticketfile /tmp/maprticket_1000
oc describe secret volume-token-ground-truth
using the secret in PV/PVC definitions
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-scenario-extraction-input
  namespace: scenario-extraction
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  claimRef:
    namespace: scenario-extraction
    name: pvc-scenario-extraction-input
  flexVolume:
    driver: "mapr.com/maprfs"
    options:
      platinum: "false"
      cluster: "dp.prod.munich"
      cldbHosts: "dpmesp000004.gedp.org dpmesp000007.gedp.org dpmesp000010.gedp.org dpmesp000009.gedp.org"
      volumePath: "/tage/data/store/processed/ground-truth/"
      securityType: "secure"
      ticketSecretName: "volume-token-ground-truth"
      ticketSecretNamespace: "scenario-extraction"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-scenario-extraction-input
  namespace: scenario-extraction
spec:
  accessModes:
  - ReadWriteMany
  volumeName: pv-scenario-extraction-input
  resources:
    requests:
      storage: 1Gi
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flask-pod
spec:
  selector:
    matchLabels:
      run: my-flask
  replicas: 1
  template:
    metadata:
      labels:
        run: my-flask
    spec:
      containers:
      - name: flask-test
        image: docker-registry.zur.local:5000/test-flask:0.0.1
        command: ["sleep","3600"]
        ports:
        - containerPort: 5000
---
apiVersion: v1
kind: Service
metadata:
  name: flask-service
  labels:
    run: my-flask
spec:
  ports:
  - name: flask
    port: 5000
    protocol: TCP
  - name: apache
    port: 9090
    protocol: TCP
    targetPort: 80
  selector:
    run: my-flask
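# apply both manifests and verify by label (file name is an assumption)
oc apply -f flask.yaml
oc get deployment,service -l run=my-flask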
# rule:
# readiness_probe.initial_delay_seconds <= strategy.rollingParams.timeoutSeconds
strategy:
  rollingParams:
    timeoutSeconds: 1500
...
readiness_probe:
  initial_delay_seconds: 600
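# the same delay can also be set with oc (values taken from the rule above)
oc set probe dc/{app-name} --readiness --get-url=http://:8080/health --initial-delay-seconds=600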
volumeMounts:
- { mountPath: /tmp/maprticket, name: mapr-ticket, readOnly: true }
- { mountPath: /usr/src/classes/config/server, name: server-config-volume, readOnly: false }
- { mountPath: /mapr/prod.zurich/vantage/data/store/processed, name: processed, readOnly: false }
- { mountPath: /tmp/data-api, name: cache-volume, readOnly: false }
volumes:
- { type: secret, name: mapr-ticket, secretName: mapr-ticket }
- { type: configMap, name: server-config-volume, config_map_name: server-config }
- { type: other, name: mapr-deploy-data-api }
- { type: pvc, name: processed, pvc_name: pvc-mapr-processed-prod }
- { type: emptyDir, name: cache-volume }
oc adm policy add-scc-to-user {name of policy} {name of user}
oc adm policy remove-scc-from-user {name of policy} {name of user}
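# example: allow the default service account to run images as root (anyuid SCC)
oc adm policy add-scc-to-user anyuid -z default -n {name of project}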