- completion
source <(oc completion bash)
- trace logging
rm -rf ~/.kube/cache
oc get pods -v=6
oc get pods -v=7
oc get pods -v=8
- explain yaml schema
oc explain pods
oc explain pods --recursive
oc explain hpa --recursive --api-version=autoscaling/v2beta1
- get resource in yaml/json format ( source of the resource )
oc get -o yaml pod {name of the pod}
oc get -o json pod {name of the pod}
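jsonpath output can pull a single field instead of the whole object ( the field below is just an example ):
oc get pod {name of the pod} -o jsonpath='{.status.podIP}'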
https://docs.openshift.org/latest/minishift/using/index.html
debian
sudo apt install oc
download appropriate release
https://api.github.com/repos/openshift/origin/releases/latest
retrieve "browser_download_url" from the response; example of a download link ( taken from the link above ):
https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz
tar -xvf openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz
mv openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit /home/soft/openshift-tool
export PATH=/home/soft/openshift-tool:$PATH
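quick sanity check of the installed client:
oc version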
oc login --username=admin --password=admin
echo "my_password" | oc login -u my_user
oc login -u developer -p developer
oc login {url}
check login
oc whoami
oc whoami -t
oc whoami -v=8
TOKEN=$(oc whoami -t)
ENDPOINT=$(oc status | head --lines=1 | awk '{print $6}')
NAMESPACE=$(oc status | head --lines=1 | awk '{print $3}')
echo $TOKEN
echo $ENDPOINT
echo $NAMESPACE
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/pods
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/namespaces/$NAMESPACE/pods
# watch on changes
curl -k -H "Authorization: Bearer $TOKEN" -H 'Accept: application/json' $ENDPOINT/api/v1/watch/namespaces/$NAMESPACE/pods
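the same token works for other endpoints, e.g. reading the log of a single pod ( $POD_NAME is a placeholder for an existing pod ):
curl -k -H "Authorization: Bearer $TOKEN" $ENDPOINT/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME/log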
~/.kube/config
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:6440
  name: docker-for-desktop-cluster
- cluster:
    insecure-skip-tls-verify: true
    server: https://openshift-master-sim.myprovider.org:8443
  name: openshift-master-sim-myprovider-org:8443
kubectl config use-context kubernetes-admin@docker-for-desktop-cluster
maprlogin password -user {mapruser}
# ticket-file will be created
check expiration date
maprlogin print -ticketfile /tmp/maprticket_1000 # or another filename
show the content of the ticket file created by the previous command
cat /tmp/maprticket_1000
# create secret from file ( default name )
oc create secret generic {name of secret/token} --from-file=/tmp/maprticket_1000 -n {project name}
# create secret from file, specifying the key name CONTAINER_TICKET ( visible via: oc describe {name of secret} )
oc create secret generic {name of secret/token} --from-file=CONTAINER_TICKET=/tmp/maprticket_1000 -n {project name}
or from the literal content of that ticket file
oc create secret generic {name of secret/token} --from-literal=CONTAINER_TICKET='dp.prod.ubs qEnHLE7UaW81NJaDehSH4HX+m9kcSg1UC5AzLO8HJTjhfJKrQWdHd82Aj0swwb3AsxLg==' -n {project name}
check creation
oc get secrets
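read one key of the secret back and decode it ( the key name CONTAINER_TICKET matches the example above ):
oc get secret {name of secret/token} -o jsonpath='{.data.CONTAINER_TICKET}' | base64 --decode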
example of mounting the secret into a pod ( volumeMount + volume )
...
    volumeMounts:
    - name: mapr-ticket
      mountPath: "/path/inside/container"
      readOnly: true
...
  volumes:
  - name: mapr-ticket
    secret:
      secretName: my-ticket
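check the mounted ticket from inside the pod ( pod name is a placeholder ):
oc rsh {name of pod} ls -la /path/inside/container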
kubectl cluster-info
oc describe {object type}
[object types](https://docs.openshift.com/enterprise/3.0/cli_reference/basic_cli_operations.html#object-types):
- buildconfigs
- services
- routes
- ...
oc get --watch events
oc status
oc get routes {app name / service name}
oc get all
oc get deployment,pod,service,route,dc,pvc,secret -l deployment_name=name-of-my-deployment
oc get route/name-of-route --output json
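or extract only the external hostname of the route:
oc get route/name-of-route -o jsonpath='{.spec.host}'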
oc rollout latest "deploy-config-example"
oc get serviceaccounts
oc get rolebindings
oc project
oc policy add-role-to-user admin cherkavi
# oc policy remove-role-from-user admin cherkavi
oc get rolebindings
oc get projects
oc new-project {project name}
oc describe project {project name}
oc project
oc project {project name}
oc create -f {description file}
# oc replace -f {description file}
example of job
apiVersion: batch/v1
kind: Job
metadata:
  name: scenario-description
spec:
  template:
    spec:
      nodeSelector:
        composer: "true"
      containers:
      - name: scenario-description
        image: cc-artifactory.myserver.net/add-docker/scenario_description:0.23.3
        command: ["python", "-c", "import scenario_description"]
        env:
        - name: MAPR_TICKETFILE_LOCATION
          value: "/tmp/maprticket_202208"
        # set environment variable from pod metadata
        - name: PROJECT
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      restartPolicy: Never
  backoffLimit: 4
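possible way to run and follow the job above ( file name is an assumption, job name matches metadata.name ):
oc create -f scenario-description-job.yaml
oc get jobs
oc logs -f job/scenario-description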
oc set resources dc/{app-name} --limits=cpu=400m,memory=512Mi --requests=cpu=200m,memory=256Mi
oc autoscale dc/{app-name} --min 1 --max 5 --cpu-percent=40
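check the created autoscaler:
oc get hpa
oc describe hpa {app-name}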
oc debug pods/{name of the pod}
oc get pods --field-selector=status.phase=Running
oc rsh {name of pod}
# example of executing program on pod: kafka-test-app
oc exec kafka-test-app -- /usr/bin/java
oc cp api-server-256-txa8n:/usr/src/cert/keystore_server /my/local/path
oc port-forward <pod-name> <ext-port>:<int-port>
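usage sketch ( pod name and ports are assumptions ):
oc port-forward my-flask-pod 8080:5000
curl http://localhost:8080/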
oc new-app {/local/folder/to_source}
oc new-app https://github.com/openshift/ruby-ex.git
oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
new app from GIT with a specific builder image (centos/ruby-22-centos7), sub-folder and application name
oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git --context-dir=sub-project --name myruby
oc import-image jenkins:v3.7 --from='registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.7' --confirm -n openshift
oc logs pod/{name of pod}
oc logs --follow bc/{name of app}
oc describe job {job name}
oc describe pod {pod name}
# list of config maps
oc get configmap
# describe one of the config map
oc describe configmap data-api-config
oc describe configmap gatekeeper-config
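create a config map manually ( names and values are placeholders ):
oc create configmap my-config --from-literal=LOG_LEVEL=debug
oc get configmap my-config -o yaml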
oc policy add-role-to-user view -n {name of application/namespace} -z default
oc config view
the same as
cat ~/.kube/config
oc describe routes
look for "Requested Host:" in the output
oc delete {type} {type name}
- buildconfigs
- services
- routes
- ...
if your service looks like svc/web - 172.30.20.243:8080 instead of an external link like http://gateway-myproject.192.168.42.43.nip.io to pod port 8080 (svc/gateway), then you can "expose" it to the external world ( check example below ):
- oc expose services/{app name}
- oc expose service/{app name}
- oc expose svc/{app name}
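sketch of exposing and checking the generated hostname ( the route name defaults to the service name ):
oc expose svc/web
oc get route web -o jsonpath='{.spec.host}'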
# set readiness/liveness
oc set probe dc/{app-name} --liveness --readiness --get-url=http://:8080/health
# remove readiness/liveness
oc set probe dc/{app-name} --remove --liveness --readiness --get-url=http://:8080/health
# oc set probe dc/{app-name} --remove --liveness --readiness --get-url=http://:8080/health --initial-delay-seconds=30
# Set a readiness probe to try to open a TCP socket on 3306
oc set probe rc/mysql --readiness --open-tcp=3306
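check the configured probes ( grep is just a quick filter ):
oc describe dc/{app-name} | grep -i -A 2 liveness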
minishift ip
minishift console
kubectl config get-contexts
kubectl config current-context
kubectl api-versions
--> Success
    Build scheduled, use 'oc logs -f bc/web' to track its progress.
    Application is not exposed. You can expose services to the outside world by executing one or more of the commands below:
      'oc expose svc/web'
    Run 'oc status' to view your app.
apiVersion: batch/v1
kind: Job
metadata:
  name: scenario-description
spec:
  template:
    spec:
      containers:
      - name: scenario-description
        image: scenario_description:0.2.3
        command: ["python", "-c", "import scenario_description"]
      restartPolicy: Never
apiVersion: v1
kind: Pod
metadata:
  name: connect-to-me
spec:
  containers:
  - name: just-a-example
    image: busybox
    command: ["sleep", "36000"]
    volumeMounts:
    - mountPath: /source
      name: maprvolume-source
    - mountPath: /destination
      name: maprvolume-destination
  volumes:
  - name: maprvolume-source
    persistentVolumeClaim:
      claimName: pvc-scenario-input-prod
  - name: maprvolume-destination
    persistentVolumeClaim:
      claimName: pvc-scenario-output-prod
  restartPolicy: Never
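check that the claims referenced above exist and are bound:
oc get pv,pvc
oc describe pvc pvc-scenario-input-prod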
For a MapR cluster be aware of the dependency chain: MapR ticket-file -> Secret -> PersistentVolume -> PersistentVolumeClaim
creating secret
- login into mapr
echo $CLUSTER_PASSWORD | maprlogin password -user $CLUSTER_USER
- check secret for existence
oc get secrets -n $OPENSHIFT_NAMESPACE
- re-create secret
# delete secret
oc delete secret/volume-token-ground-truth
cat /tmp/maprticket_1000
# create secret from file
ticket_name="cluster-user--mapr-prd-ticket-1536064"
file_name=$ticket_name".txt"
project_name="tsa"
## copy file from cluster to local folder
scp -r {user}@{cluster node}:/full/path/to/$file_name .
oc create secret generic $ticket_name --from-file=$file_name -n $OPENSHIFT_NAMESPACE
oc create secret generic volume-token-ground-truth --from-file=CONTAINER_TICKET=/tmp/maprticket_1000 -n $OPENSHIFT_NAMESPACE
oc create secret generic volume-token-ground-truth --from-literal=CONTAINER_TICKET='dp.prod.zurich qEnHLE7UaW81NJaDehSH4HX+m9kcSg1UC5AzLO8HJTjhfJKrQWdHd82Aj0swwb3AsxLg==' -n $OPENSHIFT_NAMESPACE
- check created ticket
maprlogin print -ticketfile /tmp/maprticket_1000
oc describe secret volume-token-ground-truth
using the secret in a PersistentVolume / PersistentVolumeClaim
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-scenario-extraction-input
  namespace: scenario-extraction
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  claimRef:
    namespace: scenario-extraction
    name: pvc-scenario-extraction-input
  flexVolume:
    driver: "mapr.com/maprfs"
    options:
      platinum: "false"
      cluster: "dp.prod.munich"
      cldbHosts: "dpmesp000004.gedp.org dpmesp000007.gedp.org dpmesp000010.gedp.org dpmesp000009.gedp.org"
      volumePath: "/tage/data/store/processed/ground-truth/"
      securityType: "secure"
      ticketSecretName: "volume-token-ground-truth"
      ticketSecretNamespace: "scenario-extraction"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-scenario-extraction-input
  namespace: scenario-extraction
spec:
  accessModes:
  - ReadWriteMany
  volumeName: pv-scenario-extraction-input
  resources:
    requests:
      storage: 1G
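possible way to apply and verify the binding ( file name is an assumption ):
oc create -f pv-and-pvc.yaml
oc get pvc pvc-scenario-extraction-input -n scenario-extraction   # STATUS should be Bound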
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flask-pod
spec:
  selector:
    matchLabels:
      run: my-flask
  replicas: 1
  template:
    metadata:
      labels:
        run: my-flask
    spec:
      containers:
      - name: flask-test
        image: docker-registry.zur.local:5000/test-flask:0.0.1
        command: ["sleep","3600"]
        ports:
        - containerPort: 5000
---
apiVersion: v1
kind: Service
metadata:
  name: flask-service
  labels:
    run: my-flask
spec:
  ports:
  - port: 5000
    protocol: TCP
  selector:
    run: my-flask
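sketch of exposing and calling the flask service ( the hostname is generated by the cluster ):
oc expose svc/flask-service
curl http://$(oc get route flask-service -o jsonpath='{.spec.host}')/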
oc adm policy add-scc-to-user {scc name} {user name}
oc adm policy remove-scc-from-user {scc name} {user name}
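list available security context constraints; below a common sketch of granting one to a service account ( anyuid and default are assumptions ):
oc get scc
oc adm policy add-scc-to-user anyuid -z default -n {project name}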