
CKAD Commands

Non-Admin Commands


How the k apply command works

It compares:

  • the local file with the last applied configuration
  • the local file with the live object in Kubernetes

Where is the last applied config stored?

It is stored on the Kubernetes live object in the kubectl.kubernetes.io/last-applied-configuration annotation, as shown below.
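A trimmed sketch of how this annotation appears on an object created with kubectl apply (the pod shown here is only an example):

metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Pod","metadata":{"name":"nginx"},"spec":{"containers":[{"image":"nginx","name":"nginx"}]}}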

Commands for CKA/CKAD exam

-o yaml: This will output the resource definition in YAML format on screen.

--dry-run: By default, as soon as the command is run, the resource is created. If you simply want to test your command, use the --dry-run=client option. This will not create the resource; instead, it tells you whether the resource can be created and whether your command is correct.

# replace the file with temp created yaml file for forbidden updates
k replace --force -f <file-name.yaml>

# create an nginx pod
k run nginx --image=nginx

# Generate a pod manifest using dry-run (does not create the resource) and save it to a local file
k run nginx --image=nginx --dry-run=client -o yaml > pod.yaml

# Generate a deployment manifest using dry-run (does not create the resource)
k create deployment --image=nginx dep-name --dry-run=client -o yaml > dep.yaml

# Generate a deployment manifest with 4 replicas using dry-run (does not create the resource)
k create deployment --image=nginx dep-name --replicas=4 --dry-run=client -o yaml > dep.yaml

# Scale deployment
kubectl scale deployment nginx --replicas=4

# Create a Service named redis-service of type ClusterIP to expose pod redis on port 6379
kubectl expose pod redis --port=6379 --name redis-service --dry-run=client -o yaml

# Create a Service named nginx-service of type NodePort to expose pod nginx's port 80 on port 30080 on the nodes
# This will automatically use the pod's labels as selectors, but you cannot specify the node port.
kubectl expose pod nginx --type=NodePort --port=80 --name=nginx-service --dry-run=client -o yaml
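Since kubectl expose cannot set the node port, one approach is to save the generated YAML and add the nodePort field by hand before creating it. A minimal sketch, assuming the pod was created with kubectl run nginx (which labels it run=nginx):

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    run: nginx
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080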

# create a pod with labels
k run redis  --image=redis:alpine --labels=tier=db

# select the resources that carry the node-name label
k get po,deploy,ep -l kubernetes.io/hostname=<node-name>

Namespace

# create a namespace from a manifest file
kubectl create -f custom-namespace.yaml
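A minimal sketch of what custom-namespace.yaml might contain (the file itself is not shown in these notes):

apiVersion: v1
kind: Namespace
metadata:
  name: custom-namespace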

# using the imperative command
kubectl create namespace custom-namespace

# create a resource in namespace
kubectl create -f kubia-manual.yaml -n custom-namespace

# delete namespace
kubectl delete ns custom-namespace

Get the detailed API request logs (verbosity level 6)

k get po -v=6

Pod

# create a pod using dry-run
kubectl run busybox --image=busybox --restart=Never --dry-run=client -o yaml > testPod.yaml

# create a pod with some commands using dry-run
kubectl run busybox-with-commands --image=busybox --restart=Never --dry-run=client -o yaml -- /bin/sh -c "sleep 3600; ls" > testPod.yaml

# set the image (nginx is the container name)
kubectl set image pod podname nginx=nginx:1.15-alpine

# sometimes it is quicker to edit the pod directly and make small changes
kubectl edit pod podName

# Create a pod named amardev with nginx version 1.17.4 and container port 80
kubectl run amardev --image=nginx:1.17.4 --restart=Never --port=80

# add a command to the pod spec by editing it
command: ['/bin/bash', '-c', 'sleep 5000']

# add args to the container in the pod spec
args: ['--color', 'green']
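For context, a minimal pod sketch showing where these fields sit; command overrides the image ENTRYPOINT and args overrides the image CMD (pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: sleeper
spec:
  containers:
  - name: sleeper
    image: busybox
    # overrides the image ENTRYPOINT
    command: ['sleep']
    # overrides the image CMD
    args: ['5000']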
# get logs for nginx pod
kubectl logs nginx

# get the logs of the previous (restarted) container instance of the pod
kubectl logs nginx -p
# just open the terminal for the pod with one container
kubectl exec -it nginx -- /bin/sh

# echo hello world in the container
kubectl exec -it nginx -c containerName -- /bin/sh -c 'echo hello world'

# exec into a specific container of a multi-container pod and read a file
kubectl exec -it multi-cont-pod -c main-container -- cat /var/log/main.txt
# get all pods
kubectl get pods

# get info for particular pod
kubectl get po kubia-liveness

# show labels while showing pods
kubectl get pods --show-labels

# select pods with multiple labels
kubectl get all --selector env=prod,bu=finance,tier=frontend

# Warn: delete all pods
kubectl delete po --all 

# Warning: delete all resources of the common types (pods, deployments, services, ...) in the current namespace
kubectl delete all --all

# delete pod
kubectl delete po podName

# Important: delete pods using labels
kubectl delete po -l creation_method=manual

# get resource metrics for the nodes or pods (requires metrics-server)
kubectl top node
kubectl top pod
# See the pod logs
kubectl logs podName | less

# Tail the pod logs using `-f`
kubectl logs -f podName

Labels and annotations

# show labels
kubectl get pods --show-labels

# apply label
kubectl run nginx-dev1 --image=nginx --restart=Never --labels=env=dev

# Get the pods with label env=dev
kubectl get pods -l env=dev --show-labels

# show labels which env in dev and prod
kubectl get pods -l 'env in (dev,prod)'

# update the label with overwrite
kubectl label po podone area=montreal --overwrite

# remove label named env
kubectl label pods podone env-

# show the labels for the nodes
kubectl get nodes --show-labels
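A node label can then be used to pin a pod to that node with nodeSelector; a minimal pod spec fragment (the size=Large label is just an example):

spec:
  nodeSelector:
    size: Large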

# Annotate the pod with name=webapp
kubectl annotate po podone name=webapp

Configmap

# create a configmap
kubectl create configmap <cm-name> --from-literal=special.how=very --from-literal=special.type=charm

# Example
k create cm webapp-config-map --from-literal=APP_COLOR=darkblue --from-literal=APP_OTHER=disregard
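The keys of a ConfigMap can be injected into a pod as environment variables; a minimal container spec fragment assuming the webapp-config-map created above (container name and image are placeholders):

spec:
  containers:
  - name: webapp
    image: nginx
    envFrom:
    - configMapRef:
        name: webapp-config-map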

Env variables

# set an env variable while creating a pod
kubectl run nginx --image=nginx --restart=Never --env=var1=val1

# get all of the env variables for a pod
kubectl exec -it nginx -- env

Logs

# check logs for containers busybox1 and busybox2 in the pod busybox
kubectl logs busybox -c busybox1
kubectl logs busybox -c busybox2

# Check the previous logs of the second container busybox2 if any
kubectl logs busybox -c busybox2 --previous

# Run command ls in the third container busybox3 of the above pod
kubectl exec busybox -c busybox3 -- ls

# Show metrics of the above pod's containers and write them to file.log
kubectl top pod busybox --containers > file.log

Deployment

# create a deployment with a name and replicas of 3
kubectl create deployment webapp --image=nginx --replicas=3

# scale deployment to have 20 replicas
kubectl scale deploy webapp --replicas=20

# get the rollout status
kubectl rollout status deploy webapp

# get the rollout history
kubectl rollout history deploy webapp

# delete the deployment and wait until it is fully removed (--wait is on by default)
kubectl delete deployment webapp --wait

# update the image in the deployment. Note that here nginx is the container name
# set image will cause a rollout to happen, so you can check the status using the `kubectl rollout status` command
kubectl set image deployment depName nginx=nginx:1.17.4

# undo the deployment to revision 1
kubectl rollout undo deployment webapp --to-revision=1

#  Check the history of the specific revision of that deployment
kubectl rollout history deployment webapp --revision=3

# pause the rollout
kubectl rollout pause deployment webapp

# resume the rollout
kubectl rollout resume deployment webapp

# autoscale a deployment (this creates a HorizontalPodAutoscaler)
kubectl autoscale deployment webapp --min=5 --max=10 --cpu-percent=80

Jobs

# a job creates a pod and then finishes when the work is done
# we can then see the output of that job using the logs command for the pod it created
kubectl create job node-version --image=node -- node -v

# create a job that prints the current time
kubectl create job time-job --image=nginx -- /bin/sh -c "date; echo 'time container'"
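Once a job has completed, its output can be read from the pod it created; a brief sketch using the time-job example above:

# list jobs and their completion status
kubectl get jobs

# read the logs of the pod created by the job
kubectl logs job/time-job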

# delete all the jobs
kubectl delete jobs --all

Cron Jobs

# get all the cronjobs
kubectl get cj

# create a cronjob which will show the time every minute
kubectl create cronjob timecj  --image=nginx --schedule="*/1 * * * *" -- /bin/sh -c "date"

Secret

# create a secret and add some values
kubectl create secret generic db-secret --from-literal=DB_Host=sql01

Base64 encode/decode

echo -n 'Lets learn K8S' | base64
echo -n 'TGV0cyBsZWFybiBLOFM=' | base64 --decode

Data secret

In this secret, the values are base64 encoded as shown below

apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm

Data + stringData secret

apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  username: YWRtaW4=
stringData:
  name: Amarjit
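A pod can consume such a secret as environment variables; a minimal container spec fragment using the mysecret example above (container name and image are placeholders):

spec:
  containers:
  - name: webapp
    image: nginx
    env:
    - name: DB_USERNAME
      valueFrom:
        secretKeyRef:
          name: mysecret
          key: username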

See Secret contents

# show the (base64-encoded) data of the secret
kubectl get secret sampleSecret -o jsonpath='{.data}'

# decode a single key (here username) from the secret
kubectl get secret sampleSecret -o jsonpath='{.data.username}' | base64 --decode

This lets us inspect the contents of a secret after it has been created.

HPA

# horizontal pod autoscaling
kubectl get hpa
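For reference, a minimal autoscaling/v2 manifest roughly equivalent to the kubectl autoscale command shown in the Deployment section (a sketch, not part of the original notes):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: webapp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: webapp
  minReplicas: 5
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80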

Taint and Toleration

# creating taints
kubectl taint node nodename key=value:NoSchedule

# Example
k taint node node01 spray=mortein:NoSchedule

# see the taints on a node using describe
kubectl describe node nodename | grep -i taint

# remove a taint from the node by adding - at the end of the taint
kubectl taint node nodename node-role.kubernetes.io/master:NoSchedule-

# Add a toleration to the pod spec, as shown below
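A sketch of a toleration matching the spray=mortein:NoSchedule taint from the example above; tolerations go under the pod spec:

spec:
  tolerations:
  - key: "spray"
    operator: "Equal"
    value: "mortein"
    effect: "NoSchedule"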

Role

# create a role
kubectl create role developer --namespace=default --verb=list,create,delete --resource=pods

# create a role binding
kubectl create rolebinding dev-user-binding --namespace=default --role=developer --user=dev-user

Security Context

# to set the user and group ids we can define a security context at the pod or container level
securityContext:
  runAsUser: 1000
  runAsGroup: 1010

# capabilities are added at the container level, not at the pod level
containers:
- securityContext:
    capabilities:
      add: ['SYS_TIME']
    runAsUser: 1000
    runAsGroup: 1010

Node Affinity

# use the below affinity in the pod spec
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: labelKey
          operator: In
          values:
          - labelValue

Notes

  1. NodeSelector is used in the pod if we want it to get allocated to a particular node
  2. A Job creates one or more Pods and ensures that a specified number of them successfully terminate. As pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (ie, Job) is complete. Deleting a Job will clean up the Pods it created.
  3. A hostPath volume mounts a file or directory from the host node’s filesystem into your Pod.
  4. A pod talks to the API server using its service account
  5. Use node affinity to place pods on the right nodes: first apply a label to the node, then reference that label in the pod's node affinity
  6. With a startup probe configured with failureThreshold: 30 and periodSeconds: 10, the application has a maximum of 5 minutes (30 * 10 = 300s) to finish its startup. Once the startup probe has succeeded once, the liveness probe takes over to provide a fast response to container deadlocks. If the startup probe never succeeds, the container is killed after 300s and is subject to the pod's restartPolicy (see the sketch after this list)
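A sketch of the startup/liveness probe combination described in note 6 (the container name, image, endpoint and port are assumptions):

containers:
- name: webapp
  image: webapp-image
  startupProbe:
    httpGet:
      path: /healthz
      port: 8080
    failureThreshold: 30
    periodSeconds: 10
  livenessProbe:
    httpGet:
      path: /healthz
      port: 8080
    periodSeconds: 10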

See special chars in VIM

use :set list to show the special chars and :set nolist to go back to normal

Admin Commands

Get current context

# show all contexts (detailed output)
k config get-contexts

# just the context name
k config current-context

See the access level

Check the access level for yourself

# Check if you can create pod
k auth can-i create pod 

# Can I delete node?
k auth can-i delete node

# check all you can access
 k auth can-i --list=true

Check access for someone else

Only an admin can check this.

# Can the dev user delete nodes?
k auth can-i delete node --as dev-user

See list of resources

Namespaced Resources
k api-resources --namespaced=true

Non-Namespaced Resources
k api-resources --namespaced=false

See resources without headers

# get the pods
k get po --no-headers

# count pods
k get pods --no-headers | wc -l

ps aux  # see process names

cat /etc/hosts # local DNS

ip link   # list network interfaces and their state

# example to see the MAC address; here the state is shown as UP
controlplane ~ ➜  ip link show cni0
3: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP mode DEFAULT group default qlen 1000
    link/ether d6:8c:12:d8:fe:88 brd ff:ff:ff:ff:ff:ff
ip addr   # show IP addresses assigned to the interfaces

ip addr add 192.168.1.2/24 dev eth0   # assign an IP address to eth0

ip route   # show the routing table

ip route add 192.168.1.0/24 via 192.168.2.1   # add a route to the 192.168.1.0/24 network via gateway 192.168.2.1

arp   # show the ARP cache (IP to MAC mappings)


route   # legacy command to show the kernel routing table

See the default gateway and all routes:
controlplane ~ ➜  ip route
default via 172.25.0.1 dev eth1 
10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1 
10.244.1.0/24 via 10.244.1.0 dev flannel.1 onlink 
172.25.0.0/24 dev eth1 proto kernel scope link src 172.25.0.70 
192.25.128.0/24 dev eth0 proto kernel scope link src 192.25.128.12 

# See the default route
ip route show default

Check all the ports the services are listening on using netstat -nltp

Example
controlplane ~ ➜  netstat -nplt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:10257         0.0.0.0:*               LISTEN      3563/kube-controlle 
tcp        0      0 127.0.0.1:10259         0.0.0.0:*               LISTEN      3496/kube-scheduler 
tcp        0      0 127.0.0.1:35253         0.0.0.0:*               LISTEN      1074/containerd     
tcp        0      0 127.0.0.53:53           0.0.0.0:*               LISTEN      640/systemd-resolve 
tcp        0      0 127.0.0.11:34293        0.0.0.0:*               LISTEN      -                   
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1079/sshd: /usr/sbi 
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      4526/kubelet        
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      4988/kube-proxy     
tcp        0      0 192.25.128.12:2379      0.0.0.0:*               LISTEN      3550/etcd           
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      3550/etcd           
tcp        0      0 192.25.128.12:2380      0.0.0.0:*               LISTEN      3550/etcd           
tcp        0      0 127.0.0.1:2381          0.0.0.0:*               LISTEN      3550/etcd           
tcp        0      0 0.0.0.0:8080            0.0.0.0:*               LISTEN      1066/ttyd           
tcp6       0      0 :::22                   :::*                    LISTEN      1079/sshd: /usr/sbi 
tcp6       0      0 :::8888                 :::*                    LISTEN      4558/kubectl        
tcp6       0      0 :::10250                :::*                    LISTEN      4526/kubelet        
tcp6       0      0 :::6443                 :::*                    LISTEN      3551/kube-apiserver 
tcp6       0      0 :::10256                :::*                    LISTEN      4988/kube-proxy     

Check service status

# ssh to the worker node
ssh workerNodeName

# get the services
systemctl list-units --type=service --state=active

# check logs of kubelet service
journalctl -u kubelet

# kubernetes config and static pod manifest files live under
/etc/kubernetes/*
