Container Orchestration: A Practical Guide
James Sherry, Web Development Tutor and Co-Founder of { The Jump } Digital School
# docker-compose.yml
version: "3.0"
services:
  registry:
    container_name: docker-registry
    restart: always
    image: registry:2
    ports:
      - 6000:5000
    volumes:
      - docker-registry-data:/var/lib/registry
volumes:
  docker-registry-data: {}
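With that file saved as docker-compose.yml, the local registry can be started in the background (a minimal sketch):
docker compose up -d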
docker build -t localhost:6000/basic-node-server:0.0.1 .
docker push localhost:6000/basic-node-server:0.0.1
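If the push succeeded, the registry's HTTP API should now list the repository (assuming the registry is still mapped to port 6000 as in the compose file above):
curl http://localhost:6000/v2/_catalog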
https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
sudo kubectl create secret generic regcred --from-file=.dockerconfigjson=/Users/jamessherry/.docker/config.json --type=kubernetes.io/dockerconfigjson
imagePullSecrets:
- name: regcred
alias k='kubectl'
kubectl version
kubectl cluster-info
kubectl get all
kubectl run [pod-name] --image=[image-name]
kubectl port-forward [pod] [ports]
kubectl expose ...
kubectl create [resource]
kubectl apply -f [file]
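As a concrete (hypothetical) walk-through of those commands, using the image pushed to the local registry earlier (names are illustrative):
kubectl run basic-server --image=localhost:6000/basic-node-server:0.0.1
kubectl get all                              # check the pod is Running
kubectl port-forward basic-server 8080:80    # then browse to http://localhost:8080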
You can monitor your cluster using the command line,
BUT you can also use a GUI: the Kubernetes Dashboard.
You need to create it as a 'service' (see later)
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.1/aio/deploy/recommended.yaml
# backup
kubectl apply -f https://kube-dashboard-recommended.s3.eu-west-2.amazonaws.com/recommended.yml
To start it:
kubectl proxy
To create a login user:
Follow: https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
Get a token:
kubectl create token admin-user -n kube-system  # use the namespace you created admin-user in (the guide above uses kubernetes-dashboard)
Copy the (very long) token and paste it into the dashboard's login screen.
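For reference, the linked guide boils down to roughly this ServiceAccount plus ClusterRoleBinding (a sketch; it places admin-user in the kubernetes-dashboard namespace, so adjust the -n flag of the token command to match):
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard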
One control-plane (master) node controls several worker nodes
Secrets (likely configured by your cluster admin)
apiVersion: v1
kind: ConfigMap
metadata:
  name: db-config
  labels:
    app: db-config
data:
  host: localhost
  port: "27017"   # ConfigMap values must be strings, so quote numbers
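Assuming the manifest above is saved as db-config.yaml (the file name is hypothetical), it can be applied and inspected like this:
kubectl apply -f db-config.yaml
kubectl get configmap db-config -o yaml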
apiVersion: apps/v1
spec:
  ...
  template:
    spec:
      ...
      containers:
        ...
        env:
          - name: DBPORT
            valueFrom:
              configMapKeyRef:
                name: db-config   # the ConfigMap to read from
                key: port         # the key within it
apiVersion: apps/v1
spec:
  ...
  template:
    spec:
      ...
      containers:
        ...
        envFrom:
          - configMapRef:        # imports every key in the ConfigMap as an env var
              name: db-config
apiVersion: apps/v1
spec:
  ...
  template:
    spec:
      ...
      volumes:
        - name: db-config-vol
          configMap:
            name: db-config
      containers:
        ...
        volumeMounts:
          - name: db-config-vol
            mountPath: /etc/config/
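Each key becomes a file under the mount path, which can be verified inside a running pod (the pod name here is hypothetical):
kubectl exec basic-server -- ls /etc/config/
kubectl exec basic-server -- cat /etc/config/port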
apiVersion: apps/v1
spec:
  ...
  template:
    spec:
      ...
      containers:
        ...
        env:
          - name: DBPASSWORD
            valueFrom:
              secretKeyRef:
                name: db-pass    # the Secret to read from
                key: db-config   # the key within it
apiVersion: apps/v1
spec:
  ...
  template:
    spec:
      ...
      volumes:
        - name: secrets
          secret:
            secretName: db-password
      containers:
        ...
        volumeMounts:
          - name: secrets
            mountPath: /etc/db-password
            readOnly: true
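The examples above assume Secrets named db-pass (with a db-config key) and db-password already exist; for local testing they could be created imperatively (the values are placeholders):
kubectl create secret generic db-pass --from-literal=db-config='s3cr3t'
kubectl create secret generic db-password --from-literal=password='s3cr3t'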
Create a deployment
kubectl create deployment nginx --image=nginx
Expose it (locally, via port-forward)
kubectl port-forward [pod-name] 8080:80
Delete a deployment (service, etc.)
kubectl delete <resource type> <resource name>
e.g. kubectl delete deployment nginx
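Port-forwarding is only a local tunnel; to expose the deployment as a real Service you would use kubectl expose, e.g. (a sketch):
kubectl expose deployment nginx --port=80 --type=NodePort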
If you remember, at the beginning we said that doing things manually takes time.
TRUE, K8s lets us batch those tasks and make them easier, but at scale it's still the same problem.
Is there a way we can write down a description of everything we want to happen in advance and then just launch K8s, passing it that description?
Also, what format should that take?
The answer to all of that is YES, and YAML.
YAML can be used to describe and configure/launch ANY K8s structure!
---
apiVersion: v1
kind: Pod
metadata:
  name: basic-server
  namespace: demo
  labels:
    app: web
spec:
  containers:
    - name: front-end
      image: basic-node-server:0.0.1
      ports:
        - containerPort: 80
  imagePullSecrets:
    - name: regcred
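These manifests all target namespace: demo, so that namespace has to exist before anything is applied (the file name below is hypothetical):
kubectl create namespace demo
kubectl apply -f basic-server-pod.yaml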
---
apiVersion: v1
kind: Pod
metadata:
  name: basic-server
  namespace: demo
  labels:
    app: web
spec:
  containers:
    - name: front-end
      image: localhost:6000/basic-node-server:0.0.1
      readinessProbe:
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 5
        periodSeconds: 30
      livenessProbe:
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 5
        timeoutSeconds: 2
        periodSeconds: 30
        failureThreshold: 1
      ports:
        - containerPort: 80
  imagePullSecrets:
    - name: regcred
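Probe failures show up in the pod's events, so a quick way to check both probes is:
kubectl describe pod basic-server -n demo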
---
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: basic-server
  namespace: demo
  labels:
    app: web
spec:
  # modify replicas according to your case
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
        - name: front-end
          image: localhost:6000/basic-node-server:0.0.1
          ports:
            - containerPort: 80
      imagePullSecrets:
        - name: regcred
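The replica count can also be changed after the fact without editing the file (a sketch):
kubectl scale replicaset basic-server --replicas=5 -n demo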
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: basic-server
  namespace: demo
  labels:
    app: web
spec:
  replicas: 5
  strategy:
    type: Recreate # choose a strategy
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: front-end
          image: localhost:6000/basic-node-server:0.0.1
          ports:
            - containerPort: 80
      imagePullSecrets:
        - name: regcred
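A Deployment adds rollout management on top of a ReplicaSet, which you can drive from the CLI:
kubectl rollout status deployment/basic-server -n demo
kubectl rollout undo deployment/basic-server -n demo    # roll back to the previous revision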
---
apiVersion: v1
kind: Service
metadata:
  name: basic-server-service
  namespace: demo
spec:
  type: NodePort
  selector:
    app: web
  ports:
    # By default and for convenience, the `targetPort` is set to the same value as the `port` field.
    - port: 80
      targetPort: 80
      # Optional field
      # By default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767)
      nodePort: 30007
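Once applied, the service should show the allocated node port; on a local single-node cluster (e.g. Docker Desktop) the app is then reachable on localhost at that port:
kubectl get service basic-server-service -n demo
# then browse to http://localhost:30007 (assuming the nodePort above)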
gcloud components install gke-gcloud-auth-plugin # required for kubectl to authenticate against GKE
gcloud auth login # log in via browser
gcloud projects list # look for your project
gcloud config set project kube-demo-310004 # set it as the current project
# do this to add a kubectl 'context' (it will also appear in Docker Desktop)
# autopilot-cluster-1 is the name of the cluster, europe-west1 the region it's in
gcloud container clusters get-credentials autopilot-cluster-1 --region europe-west1
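To confirm kubectl is now pointed at the GKE cluster:
kubectl config current-context
kubectl get nodes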