27.4.2016
Docker meetup
Slides: http://bit.ly/1SmqIyS
Developer. IBMer. Vi(m) lover. DevOps kid. Runner. Working on cloud infrastructure and application architecture, with a passion for things on the bleeding edge.
Reach me on Twitter as @epcim.
Pavel Čižinský, @LotharKAtt
Cloud Junior at tcp cloud. Proud Debian user.
Filters
Strategies
Rescheduling
Placement decisions
Right now
Under Dev
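For reference, the scheduling strategy and the active filters are chosen when the Swarm manager starts; a minimal sketch (the consul address is only a placeholder for whatever discovery backend is used):
# sketch: pick a strategy and enable selected filters on the manager
docker run -d -p 3376:3376 swarm manage \
--strategy binpack \
--filter constraint --filter affinity --filter health \
consul://<kv-host>:8500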
##
# Default off
# environment variable
docker run -d -e reschedule:on-node-failure redis
# or as label
docker run -d -l 'com.docker.swarm.reschedule-policy=["on-node-failure"]' redis
##
# Swarm manager logs:
ERRO[2173] Flagging engine as unhealthy. Connect failed 3 times id=VD3Q:XLOA:U2FH:TW7F:LMDM:YK3I:ZF4L:LRMM:X2GF:EI5X:NUEY:TQMP
name=swarm-node-1
INFO[2191] Rescheduled container d96a624fcf8b52488b5f057f7032b8666f1c338c7d62ac5a9f6c8aa3cb6d330c
from swarm-node-1 to swarm-node-2 as e85a6056d533d21c4334738a866d4349a1ce72abbf2917735f976ef469debd49
INFO[2191] Container d96a624fcf8b52488b5f057f7032b8666f1c338c7d62ac5a9f6c8aa3cb6d330c
was running, starting container e85a6056d533d21c4334738a866d4349a1ce72abbf2917735f976ef469debd49
# Start an Nginx container on an edge node
$ docker run -d -p 80:80 \
--name edge_webserver \
-e constraint:zone==external \
-e constraint:disk==ssd \
-t nginx:latest
# Start an app container on the same Docker host
$ docker run -d -p 5000 \
--name app_1 \
-e affinity:container==edge_webserver \
-t app:latest
# Or with labels
$ docker run -itd --label 'com.docker.swarm.affinities=["container==c1"]' busybox
$ docker run -itd --label 'com.docker.swarm.constraints=["disk==ssd"]' busybox
# Some possible expressions:
constraint:node==node1 # matches node node1.
constraint:node!=node1 # matches all nodes, except node1.
constraint:region!=us* # matches all nodes whose region tag is not prefixed with us.
constraint:node==/node[12]/ # matches nodes node1 and node2.
constraint:node==/node\d/ # matches all nodes with node + 1 digit.
constraint:node!=/node-[01]/ # matches all nodes, except node-0 and node-1.
constraint:node!=/foo\[bar\]/ # matches all nodes, except foo[bar]. You can see the use of escape characters here.
constraint:node==/(?i)node1/ # matches node node1 case-insensitive. So NoDe1 or NODE1 also match.
affinity:image==~redis # soft affinity: prefers nodes that already run a container based on the redis image
constraint:region==~us* # soft constraint: prefers nodes in a region prefixed with us
affinity:container!=~redis* # soft anti-affinity: prefers a node that has no container matching redis*
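For the zone/disk constraints above to match anything, the engines need matching labels; a small sketch with docker-machine (the node name edge-node-1 is made up):
# sketch: engine labels the constraint filter can match against
docker-machine create -d virtualbox \
--engine-label zone=external \
--engine-label disk=ssd \
edge-node-1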
$ docker-machine stop default
$ docker-machine create -d virtualbox manager
$ docker-machine create -d virtualbox agent1
$ docker-machine create -d virtualbox agent2
$ eval $(docker-machine env manager)
$ TOKEN=$(docker run --rm swarm create) # using discovery service on docker hub
$ docker run -d -p 3376:3376 -t -v /var/lib/boot2docker:/certs:ro swarm manage -H 0.0.0.0:3376 \
--tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server.pem --tlskey=/certs/server-key.pem \
token://$TOKEN
$ eval $(docker-machine env agent1)
$ docker run -d swarm join --addr=$(docker-machine ip agent1):2376 token://$TOKEN
$ eval $(docker-machine env agent2)
$ docker run -d swarm join --addr=$(docker-machine ip agent2):2376 token://$TOKEN
$ export DOCKER_HOST=tcp://$(docker-machine ip manager):3376
$ docker info
$ docker ps
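Optionally, the token discovery can be verified from any shell; a quick sketch:
$ # sketch: list the nodes registered under the token
$ docker run --rm swarm list token://$TOKEN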
# get a token from the hosted discovery service (Docker Hub)
export TOKEN=$(docker run swarm create)
# start first node with manager
docker-machine create \
--engine-opt cluster-store=token://$TOKEN \
-d virtualbox \
--swarm \
--swarm-master \
--swarm-discovery token://$TOKEN \
--swarm-strategy spread \
--swarm-opt heartbeat=5s \
--engine-label foo=bar \
--engine-label spam=eggs \
swarm-manager
# start other nodes of cluster
docker-machine create \
--engine-opt cluster-store=token://$TOKEN \
-d virtualbox \
--swarm \
--swarm-discovery token://$TOKEN \
--swarm-opt heartbeat=5s \
--engine-label foo=baz \
swarm-node-1
docker-machine create \
--engine-opt cluster-store=token://$TOKEN \
-d virtualbox \
--swarm \
--swarm-discovery token://$TOKEN \
--swarm-opt heartbeat=5s \
--engine-label foo=baz \
swarm-node-2
# use cluster
eval "$(docker-machine env -swarm swarm-manager)"
docker info
docker ps
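The engine labels set above (foo=bar on the manager, foo=baz on the nodes) can drive placement immediately; a sketch:
# sketch: keep this container off the manager by requiring foo==baz
docker run -d --name cache -e constraint:foo==baz redis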
# start a local KV store (Consul)
docker run -d -p 8500:8500 --name=consul --restart=always progrium/consul -server -bootstrap
CONSUL=$(hostname -I | awk '{print $1}'):8500
MNGRS=(manager-1 manager-2)
NODES=(front-node-1 app-node-2 data-node-3)
# start manager nodes
for n in ${MNGRS[@]}; do
# cluster-advertise, --swarm-strategy and heartbeat are optional; engine labels are examples to customize
# the host label becomes eg: manager, front, app, data
docker-machine create \
--engine-opt cluster-store=consul://$CONSUL \
--engine-opt cluster-advertise=eth1:2376 \
-d virtualbox \
--swarm \
--swarm-master \
--swarm-discovery consul://$CONSUL \
--swarm-strategy spread \
--swarm-opt heartbeat=5s \
--swarm-opt replication \
--swarm-opt advertise=eth1:3376 \
--engine-label foo=bar \
--engine-label host=${n//-*/} \
$n
done
# start workload nodes of cluster
for n in ${NODES[@]}; do
# heartbeat is optional; engine labels are examples to customize (the host label becomes eg: front, app, data)
# add some driver options for CPU, RAM, STORAGE as needed: https://docs.docker.com/machine/drivers/
docker-machine create \
--engine-opt cluster-store=consul://$CONSUL \
-d virtualbox \
--swarm \
--swarm-discovery consul://$CONSUL \
--swarm-opt heartbeat=5s \
--engine-label foo=baz \
--engine-label spam=eggs \
--engine-label host=${n//-*/} \
$n
# add registrator at each node
eval "$(docker-machine env ${n})"
docker run -d --name=registrator -h $(docker-machine ip ${n}) \
-v=/var/run/docker.sock:/tmp/docker.sock \
gliderlabs/registrator:latest \
consul://$CONSUL
done
# use cluster
eval "$(docker-machine env -swarm manager-1)"
docker info
docker network create --subnet 10.10.10.0/24 swarm
docker network ls
docker ps
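Since registrator publishes container ports to Consul, the service catalog can be queried directly; a sketch against the Consul HTTP API:
# sketch: list services registered by registrator
curl http://$CONSUL/v1/catalog/services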
Finally right!
user@my-machine $ docker info
Containers: 0
Images: 25
Storage Driver:
Role: Primary #<--------- manager-1 is the Primary manager
Primary: 192.168.42.200
Strategy: spread
Filters: affinity, health, constraint, port, dependency
Nodes: 3
swarm-agent-0: 192.168.42.100:2375
└ Containers: 0
└ Reserved CPUs: 0 / 1
└ Reserved Memory: 0 B / 2.053 GiB
└ Labels: executiondriver=native-0.2, kernelversion=3.13.0-49-generic, operatingsystem=Ubuntu 14.04.2 LTS, storagedriver=aufs
swarm-agent-1: 192.168.42.101:2375
└ Containers: 0
└ Reserved CPUs: 0 / 1
└ Reserved Memory: 0 B / 2.053 GiB
└ Labels: executiondriver=native-0.2, kernelversion=3.13.0-49-generic, operatingsystem=Ubuntu 14.04.2 LTS, storagedriver=aufs
swarm-agent-2: 192.168.42.102:2375
└ Containers: 0
└ Reserved CPUs: 0 / 1
└ Reserved Memory: 0 B / 2.053 GiB
└ Labels: executiondriver=native-0.2, kernelversion=3.13.0-49-generic, operatingsystem=Ubuntu 14.04.2 LTS, storagedriver=aufs
# network create
docker network create -d overlay dev-www
# docker network ls
NETWORK ID NAME DRIVER
a4d45b64d6b0 app-node-2/bridge bridge
4231de0c7baa app-node-2/host host
13765007b25f app-node-2/none null
ad4371891dee data-node-3/bridge bridge
95ffa1b57328 data-node-3/host host
354cc6d91fac data-node-3/none null
b3abccb078ca front-node-1/bridge bridge
a0b039bdfa2d front-node-1/host host
fe0509247bd8 front-node-1/none null
d6abd3ae9810 manager-1/bridge bridge
3fb5ef9bbb18 manager-1/host host
78c6834b6e71 manager-1/none null
2a2ac374f2ca manager-2/bridge bridge
44d3a3b76786 manager-2/host host
0e91607fdf99 manager-2/none null
9eb82f97c6fa dev-www overlay
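The overlay can be inspected through the Swarm endpoint like any other network; a sketch:
# sketch: show the subnet and attached containers of the overlay
docker network inspect dev-www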
# docker run --net test1 -m 64m -d -p 8082:80 tutum/wordpress
bd1a41d7c1bb030f4f77abb6edd318542f2cba14deb4cc03c5ceadb74291bf7e
# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
bd1a41d7c1bb tutum/wordpress "/run.sh" 2 minutes ago Up 2 minutes 3306/tcp, 192.168.99.116:8082->80/tcp front-node-1/goofy_williams
9e2c46278eec redis "docker-entrypoint.sh" 12 minutes ago Up 12 minutes 6379/tcp manager-2/testredis
11f3846446e9 busybox "sh" 22 minutes ago Up 22 minutes manager-1/ecstatic_brahmagupta
c259f74e1660 gliderlabs/registrator:latest "/bin/registrator con" 24 minutes ago Up 24 minutes data-node-3/registrator
44c16500ee75 gliderlabs/registrator:latest "/bin/registrator con" 26 minutes ago Up 26 minutes app-node-2/registrator
84110f05cc19 gliderlabs/registrator:latest "/bin/registrator con" 27 minutes ago Up 27 minutes front-node-1/registrator
# registrator adds wordpress to Consul, log entry
# 2016/04/27 14:26:37 added: 0281638846de 192.168.99.117:adoring_saha:80
# docker network ls
NETWORK ID NAME DRIVER
...
b3abccb078ca front-node-1/bridge bridge
fc338f67bcc6 front-node-1/docker_gwbridge bridge
a0b039bdfa2d front-node-1/host host
fe0509247bd8 front-node-1/none null
...
9eb82f97c6fa dev-www overlay
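Containers attached to the same overlay reach each other by name across hosts; a minimal sketch (the container names web and probe are made up):
# sketch: cross-host connectivity test over the overlay
docker run -d --net dev-www --name web nginx:latest
docker run --rm --net dev-www --name probe busybox ping -c 3 web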
turns a pool of Docker hosts into a single, virtual Docker host
Available as a hosted service:
##
# Default off
# environment variable
docker run -d -e reschedule:on-node-failure redis
# or as label
docker run -d -l 'com.docker.swarm.reschedule-policy=["on-node-failure"]' redis
##
# Swarm manager logs:
ERRO[2173] Flagging engine as unhealthy. Connect failed 3 times id=VD3Q:XLOA:U2FH:TW7F:LMDM:YK3I:ZF4L:LRMM:X2GF:EI5X:NUEY:TQMP
name=swarm-node-1
INFO[2191] Rescheduled container d96a624fcf8b52488b5f057f7032b8666f1c338c7d62ac5a9f6c8aa3cb6d330c
from swarm-node-1 to swarm-node-2 as e85a6056d533d21c4334738a866d4349a1ce72abbf2917735f976ef469debd49
INFO[2191] Container d96a624fcf8b52488b5f057f7032b8666f1c338c7d62ac5a9f6c8aa3cb6d330c
was running, starting container e85a6056d533d21c4334738a866d4349a1ce72abbf2917735f976ef469debd49
ERRO[2504] Update engine specs failed: Cannot connect to the Docker daemon.
Is the docker daemon running on this host? id=CRIG:LKCV:DK5O:JWHO:AHYW:LCVF:OGZW:KJT4:FSEB:FWXL:77JR:EQYB
name=swarm-node-2
INFO[2607] Removed Engine swarm-node-2
ERRO[2607] Failed to reschedule container e85a6056d533d21c4334738a866d4349a1ce72abbf2917735f976ef469debd49:
Conflict: The name /serene_archimedes is already assigned.
You have to delete (or rename) that container to be able to
assign /serene_archimedes to a container again.
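A possible manual recovery for the name conflict above (an assumption, not shown in the logs): free the name and let the manager reschedule again.
# sketch: remove the stale container holding the name
docker rm -f serene_archimedes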
Docker Swarm exceeds Kubernetes scale, http://bit.ly/1WoxIN0, http://bit.ly/1WoxPrS
The study and article, commissioned by Docker, tested the performance of both platforms while running 30,000 containers across 1,000-node clusters.
Slides: http://bit.ly/1SmqIyS
BACKUP SLIDES
# external KV store
docker run -d -p 8500:8500 --name=consul progrium/consul -server -bootstrap
consul=$(hostname -I | awk '{print $1}'):8500
SWARM_NODE=(manager-1 manager-2 frontend-node-1 app-node-2 db-node-3)
for i in ${SWARM_NODE[@]}; do
docker-machine create \
--engine-opt cluster-store=consul://$consul \
--engine-opt cluster-advertise=eth1:2376 \
--engine-label spam=eggs --engine-label hosttype=${i} \
-d virtualbox ${i}
done
for i in ${SWARM_NODE[@]}; do
eval $(docker-machine env ${i})
[[ "${i::7}" == "manager" ]] && { # manager/master
docker network create -d overlay --subnet 10.10.10.0/24 swarm
# with dedicated KV just for swarm
docker run -d --net swarm --name=consul progrium/consul -server -join $consul
docker run -d --net swarm --name=manager -p 3376:3376 -t -v /var/lib/boot2docker:/certs:ro \
swarm manage -H 0.0.0.0:3376 \
--tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server.pem --tlskey=/certs/server-key.pem \
--replication --advertise=eth1:3376 \
consul://consul:8500
} || {
docker run --net=swarm -d swarm join --addr=$(docker-machine ip ${i}):2376 consul://consul:8500
}
done
export DOCKER_HOST=tcp://$(docker-machine ip manager-1):3376
docker info
docker ps
# or just use external kv: "consul://$consul"
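With --replication enabled only one manager is primary at a time; pointing the client at the other manager should report it as a replica. A sketch:
# sketch: check the role of the second manager
export DOCKER_HOST=tcp://$(docker-machine ip manager-2):3376
docker info | grep -i role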