Distributed systems are systems which are connected by a network, and use some form of message passing to communicate and compute correctly.
What are some problems that can occur?
How do services find each other? A service may have X instances
of them running at a time, reachable at addresses such as:
127.0.0.1
10.0.1.10
foo.example.org
What we need is something that automatically finds and configures the services.
DHCP is a form of specialized service discovery. Why?
etcd, zookeeper, consul
Protocols:
$ ssh core@140.211.168.XXX
$ systemctl start etcd2
$ etcdctl set /message Hello
Hello
$ curl -L -X PUT http://127.0.0.1:2379/v2/keys/message -d value="Hello"
{"action":"set","node":{"key":"/message","value":"Hello","modifiedIndex":5,"createdIndex":5},"prevNode":{"key":"/message","value":"Hello","modifiedIndex":4,"createdIndex":4}}
$ etcdctl get /message
Hello
$ curl -L http://127.0.0.1:2379/v2/keys/message
{"action":"get","node":{"key":"/message","value":"Hello","modifiedIndex":5,"createdIndex":5}}
$ etcdctl rm /message
PrevNode.Value: Hello
$ curl -w "\n" 'https://discovery.etcd.io/new?size=3'
https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
Create a cloud-config.yml
file with the contents of the discovery URL.
#cloud-config
coreos:
  etcd2:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
    # specify the initial size of your cluster with ?size=X
    discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
    # multi-region and multi-cloud deployments need to use $public_ipv4
    advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
    initial-advertise-peer-urls: http://$private_ipv4:2380
    # listen on both the official ports and the legacy ports
    # legacy ports can be omitted if your application doesn't depend on them
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://$private_ipv4:2380
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
$ nova boot --image "CoreOS" --flavor cs312 --key-name ramereth \
--user-data ./cloud-config.yml --security-groups all etcd1
# Wait for first node to start the etcd cluster and now watch the
# logs as we spin up new instances
$ journalctl -u etcd2 -f
$ nova boot --image "CoreOS" --flavor cs312 --key-name ramereth \
--user-data ./cloud-config.yml --security-groups all etcd2
$ nova boot --image "CoreOS" --flavor cs312 --key-name ramereth \
--user-data ./cloud-config.yml --security-groups all etcd3
$ ssh core@140.211.168.XXX
# Start etcd2 and fleet
$ systemctl start etcd2 fleet
# List machines seen in fleet
$ fleetctl list-machines
MACHINE IP METADATA
0de27f87... 192.168.68.2 -
# Create echo container from Monday
# Write the Dockerfile for the simple echo-server container (from Monday).
# NOTE: Docker only recognizes '#' comments at the start of a line; a trailing
# '#' on an instruction line becomes part of that instruction's value, so the
# "change your email" note must live on its own line.
cat <<EOF > Dockerfile
FROM centos
# Change the email below to your own address
MAINTAINER cs312@osuosl.org
ADD http://ilab.cs.byu.edu/python/code/echoserver-simple.py /echoserver-simple.py
EXPOSE 50000
CMD ["python", "/echoserver-simple.py"]
EOF
$ docker build -t cs312/echo .
# Create systemd unit from Monday
# Write the systemd unit that runs the echo container under fleet.
# FIX: the original had 'BindsTo=echo.service' — a unit cannot meaningfully
# bind to itself. A Docker-run service should depend on the Docker daemon
# instead (After= orders startup, Requires= pulls docker.service in).
# The leading '-' on ExecStartPre tells systemd to ignore a non-zero exit,
# so a missing 'echo' container on first start is not an error.
cat <<EOF > echo.service
[Unit]
Description=echo service
After=docker.service
Requires=docker.service
[Service]
ExecStartPre=-/usr/bin/docker kill echo
ExecStartPre=-/usr/bin/docker rm echo
ExecStart=/usr/bin/docker run --name echo -p 50000:50000 cs312/echo
ExecStop=/usr/bin/docker stop echo
EOF
$ fleetctl submit echo
Unit echo.service inactive
$ fleetctl load echo
Unit echo.service loaded on 0de27f87.../192.168.68.2
$ fleetctl list-units
UNIT MACHINE ACTIVE SUB
echo.service 0de27f87.../192.168.68.2 inactive dead
$ fleetctl start echo
Unit echo.service launched on 0de27f87.../192.168.68.2
$ fleetctl list-units
UNIT MACHINE ACTIVE SUB
echo.service 0de27f87.../192.168.68.2 active running
$ ncat localhost 50000
foo
foo
Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts. — http://kubernetes.io/
# bind local port 8001 to localhost:8001 on remote server, we will use this
# later
$ ssh -L8001:localhost:8001 core@140.211.168.XXX
# Run etcd
$ docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd \
--addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data
# Run master k8
$ docker run \
--volume=/:/rootfs:ro \
--volume=/sys:/sys:ro \
--volume=/dev:/dev \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/var/run:/var/run:rw \
--net=host \
--pid=host \
--privileged=true \
-d \
gcr.io/google_containers/hyperkube:v1.1.3 \
/hyperkube kubelet --containerized --hostname-override="127.0.0.1" \
--address="0.0.0.0" --api-servers=http://localhost:8080 \
--config=/etc/kubernetes/manifests
# Run service proxy
$ docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.1.3 \
/hyperkube proxy --master=http://127.0.0.1:8080 --v=2
# Download kubectl binary and put it somewhere in our $PATH
$ mkdir -p /opt/bin
$ wget -O /opt/bin/kubectl https://goo.gl/vkEeer
$ chmod +x /opt/bin/kubectl
# Ensure we can see the local node running
$ kubectl get nodes
NAME LABELS STATUS AGE
127.0.0.1 kubernetes.io/hostname=127.0.0.1 Ready 30s
# Download K8 repo tarball
$ wget https://github.com/kubernetes/kubernetes/archive/release-1.1.zip
$ unzip release-1.1.zip && cd kubernetes-release-1.1
# Run demo frontend and access via http://localhost:8001/static
$ kubectl proxy --www=docs/user-guide/update-demo/local/ &
# Show pods
$ kubectl get pods
# Run the replication controller
$ kubectl create -f docs/user-guide/update-demo/nautilus-rc.yaml
# Try scaling the replication controller
$ kubectl scale rc update-demo-nautilus --replicas=4
# Update the docker image
$ kubectl rolling-update update-demo-nautilus --update-period=5s \
-f docs/user-guide/update-demo/kitten-rc.yaml
# Bring down the pods
$ kubectl delete rc update-demo-kitten