[~/Documents/Virtual Machines.localized]$ find ./ -name "coreos_*.vmx" -exec cat {} \; | grep "displayName\|generatedAddress" displayName = "coreos0" ethernet0.generatedAddress = "00:0c:29:97:dd:62" ethernet0.generatedAddressOffset = "0" displayName = "coreos1" ethernet0.generatedAddress = "00:0c:29:16:dd:7e" ethernet0.generatedAddressOffset = "0" displayName = "coreos2" ethernet0.generatedAddress = "00:0c:29:bd:08:bb" ethernet0.generatedAddressOffset = "0" displayName = "coreos3" ethernet0.generatedAddress = "00:0c:29:03:d1:3f" ethernet0.generatedAddressOffset = "0"
[/Library/Preferences/VMware Fusion/vmnet8]$ tail -35 dhcpd.conf host vmnet8 { hardware ethernet 00:50:56:C0:00:08; fixed-address 192.168.38.1; option domain-name-servers 0.0.0.0; option domain-name ""; option routers 0.0.0.0; } ####### VMNET DHCP Configuration. End of "DO NOT MODIFY SECTION" ####### host ubuntu { hardware ethernet 00:0c:29:09:32:ef; fixed-address 192.168.38.10; } host coreos0 { hardware ethernet 00:0C:29:97:DD:62; fixed-address 192.168.38.100; } host coreos1 { hardware ethernet 00:0C:29:16:DD:7E; fixed-address 192.168.38.101; } host coreos2 { hardware ethernet 00:0C:29:BD:08:BB; fixed-address 192.168.38.102; } host coreos3 { hardware ethernet 00:0C:29:03:D1:3F; fixed-address 192.168.38.103; }
Host coreos0 HostName 192.168.38.100 User core IdentityFile ~/.ssh/insecure_ssh_key Host coreos1 HostName 192.168.38.101 User core IdentityFile ~/.ssh/insecure_ssh_key Host coreos2 HostName 192.168.38.102 User core IdentityFile ~/.ssh/insecure_ssh_key Host coreos3 HostName 192.168.38.103 User core IdentityFile ~/.ssh/insecure_ssh_key
[~]$ ssh-agent bash [~]$ ssh-add ~/.ssh/insecure_ssh_key Identity added: /Users/atsushi/.ssh/insecure_ssh_key (/Users/atsushi/.ssh/insecure_ssh_key) [~]$ ssh -A coreos1 CoreOS alpha (647.0.0) core@coreos1 ~ $ ssh -A 192.168.38.102 CoreOS alpha (647.0.0) core@coreos2 ~ $ ssh -A 192.168.38.103 CoreOS alpha (647.0.0) core@coreos3 ~ $
| option | summary | default value |
| --- | --- | --- |
-peer-addr=$addr:$port | etcd間通信に利用するアドレスとポート | 127.0.0.1:7001 |
-addr=$addr:$port | etcdとクライアント間の通信に利用するアドレスとポート | 127.0.0.1:4001 |
-data-dir=$dir | データ格納ディレクトリ | カレントディレクトリ |
-name=$name | ノード名 | UUID |
-discovery=$url | クラスタ内のノードの情報を格納するKVS http://192.168.38.1:4001/v2/keys/discovery/012345 | - |
[~]$ brew install etcd etcdctl jq [~]$ etcd -peer-addr=192.168.38.1:7001 -addr=192.168.38.1:4001 -data-dir=etcd/node00 -name=node00 [etcd] Jan 27 01:37:21.794 INFO | node00 is starting a new cluster [etcd] Jan 27 01:37:21.796 INFO | etcd server [name node00, listen on :4001, advertised url http://192.168.38.1:4001] [etcd] Jan 27 01:37:21.796 INFO | peer server [name node00, listen on :7001, advertised url http://192.168.38.1:7001] [etcd] Jan 27 01:37:21.796 INFO | node00 starting in peer mode [etcd] Jan 27 01:37:21.796 INFO | node00: state changed from 'initialized' to 'follower'. [etcd] Jan 27 01:37:21.796 INFO | node00: state changed from 'follower' to 'leader'. [etcd] Jan 27 01:37:21.796 INFO | node00: leader changed from '' to 'node00'.
core@localhost ~ $ cat /usr/share/oem/cloud-config.yml #cloud-config hostname: coreos0 coreos: etcd: name: node00 units: - name: etcd.service command: start ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
core@localhost ~ $ sudo coreos-cloudinit -from-file=/usr/share/oem/cloud-config.yml
core@localhost ~ $ curl -X PUT http://127.0.0.1:4001/v2/keys/discovery/012345/_config/size -d value=3 {"action":"set","node":{"key":"/discovery/012345/_config/size","value":"3","modifiedIndex":3,"createdIndex":3}}
[~]$ curl https://discovery.etcd.io/new https://discovery.etcd.io/6cdd01cb6656463a56528db78c79a2c3
#cloud-config hostname: coreos1 coreos: etcd: name: node01 discovery: http://192.168.38.100:4001/v2/keys/discovery/012345 addr: 192.168.38.101:4001 peer-addr: 192.168.38.101:7001 units: - name: etcd.service command: start - name: fleet.service command: start ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
core@coreos1 /usr/share/oem $ sudo rm -rf /usr/bin/etcd core@coreos1 /usr/share/oem $ sudo coreos-cloudinit -from-file=./cloud-config.yml
[Unit] Description=etcd [Service] User=etcd PermissionsStartOnly=true Environment=ETCD_DATA_DIR=/var/lib/etcd Environment=ETCD_NAME=%m ExecStart=/usr/bin/etcd Restart=always RestartSec=10s
[Service] Environment="ETCD_ADDR=192.168.38.101:4001" Environment="ETCD_DISCOVERY=http://192.168.38.1:4001/v2/keys/discovery/012345" Environment="ETCD_NAME=node01" Environment="ETCD_PEER_ADDR=192.168.38.101:7001"
$ curl -L http://192.168.38.101:4001/v2/machines http://192.168.38.101:4001, http://192.168.38.102:4001, http://192.168.38.103:4001 $ curl -s http://192.168.38.101:4001/v2/keys/_etcd/machines | jq . { "action": "get", "node": { "key": "/_etcd/machines", "dir": true, "nodes": [ { "key": "/_etcd/machines/node01", "value": "etcd=http%3A%2F%2F192.168.38.101%3A4001&raft=http%3A%2F%2F192.168.38.101%3A7001", "modifiedIndex": 1, "createdIndex": 1 }, { "key": "/_etcd/machines/node02", "value": "etcd=http%3A%2F%2F192.168.38.102%3A4001&raft=http%3A%2F%2F192.168.38.102%3A7001", "modifiedIndex": 134, "createdIndex": 134 }, { "key": "/_etcd/machines/node03", "value": "etcd=http%3A%2F%2F192.168.38.103%3A4001&raft=http%3A%2F%2F192.168.38.103%3A7001", "modifiedIndex": 194, "createdIndex": 194 } ], "modifiedIndex": 1, "createdIndex": 1 } }
良いんじゃないでしょうか。coreos1 に Hello をセットして coreos2,3 から取り出せるか
$ curl -X PUT http://192.168.38.101:4001/v2/keys/message -d value="HELLO" {"action":"set","node":{"key":"/message","value":"HELLO","modifiedIndex":703,"createdIndex":703}} $ curl -X GET http://192.168.38.102:4001/v2/keys/message {"action":"get","node":{"key":"/message","value":"HELLO","modifiedIndex":703,"createdIndex":703}} $ curl -X GET http://192.168.38.103:4001/v2/keys/message {"action":"get","node":{"key":"/message","value":"HELLO","modifiedIndex":703,"createdIndex":703}}
こんなところで良いんじゃないでしょうか。
[~]$ etcdctl set /message Hello Hello [~]$ etcdctl get /message Hello [~]$ etcdctl update /message goodby goodby [~]$ etcdctl get /message goodby [~]$ etcdctl rm /message [~]$ etcdctl get /message Error: 100: Key not found (/message) [4]
[~]$ curl -X PUT http://127.0.0.1:4001/v2/keys/message -d value="HELLO" | jq . { "action": "set", "node": { "key": "/message", "value": "HELLO", "modifiedIndex": 9, "createdIndex": 9 } } [~]$ curl -X GET http://127.0.0.1:4001/v2/keys/message | jq . { "action": "get", "node": { "key": "/message", "value": "HELLO", "modifiedIndex": 9, "createdIndex": 9 } } [~]$ curl -X DELETE http://127.0.0.1:4001/v2/keys/message | jq . { "action": "delete", "node": { "key": "/message", "modifiedIndex": 10, "createdIndex": 9 }, "prevNode": { "key": "/message", "value": "HELLO", "modifiedIndex": 9, "createdIndex": 9 } } [~]$ curl -X GET http://127.0.0.1:4001/v2/keys/message | jq . { "errorCode": 100, "message": "Key not found", "cause": "/message", "index": 10 }
[~]$ curl -L http://127.0.0.1:4001/v2/machines http://127.0.0.1:4001
[~]$ curl -s http://127.0.0.1:4001/v2/keys/_etcd/machines | jq . { "action": "get", "node": { "key": "/_etcd/machines", "dir": true, "nodes": [ { "key": "/_etcd/machines/leader", "value": "etcd=http%3A%2F%2F127.0.0.1%3A4001&raft=http%3A%2F%2F127.0.0.1%3A7001", "modifiedIndex": 1, "createdIndex": 1 } ], "modifiedIndex": 1, "createdIndex": 1 } }
https://github.com/coreos/flannel
core@core1 ~ $ etcdctl rm /coreos.com/network/ --recursive core@core1 ~ $ etcdctl mk /coreos.com/network/config '{"Network":"11.0.0.0/16"}' core@core1 ~ $ etcdctl get /coreos.com/network/config {"Network":"11.0.0.0/16"}
core@core1 ~ $ sudo -s core1 core # mkdir /opt core1 core # cd /opt/ core1 opt # git clone https://github.com/coreos/flannel.git core1 opt # cd flannel/ core1 flannel # sudo docker run -v `pwd`:/opt/flannel -i -t google/golang /bin/bash -c "cd /opt/flannel && ./build" core1 flannel # cd .. core1 opt # mkdir bin core1 opt # cd bin core1 bin # ln -s ../flannel/bin/flanneld flanneld core1 bin # exit exit
確認
core@localhost ~ $ /opt/bin/flanneld -version 0.3.0+git
Core OS 633 には、/usr/lib/systemd/system/flanneld.service が入っている。この中で起動しているのが /opt/bin/flanneld 。そのうちビルドしなくてもいいようになるのでしょう
#cloud-config stname: coreos1 coreos: etcd: name: node01 discovery: http://192.168.38.100:4001/v2/keys/discovery/012345 addr: 192.168.38.101:4001 peer-addr: 192.168.38.101:7001 units: - name: etcd.service command: start - name: fleet.service command: start - name: flanneld.service command: start ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
起動してみる
core@localhost ~ $ sudo coreos-cloudinit -from-file=/usr/share/oem/cloud-config.yml Checking availability of "local-file" Fetching user-data from datasource of type "local-file" line 3: warning: unrecognized key "stname" Fetching meta-data from datasource of type "local-file" 2015/04/01 14:18:37 Parsing user-data as cloud-config Merging cloud-config from meta-data and user-data 2015/04/01 14:18:37 Authorized SSH keys for core user 2015/04/01 14:18:37 Writing drop-in unit "20-cloudinit.conf" to filesystem 2015/04/01 14:18:37 Writing file to "/run/systemd/system/etcd.service.d/20-cloudinit.conf" 2015/04/01 14:18:37 Wrote file to "/run/systemd/system/etcd.service.d/20-cloudinit.conf" 2015/04/01 14:18:37 Wrote drop-in unit "20-cloudinit.conf" 2015/04/01 14:18:37 Ensuring runtime unit file "etcd.service" is unmasked 2015/04/01 14:18:37 Ensuring runtime unit file "fleet.service" is unmasked 2015/04/01 14:18:37 Ensuring runtime unit file "locksmithd.service" is unmasked 2015/04/01 14:18:37 Calling unit command "start" on "etcd.service"' 2015/04/01 14:18:37 Result of "start" on "etcd.service": done 2015/04/01 14:18:37 Calling unit command "start" on "fleet.service"' 2015/04/01 14:18:37 Result of "start" on "fleet.service": done 2015/04/01 14:18:37 Calling unit command "start" on "flanneld.service"' 2015/04/01 14:18:37 Result of "start" on "flanneld.service": done core@localhost ~ $ sudo systemctl status flanneld.service ● flanneld.service - Network fabric for containers Loaded: loaded (/usr/lib64/systemd/system/flanneld.service; static; vendor preset: disabled) Active: active (running) since Wed 2015-04-01 14:11:56 UTC; 7min ago Docs: https://github.com/coreos/flannel Main PID: 742 (sdnotify-proxy) CGroup: /system.slice/flanneld.service ├─742 /usr/libexec/sdnotify-proxy /run/flannel/sd.sock /usr/bin/docker run --net=host --privileged=true --rm --volume... 
└─744 /usr/bin/docker run --net=host --privileged=true --rm --volume=/run/flannel:/run/flannel --env=NOTIFY_SOCKET=/r... Apr 01 14:18:58 localhost sdnotify-proxy[742]: I0401 14:18:58.918646 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:58 localhost sdnotify-proxy[742]: I0401 14:18:58.969141 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.019231 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.068477 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.119422 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.169538 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.219166 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.269324 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.320882 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Apr 01 14:18:59 localhost sdnotify-proxy[742]: I0401 14:18:59.365672 00001 subnet.go:447] Lease renewed, new expiration: 2...00 UTC Hint: Some lines were ellipsized, use -l to show in full. core@localhost ~ $
[Unit] Description=Network fabric for containers Documentation=https://github.com/coreos/flannel Wants=etcd.service Requires=early-docker.service After=etcd.service early-docker.service Before=early-docker.target [Service] Type=notify Restart=always RestartSec=5 Environment="TMPDIR=/var/tmp/" Environment="DOCKER_HOST=unix:///var/run/early-docker.sock" Environment="FLANNEL_VER=0.3.0" Environment="ETCD_SSL_DIR=/etc/ssl/etcd" LimitNOFILE=40000 LimitNPROC=1048576 ExecStartPre=/sbin/modprobe ip_tables ExecStartPre=/usr/bin/mkdir -p /run/flannel ExecStartPre=/usr/bin/mkdir -p ${ETCD_SSL_DIR} ExecStartPre=/usr/bin/touch /run/flannel/options.env ExecStart=/usr/libexec/sdnotify-proxy /run/flannel/sd.sock \ /usr/bin/docker run --net=host --privileged=true --rm \ --volume=/run/flannel:/run/flannel \ --env=NOTIFY_SOCKET=/run/flannel/sd.sock \ --env-file=/run/flannel/options.env \ --volume=${ETCD_SSL_DIR}:/etc/ssl/etcd:ro \ quay.io/coreos/flannel:${FLANNEL_VER} /opt/bin/flanneld --ip-masq=true # Update docker options ExecStartPost=/usr/bin/docker run --net=host --rm -v /run:/run \ quay.io/coreos/flannel:${FLANNEL_VER} \ /opt/bin/mk-docker-opts.sh -d /run/flannel_docker_opts.env -i
よく分からんが、etcd を参照しつつ flanneld を起動し、flanneld のネットワーク設定を元に docker コンテナを再起動しているようだ
core@localhost ~ $ ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:16:dd:7e brd ff:ff:ff:ff:ff:ff inet 192.168.38.101/24 brd 192.168.38.255 scope global dynamic ens192 valid_lft 257sec preferred_lft 257sec inet6 fe80::20c:29ff:fe16:dd7e/64 scope link valid_lft forever preferred_lft forever 4: flannel0: <POINTOPOINT,UP,LOWER_UP> mtu 1472 qdisc fq_codel state UNKNOWN group default qlen 500 link/none inet 11.0.46.0/16 scope global flannel0 valid_lft forever preferred_lft forever 5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1472 qdisc noqueue state DOWN group default link/ether 56:84:7a:fe:97:99 brd ff:ff:ff:ff:ff:ff inet 11.0.46.1/24 scope global docker0 valid_lft forever preferred_lft forever inet6 fe80::5484:7aff:fefe:9799/64 scope link valid_lft forever preferred_lft forever
core@localhost ~ $ sudo docker run -ti ubuntu /bin/bash root@d19c748f5d56:/# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 8: eth0: <BROADCAST,UP,LOWER_UP> mtu 1472 qdisc noqueue state UP group default link/ether 02:42:0b:00:2e:03 brd ff:ff:ff:ff:ff:ff inet 11.0.46.3/24 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::42:bff:fe00:2e03/64 scope link valid_lft forever preferred_lft forever
core@coreos3 ~ $ sudo docker run -ti ubuntu /bin/bash root@cf7be88d257a:/# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 5: eth0: <BROADCAST,UP,LOWER_UP> mtu 1472 qdisc noqueue state UP group default link/ether 02:42:0b:00:3b:02 brd ff:ff:ff:ff:ff:ff inet 11.0.59.2/24 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::42:bff:fe00:3b02/64 scope link valid_lft forever preferred_lft forever root@cf7be88d257a:/# ping 11.0.72.2 PING 11.0.72.2 (11.0.72.2) 56(84) bytes of data. 64 bytes from 11.0.72.2: icmp_seq=1 ttl=60 time=0.802 ms 64 bytes from 11.0.72.2: icmp_seq=2 ttl=60 time=0.601 ms 64 bytes from 11.0.72.2: icmp_seq=3 ttl=60 time=0.516 ms 64 bytes from 11.0.72.2: icmp_seq=4 ttl=60 time=0.484 ms 64 bytes from 11.0.72.2: icmp_seq=5 ttl=60 time=0.517 ms ^C --- 11.0.72.2 ping statistics --- 5 packets transmitted, 5 received, 0% packet loss, time 4001ms rtt min/avg/max/mdev = 0.484/0.584/0.802/0.115 ms root@cf7be88d257a:/#
ping は通った
core@coreos3 ~ $ etcdctl ls /coreos.com/network/ /coreos.com/network/config /coreos.com/network/subnets core@coreos3 ~ $ etcdctl get /coreos.com/network/config {"Network":"11.0.0.0/16"} core@coreos3 ~ $ etcdctl ls /coreos.com/network/subnets /coreos.com/network/subnets/11.0.46.0-24 /coreos.com/network/subnets/11.0.72.0-24 /coreos.com/network/subnets/11.0.59.0-24 core@coreos3 ~ $ etcdctl get /coreos.com/network/subnets/11.0.46.0-24 {"PublicIP":"192.168.38.101"} core@coreos3 ~ $ etcdctl get /coreos.com/network/subnets/11.0.72.0-24 {"PublicIP":"192.168.38.102"} core@coreos3 ~ $ etcdctl get /coreos.com/network/subnets/11.0.59.0-24 {"PublicIP":"192.168.38.103"}
ふむふむさういうことか
etcd の分散データベースで、各マシンの docker のサブネットと Public IP の対応が管理されているのね
#cloud-config hostname: coreos1 coreos: update: group: alpha reboot-strategy: best-effort etcd: name: node01 discovery: http://192.168.38.100:4001/v2/keys/discovery/012345 addr: 192.168.38.101:4001 peer-addr: 192.168.38.101:7001 units: - name: etcd.service command: start - name: fleet.service command: start - name: flanneld.service command: start ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
起動してみる
core@localhost ~ $ sudo coreos-cloudinit -from-file=/usr/share/oem/cloud-config.yml
[~]$ ssh core@coreos1 Last login: Sat Apr 4 06:37:15 2015 from 192.168.38.1 CoreOS alpha (640.0.0) core@localhost ~ $ fleetctl list-machines MACHINE IP METADATA 579c08c5... 192.168.38.102 - b2be44bb... 192.168.38.103 - edf014cc... 192.168.38.101 - core@localhost ~ $
いいんじゃないでしょうか
[Unit] Description=Apache Official Container(https://registry.hub.docker.com/_/httpd/) After=docker.service Requires=docker.service [Service] TimeoutStartSec=0 ExecStartPre=-/usr/bin/docker kill apache ExecStartPre=-/usr/bin/docker rm apache ExecStartPre=/usr/bin/docker pull httpd:2.4 ExecStart=/usr/bin/docker run --name apache -p 10080:80 -i httpd:2.4 ExecStop=/usr/bin/docker stop apache [Install] WantedBy=multi-user.target [X-Fleet] X-Conflicts=docker-apache.service
fleet クラスタに、サービス定義を登録する
core@localhost ~ $ fleetctl submit docker-apache.service core@localhost ~ $ fleetctl list-unit-files UNIT HASH DSTATE STATE TARGET docker-apache.service 11e0200 inactive inactive - core@localhost ~ $ fleetctl cat docker-apache.service [Unit] Description=Apache Official Container(https://registry.hub.docker.com/_/httpd/) After=docker.service Requires=docker.service [Service] TimeoutStartSec=0 ExecStartPre=-/usr/bin/docker kill apache ExecStartPre=-/usr/bin/docker rm apache ExecStartPre=/usr/bin/docker pull httpd:2.4 ExecStart=/usr/bin/docker run --name apache -p 10080:80 -i httpd:2.4 ExecStop=/usr/bin/docker stop apache [Install] WantedBy=multi-user.target [X-Fleet] X-Conflicts=docker-apache.service
物理マシンに、サービスを登録する
core@localhost ~ $ fleetctl load docker-apache.service Unit docker-apache.service loaded on 579c08c5.../192.168.38.102 core@localhost ~ $ fleetctl list-unit-files UNIT HASH DSTATE STATE TARGET docker-apache.service 11e0200 loaded loaded 579c08c5.../192.168.38.102
物理マシンで、サービスを起動する
core@localhost ~ $ fleetctl start docker-apache Unit docker-apache.service launched on 579c08c5.../192.168.38.102
core@localhost ~ $ fleetctl list-units UNIT MACHINE ACTIVE SUB docker-apache.service 579c08c5.../192.168.38.102 active running
よかよか
core@localhost ~ $ fleetctl status docker-apache ● docker-apache.service - Apache Official Container(https://registry.hub.docker.com/_/httpd/) Loaded: loaded (/run/fleet/units/docker-apache.service; linked-runtime; vendor preset: disabled) Active: active (running) since Mon 2015-06-01 13:54:13 UTC; 11s ago Process: 738 ExecStartPre=/usr/bin/docker pull httpd:2.4 (code=exited, status=0/SUCCESS) Process: 732 ExecStartPre=/usr/bin/docker rm apache (code=exited, status=1/FAILURE) Process: 671 ExecStartPre=/usr/bin/docker kill apache (code=exited, status=1/FAILURE) Main PID: 757 (docker) Memory: 1.5M CGroup: /system.slice/docker-apache.service └─757 /usr/bin/docker run --name apache -p 10080:80 -i httpd:2.4 Jun 01 13:54:13 coreos2 docker[738]: ec21802b3801: Already exists Jun 01 13:54:13 coreos2 docker[738]: 58d2b0d4632c: Already exists Jun 01 13:54:13 coreos2 docker[738]: 58d2b0d4632c: Already exists Jun 01 13:54:13 coreos2 docker[738]: Digest: sha256:e1c78fd635d30cd2185b16e69244c78aefd8ed7154f4fb4d97000c7e4fb4e3b1 Jun 01 13:54:13 coreos2 docker[738]: Status: Image is up to date for httpd:2.4 Jun 01 13:54:13 coreos2 systemd[1]: Started Apache Official Container(https://registry.hub.docker.com/_/httpd/). Jun 01 13:54:13 coreos2 docker[757]: AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 11.0.43.2. Set the 'ServerName' directive globally to suppress this message Jun 01 13:54:13 coreos2 docker[757]: AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 11.0.43.2. Set the 'ServerName' directive globally to suppress this message Jun 01 13:54:13 coreos2 docker[757]: [Mon Jun 01 13:54:13.317050 2015] [mpm_event:notice] [pid 1:tid 140234936838016] AH00489: Apache/2.4.12 (Unix) configured -- resuming normal operations Jun 01 13:54:13 coreos2 docker[757]: [Mon Jun 01 13:54:13.317993 2015] [core:notice] [pid 1:tid 140234936838016] AH00094: Command line: 'httpd -D FOREGROUND'
stdout を見たい場合には journal。tail -f のようにしたい場合には fleetctl journal -f docker-apache でもいい
core@localhost ~ $ fleetctl journal -f docker-apache -- Logs begin at Sun 2015-02-08 13:14:59 UTC. -- Jun 01 13:54:13 coreos2 docker[738]: ec21802b3801: Already exists Jun 01 13:54:13 coreos2 docker[738]: 58d2b0d4632c: Already exists Jun 01 13:54:13 coreos2 docker[738]: 58d2b0d4632c: Already exists Jun 01 13:54:13 coreos2 docker[738]: Digest: sha256:e1c78fd635d30cd2185b16e69244c78aefd8ed7154f4fb4d97000c7e4fb4e3b1 Jun 01 13:54:13 coreos2 docker[738]: Status: Image is up to date for httpd:2.4 Jun 01 13:54:13 coreos2 systemd[1]: Started Apache Official Container(https://registry.hub.docker.com/_/httpd/). Jun 01 13:54:13 coreos2 docker[757]: AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 11.0.43.2. Set the 'ServerName' directive globally to suppress this message Jun 01 13:54:13 coreos2 docker[757]: AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using 11.0.43.2. Set the 'ServerName' directive globally to suppress this message Jun 01 13:54:13 coreos2 docker[757]: [Mon Jun 01 13:54:13.317050 2015] [mpm_event:notice] [pid 1:tid 140234936838016] AH00489: Apache/2.4.12 (Unix) configured -- resuming normal operations Jun 01 13:54:13 coreos2 docker[757]: [Mon Jun 01 13:54:13.317993 2015] [core:notice] [pid 1:tid 140234936838016] AH00094: Command line: 'httpd -D FOREGROUND'
ブラウザからアクセスしてみる
core@localhost ~ $ fleetctl stop docker-apache Unit docker-apache.service loaded on 579c08c5.../192.168.38.102 core@localhost ~ $ fleetctl list-units UNIT MACHINE ACTIVE SUB docker-apache.service 579c08c5.../192.168.38.102 inactive dead
core@localhost ~ $ fleetctl unload docker-apache Unit docker-apache.service inactive core@localhost ~ $ fleetctl list-units UNIT MACHINE ACTIVE SUB
core@localhost ~ $ fleetctl destroy docker-apache Destroyed docker-apache.service core@localhost ~ $ fleetctl list-unit-files UNIT HASH DSTATE STATE TARGET
$ fleetctl submit docker-apache@.service $ fleetctl load docker-apache@80.service
[Unit] Description=Apache Official Container(https://registry.hub.docker.com/_/httpd/) After=docker.service Requires=docker.service [Service] TimeoutStartSec=0 ExecStartPre=-/usr/bin/docker kill apache ExecStartPre=-/usr/bin/docker rm apache ExecStartPre=/usr/bin/docker pull httpd:2.4 ExecStart=/usr/bin/docker run --name apache -p %i:80 -i httpd:2.4 ExecStop=/usr/bin/docker stop apache [Install] WantedBy=multi-user.target [X-Fleet] X-Conflicts=docker-apache@*.service