[CRIU] [PATCH v3] Add docker phaul driver
Hui Kang
hkang.sunysb at gmail.com
Tue Oct 20 09:43:19 PDT 2015
See the instructions in test/docker/HOWTO
TODO (suggestions from xemul, avagin, nikita):
(1) Send criu image iteratively (enable pre_dump)
(2) Resolve the criu_conn error at the end of start_migration()
for docker
(3) Remove the sleep(); call wait() on the docker daemon process instead
(4) Can we use sync_imgs_to_target() for docker as well?
(5) Remove get_driver_name(); use a fake object, additional
abstraction layers, or a similar mechanism instead
(6) Wait for docker-py to integrate the C/R APIs (see the sketch below)
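As a rough illustration of item (6): once docker-py integrates the C/R
calls, dump() could drop the docker command-line invocation and use the
python binding directly. The sketch below is hypothetical; docker-py has
no checkpoint() call today, so that method name is an assumption:

    import docker

    # Hypothetical future dump() for p_haul_docker.p_haul_type; today the
    # driver shells out to "/usr/bin/docker-1.9.0-dev checkpoint <ctid>".
    def dump(self):
        cli = docker.Client(base_url='unix://var/run/docker.sock')
        cli.checkpoint(self._ctid)  # assumed future docker-py API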
Signed-off-by: Hui Kang <hkang.sunysb at gmail.com>
---
p.haul | 2 +-
phaul/fs_haul_subtree.py | 3 +
phaul/p_haul_docker.py | 148 +++++++++++++++++++++++++++++++++++++++++++++++
phaul/p_haul_iters.py | 70 +++++++++++++---------
phaul/p_haul_pid.py | 3 +
phaul/p_haul_service.py | 4 +-
phaul/p_haul_type.py | 1 +
test/docker/HOWTO | 82 ++++++++++++++++++++++++++
8 files changed, 284 insertions(+), 29 deletions(-)
create mode 100644 phaul/p_haul_docker.py
create mode 100644 test/docker/HOWTO
diff --git a/p.haul b/p.haul
index 5a629bc..0e01424 100755
--- a/p.haul
+++ b/p.haul
@@ -25,7 +25,7 @@ import phaul.p_haul_type
parser = argparse.ArgumentParser("Process HAULer")
parser.add_argument("type", choices=phaul.p_haul_type.get_haul_names(),
- help="Type of hat to haul, e.g. vz or lxc")
+ help="Type of hat to haul, e.g. vz, lxc, or, docker")
parser.add_argument("id", help="ID of what to haul")
parser.add_argument("--to", help="IP where to haul")
parser.add_argument("--fdrpc", help="File descriptor of rpc socket", type=int, required=True)
diff --git a/phaul/fs_haul_subtree.py b/phaul/fs_haul_subtree.py
index a9bd559..7400c25 100644
--- a/phaul/fs_haul_subtree.py
+++ b/phaul/fs_haul_subtree.py
@@ -19,6 +19,9 @@ class p_haul_fs:
def set_options(self, opts):
self.__thost = opts["to"]
+ def set_target_host(self, thost):
+ self.__thost = thost
+
def set_work_dir(self, wdir):
self.__wdir = wdir
diff --git a/phaul/p_haul_docker.py b/phaul/p_haul_docker.py
new file mode 100644
index 0000000..f7a8bfa
--- /dev/null
+++ b/phaul/p_haul_docker.py
@@ -0,0 +1,148 @@
+#
+# Docker container hauler
+#
+
+import os
+import logging
+import shutil
+import time
+import signal
+import p_haul_cgroup
+import p_haul_module
+import util
+import fs_haul_shared
+import fs_haul_subtree
+import pycriu.rpc
+
+import subprocess as sp
+
+# TODO use docker-py
+# import docker
+
+name = "docker"
+
+docker_exec = "/usr/bin/docker-1.9.0-dev"
+docker_dir = "/var/lib/docker/"
+criu_image_dir = "/var/run/docker/execdriver/native"
+
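+# The docker hauler checkpoints the container with an external docker binary
+# and copies the resulting CRIU images and container state to the target host.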
+class p_haul_type:
+ def __init__(self, ctid):
+
+ # TODO: ctid must be at least 3 characters; with docker-py, we could
+ # also resolve the container name
+ if len(ctid) < 3:
+ raise Exception("Docker container ID must be at least 3 characters")
+
+ self._ctid = ctid
+ self._ct_rootfs = ""
+
+ def get_driver_name(self):
+ return name
+
+ def init_src(self):
+ self.full_ctid = self.get_full_ctid()
+ self.__load_ct_config(docker_dir)
+
+
+ def init_dst(self):
+ pass
+
+ def adjust_criu_req(self, req):
+ """Add module-specific options to criu request"""
+ pass
+
+ def root_task_pid(self):
+ pass
+
+ def __load_ct_config(self, path):
+ # Find the aufs filesystem dirname for the container
+ docker_aufs_dir = os.path.join(docker_dir, "aufs/mnt")
+ self._ct_rootfs = os.path.join(docker_aufs_dir, self.full_ctid)
+ logging.info("Container rootfs: %s", self._ct_rootfs)
+
+ def set_options(self, opts):
+ pass
+
+ # Remove any specific FS setup
+ def umount(self):
+ pass
+
+ def get_fs(self, fs_sk=None):
+ return fs_haul_subtree.p_haul_fs(self._ct_rootfs)
+
+ def get_fs_receiver(self, fs_sk=None):
+ return None
+
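+ # Resolve the (possibly abbreviated) container ID to the full ID by
+ # scanning the directory names under docker_dir/containers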
+ def get_full_ctid(self):
+ dir_name_list = os.listdir(os.path.join(docker_dir, "containers"))
+
+ full_id = ""
+ for name in dir_name_list:
+ name = name.rsplit("/")
+ if (name[0].find(self._ctid) == 0):
+ full_id = name[0]
+ break
+
+ if full_id != "":
+ return full_id
+ else:
+ raise Exception("Can not find container fs")
+
+ def dump(self):
+ logging.info("Dump docker container")
+
+ # TODO: the docker API does not expose checkpoint (cli.checkpoint())
+ # yet, so we have to use the docker command line instead, e.g.:
+ # cli = docker.Client(base_url='unix://var/run/docker.sock')
+ # output = cli.info()
+
+ logf = open("/tmp/docker_checkpoint.log", "w+")
+ ret = sp.call([docker_exec, "checkpoint", self._ctid],
+ stdout = logf, stderr = logf)
+ if ret != 0:
+ raise Exception("docker checkpoint failed")
+
+ def send_criu_images(self, thost):
+ # Sync checkpointed container images
+ ct_criu_img_dir = os.path.join(criu_image_dir, self.full_ctid)
+ dst_img_fs = fs_haul_subtree.p_haul_fs(ct_criu_img_dir)
+ dst_img_fs.set_target_host(thost)
+ dst_img_fs.set_work_dir(ct_criu_img_dir)
+ dst_img_fs.start_migration()
+
+ # Sync container status
+ ct_state_dir = os.path.join(docker_dir, "containers", self.full_ctid)
+ dst_img_fs_exec = fs_haul_subtree.p_haul_fs(ct_state_dir)
+ dst_img_fs_exec.set_target_host(thost)
+ dst_img_fs_exec.set_work_dir(ct_state_dir)
+ dst_img_fs_exec.start_migration()
+
+ def put_meta_images(self, dir):
+ pass
+
+ def kill_last_docker_daemon(self):
+ # -f: match the full command line, since docker_exec is a path
+ p = sp.Popen(['pgrep', '-f', '-l', docker_exec], stdout=sp.PIPE)
+ out, err = p.communicate()
+
+ for line in out.splitlines():
+ line = bytes.decode(line)
+ pid = int(line.split(None, 1)[0])
+ os.kill(pid, signal.SIGKILL)
+
+ def final_restore(self, img, criu):
+ logf = open("/tmp/docker_restore.log", "w+")
+
+ # Kill any previous docker daemon in order to reload the
+ # status of the migrated container
+ self.kill_last_docker_daemon()
+
+ # start docker daemon in background
+ daemon = sp.Popen([docker_exec, "daemon", "-s", "aufs"],
+ stdout = logf, stderr = logf)
+ # daemon.wait() TODO: the docker daemon does not return
+ time.sleep(2)
+
+ ret = sp.call([docker_exec, "restore", self._ctid],
+ stdout = logf, stderr = logf)
+ if ret != 0:
+ raise Exception("docker restore failed")
diff --git a/phaul/p_haul_iters.py b/phaul/p_haul_iters.py
index b2c76e3..f8b97db 100644
--- a/phaul/p_haul_iters.py
+++ b/phaul/p_haul_iters.py
@@ -62,6 +62,7 @@ class phaul_iter_worker:
self.fs.set_options(opts)
self.__force = opts["force"]
self.pre_dump = opts["pre_dump"]
+ self.target_host_ip = opts["to"]
def validate_cpu(self):
logging.info("Checking CPU compatibility")
@@ -110,6 +111,12 @@ class phaul_iter_worker:
self.fs.set_work_dir(self.img.work_dir())
self.fs.start_migration()
+ # TODO: Do not pre-dump for docker right now. Once a page server is
+ # added to the docker C/R API, we can enable pre-dump.
+ if self.htype.get_driver_name() == "docker" :
+ logging.info("Disable pre-dump for docker")
+ self.pre_dump = False
+
logging.info("Checking for Dirty Tracking")
if self.pre_dump == PRE_DUMP_AUTO_DETECT:
# pre-dump auto-detection
@@ -194,36 +201,41 @@ class phaul_iter_worker:
logging.info("Final dump and restore")
- self.target_host.start_iter()
- self.img.new_image_dir()
+ if self.htype.get_driver_name() == "docker" :
+ # call docker dump API
+ self.htype.dump()
+ logging.info("Dump complete")
+ else:
+ self.target_host.start_iter()
+ self.img.new_image_dir()
- logging.info("\tIssuing dump command to service")
+ logging.info("\tIssuing dump command to service")
- req = criu_req.make_dump_req(
- self.pid, self.htype, self.img, self.criu_connection, self.fs)
- resp = self.criu_connection.send_req(req)
- while True:
- if resp.type != pycriu.rpc.NOTIFY:
- raise Exception("Dump failed")
-
- if resp.notify.script == "post-dump":
- #
- # Dump is effectively over. Now CRIU
- # waits for us to do whatever we want
- # and keeps the tasks frozen.
- #
- break
+ req = criu_req.make_dump_req(
+ self.pid, self.htype, self.img, self.criu_connection, self.fs)
+ resp = self.criu_connection.send_req(req)
+ while True:
+ if resp.type != pycriu.rpc.NOTIFY:
+ raise Exception("Dump failed")
+
+ if resp.notify.script == "post-dump":
+ #
+ # Dump is effectively over. Now CRIU
+ # waits for us to do whatever we want
+ # and keeps the tasks frozen.
+ #
+ break
- elif resp.notify.script == "network-lock":
- self.htype.net_lock()
- elif resp.notify.script == "network-unlock":
- self.htype.net_unlock()
+ elif resp.notify.script == "network-lock":
+ self.htype.net_lock()
+ elif resp.notify.script == "network-unlock":
+ self.htype.net_unlock()
- logging.info("\t\tNotify (%s)", resp.notify.script)
- resp = self.criu_connection.ack_notify()
+ logging.info("\t\tNotify (%s)", resp.notify.script)
+ resp = self.criu_connection.ack_notify()
- logging.info("Dump complete")
- self.target_host.end_iter()
+ logging.info("Dump complete")
+ self.target_host.end_iter()
#
# Dump is complete -- go to target node,
@@ -233,8 +245,12 @@ class phaul_iter_worker:
logging.info("Final FS and images sync")
self.fs.stop_migration()
- self.img.sync_imgs_to_target(self.target_host, self.htype,
- self.connection.mem_sk)
+
+ if self.htype.get_driver_name() == "docker" :
+ self.htype.send_criu_images(self.target_host_ip)
+ else:
+ self.img.sync_imgs_to_target(self.target_host, self.htype,
+ self.connection.mem_sk)
logging.info("Asking target host to restore")
self.target_host.restore_from_images()
diff --git a/phaul/p_haul_pid.py b/phaul/p_haul_pid.py
index e0f9d2f..27f8d0a 100644
--- a/phaul/p_haul_pid.py
+++ b/phaul/p_haul_pid.py
@@ -11,6 +11,9 @@ class p_haul_type:
self.pid = int(id)
self._pidfile = None
+ def get_driver_name(self):
+ return name
+
#
# Initialize itself for source node or destination one
#
diff --git a/phaul/p_haul_service.py b/phaul/p_haul_service.py
index 11883a6..a6fe405 100644
--- a/phaul/p_haul_service.py
+++ b/phaul/p_haul_service.py
@@ -94,7 +94,9 @@ class phaul_service:
def rpc_restore_from_images(self):
logging.info("Restoring from images")
- self.htype.put_meta_images(self.img.image_dir())
+ if self.htype.get_driver_name() != "docker" :
+ self.htype.put_meta_images(self.img.image_dir())
+
self.htype.final_restore(self.img, self.criu_connection)
logging.info("Restore succeeded")
self.restored = True
diff --git a/phaul/p_haul_type.py b/phaul/p_haul_type.py
index 7e05a7b..d4dac83 100644
--- a/phaul/p_haul_type.py
+++ b/phaul/p_haul_type.py
@@ -11,6 +11,7 @@ __haul_modules = {
"vz": "p_haul_vz",
"pid": "p_haul_pid",
"lxc": "p_haul_lxc",
+ "docker": "p_haul_docker",
}
def __get(id):
diff --git a/test/docker/HOWTO b/test/docker/HOWTO
new file mode 100644
index 0000000..3255360
--- /dev/null
+++ b/test/docker/HOWTO
@@ -0,0 +1,82 @@
+This HOWTO describes how to migrate (non-live) a docker container from one
+docker host to another.
+
+** This is an experimental implementation of docker migration; it may affect
+your running containers.
+
+0. Install CRIU, p.haul, docker on both nodes
+
+ Besides the packages needed to compile and run CRIU and p.haul, a docker
+ binary that supports checkpoint/restore is required.
+
+ Refer to step 0 in test/mtouch/HOWTO for the packages needed by CRIU and p.haul.
+
+ The docker version that supports checkpoint and restore can be obtained by
+
+ # git clone https://github.com/boucher/docker.git
+ # cd docker
+ # git checkout cr-combined
+
+ On both nodes, compile the docker binary and store it as
+
+ /usr/bin/docker-1.9.0-dev
+
+ Note that the path above is for now hard-coded in p_haul_docker.py
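+
+ If your docker binary lives somewhere else, adjust the module-level
+ constants at the top of phaul/p_haul_docker.py (values as introduced by
+ this patch):
+
+ docker_exec = "/usr/bin/docker-1.9.0-dev"
+ docker_dir = "/var/lib/docker/"
+ criu_image_dir = "/var/run/docker/execdriver/native"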
+
+1. Prepare criu and p.haul on both nodes (adapted from test/mtouch/HOWTO)
+
+ a) CRIU
+
+ * Clone CRIU repository from git://github.com/xemul/criu
+ and compile it with 'make'
+
+ * Make a _local_ directory for the service ($csdir)
+ * Start the CRIU service as the root user
+ Note that this step is mandatory. Although runC will start criu in swrk
+ mode, p.haul needs to connect to the criu service to validate the CPU.
+
+ # criu service -o $csdir/log -v4 --daemon
+
+ b) On the destination node, start the p.haul service
+
+ [dst]# ./p.haul-wrap service
+ Starting p.haul rpyc service
+
+ It will daemonize and print its logs to the terminal
+
+2. Run the test container on the source node
+
+ a) Start the docker daemon
+
+ # /usr/bin/docker-1.9.0-dev daemon -s aufs
+
+ b) Start the container
+
+ # /usr/bin/docker-1.9.0-dev run -d busybox:latest /bin/sh -c 'i=0; while true; do echo $i >> /foo; i=$(expr $i + 1); sleep 1; done'
+
+ This command will return the container's ID, e.g., d78.
+ (borrowed from https://criu.org/Docker)
+
+3. Migrate the container from the source node
+
+ [src]# ./p.haul-wrap client [dst IP] docker [container ID, e.g., d78]
+
+ where [dst IP] is the IP address of the destination node.
+
+ For example:
+
+ [src]# ./p.haul-wrap client 192.168.11.106 docker d78
+
+ Here 192.168.11.106 is the destination node IP and d78 is the container ID.
+
+
+ When the command returns, on the destination node run
+
+ [dst]# /usr/bin/docker-1.9.0-dev ps
+ [dst]# /usr/bin/docker-1.9.0-dev exec d78 cat /foo
+
+ to verify the counter is continuously being incremented.
+
+Known limitations:
+
+1. No support from the docker python binding (docker-py) yet
+2. The docker daemon has to be restarted on the destination node
--
1.9.1