[CRIU] [PATCH 11/12] build: crit -- Fix install target
Cyrill Gorcunov
gorcunov at openvz.org
Fri Feb 12 10:05:17 PST 2016
From: Tycho Andersen <tycho.andersen at canonical.com>
Signed-off-by: Tycho Andersen <tycho.andersen at canonical.com>
Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
---
Makefile.install | 2 +-
lib/Makefile | 4 +-
lib/py/.gitignore | 3 -
lib/py/Makefile | 16 --
lib/py/__init__.py | 3 -
lib/py/criu.py | 282 -------------------------
lib/py/images/.gitignore | 4 -
lib/py/images/Makefile | 25 ---
lib/py/images/__init__.py | 3 -
lib/py/images/images.py | 479 ------------------------------------------
lib/py/images/pb2dict.py | 276 ------------------------
lib/pycriu/.gitignore | 3 +
lib/pycriu/Makefile | 16 ++
lib/pycriu/__init__.py | 3 +
lib/pycriu/criu.py | 282 +++++++++++++++++++++++++
lib/pycriu/images/.gitignore | 4 +
lib/pycriu/images/Makefile | 25 +++
lib/pycriu/images/__init__.py | 3 +
lib/pycriu/images/images.py | 479 ++++++++++++++++++++++++++++++++++++++++++
lib/pycriu/images/pb2dict.py | 276 ++++++++++++++++++++++++
scripts/crit-setup.py | 3 +-
21 files changed, 1096 insertions(+), 1095 deletions(-)
delete mode 100644 lib/py/.gitignore
delete mode 100644 lib/py/Makefile
delete mode 100644 lib/py/__init__.py
delete mode 100644 lib/py/criu.py
delete mode 100644 lib/py/images/.gitignore
delete mode 100644 lib/py/images/Makefile
delete mode 100644 lib/py/images/__init__.py
delete mode 100644 lib/py/images/images.py
delete mode 100644 lib/py/images/pb2dict.py
create mode 100644 lib/pycriu/.gitignore
create mode 100644 lib/pycriu/Makefile
create mode 100644 lib/pycriu/__init__.py
create mode 100644 lib/pycriu/criu.py
create mode 100644 lib/pycriu/images/.gitignore
create mode 100644 lib/pycriu/images/Makefile
create mode 100644 lib/pycriu/images/__init__.py
create mode 100644 lib/pycriu/images/images.py
create mode 100644 lib/pycriu/images/pb2dict.py
diff --git a/Makefile.install b/Makefile.install
index ffd496f7a651..dfb65452d6ce 100644
--- a/Makefile.install
+++ b/Makefile.install
@@ -24,7 +24,7 @@ install-criu: all $(CRIU-LIB) install-crit
install-man:
$(Q) $(MAKE) -C Documentation install
-install-crit: crit
+install-crit: crit/crit
$(E) " INSTALL crit"
$(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX)
diff --git a/lib/Makefile b/lib/Makefile
index d1b0450599f8..3c0decbe03f6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,7 +27,7 @@ PHONY += lib-c
# Python bindings.
lib-py:
$(call msg-gen, $@)
- $(Q) $(MAKE) -C py/images all
+ $(Q) $(MAKE) -C pycriu/images all
PHONY += lib-py
clean:
@@ -35,7 +35,7 @@ clean:
$(Q) $(MAKE) $(build)=c $@
$(Q) $(RM) c/$(CRIU_SO)
$(call msg-clean, lib-py)
- $(Q) $(MAKE) -C py/images $@
+ $(Q) $(MAKE) -C pycriu/images $@
all: $(PHONY)
@true
diff --git a/lib/py/.gitignore b/lib/py/.gitignore
deleted file mode 100644
index 8d503da7f031..000000000000
--- a/lib/py/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*_pb2.py
-*.pyc
-rpc.py
diff --git a/lib/py/Makefile b/lib/py/Makefile
deleted file mode 100644
index 582cc93dea84..000000000000
--- a/lib/py/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-all: images rpc.py
-
-.PHONY: all images clean
-
-images:
- $(Q) $(MAKE) -C images all
-
-# rpc_pb2.py doesn't depend on any other file, so
-# it is safe to rename it, dropping ugly _pb2 suffix.
-rpc.py:
- $(Q) protoc -I=$(SRC_DIR)/images/ --python_out=./ $(SRC_DIR)/images/$(@:.py=.proto)
- $(Q) mv $(@:.py=_pb2.py) $@
-
-clean:
- $(Q) $(MAKE) -C images clean
- $(Q) $(RM) rpc.py *.pyc
diff --git a/lib/py/__init__.py b/lib/py/__init__.py
deleted file mode 100644
index 7de62838cf2f..000000000000
--- a/lib/py/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import rpc
-import images
-from criu import *
diff --git a/lib/py/criu.py b/lib/py/criu.py
deleted file mode 100644
index 84dcefed25e1..000000000000
--- a/lib/py/criu.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# Same as libcriu for C.
-
-import socket
-import errno
-import subprocess
-import fcntl
-import os
-import signal
-import sys
-import struct
-
-import rpc
-
-class _criu_comm:
- """
- Base class for communication classes.
- """
- COMM_SK = 0
- COMM_FD = 1
- COMM_BIN = 2
- comm_type = None
- comm = None
- sk = None
-
- def connect(self, daemon):
- """
- Connect to criu and return socket object.
- daemon -- is for whether or not criu should daemonize if executing criu from binary(comm_bin).
- """
- pass
-
- def disconnect(self):
- """
- Disconnect from criu.
- """
- pass
-
-
-class _criu_comm_sk(_criu_comm):
- """
- Communication class for unix socket.
- """
- def __init__(self, sk_path):
- self.comm_type = self.COMM_SK
- self.comm = sk_path
-
- def connect(self, daemon):
- self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
- self.sk.connect(self.comm)
-
- return self.sk
-
- def disconnect(self):
- self.sk.close()
-
-
-class _criu_comm_fd(_criu_comm):
- """
- Commnunication class for file descriptor.
- """
- def __init__(self, fd):
- self.comm_type = self.COMM_FD
- self.comm = fd
-
- def connect(self, daemon):
- self.sk = socket.fromfd(self.comm, socket.AF_UNIX, socket.SOCK_SEQPACKET)
-
- return self.sk
-
- def disconnect(self):
- self.sk.close()
-
-class _criu_comm_bin(_criu_comm):
- """
- Communication class for binary.
- """
- def __init__(self, bin_path):
- self.comm_type = self.COMM_BIN
- self.comm = bin_path
- self.swrk = None
- self.daemon = None
-
- def connect(self, daemon):
- # Kind of the same thing we do in libcriu
- css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
- flags = fcntl.fcntl(css[1], fcntl.F_GETFD)
- fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
-
- self.daemon = daemon
-
- p = os.fork()
-
- if p == 0:
- def exec_criu():
- os.close(0)
- os.close(1)
- os.close(2)
-
- css[0].send(struct.pack('i', os.getpid()))
- os.execv(self.comm, [self.comm, 'swrk', "%d" % css[0].fileno()])
- os._exit(1)
-
- if daemon:
- # Python has no daemon(3) alternative,
- # so we need to mimic it ourself.
- p = os.fork()
-
- if p == 0:
- os.setsid()
-
- exec_criu()
- else:
- os._exit(0)
- else:
- exec_criu()
-
- css[0].close()
- self.swrk = struct.unpack('i', css[1].recv(4))[0]
- self.sk = css[1]
-
- return self.sk
-
- def disconnect(self):
- self.sk.close()
- if not self.daemon:
- os.waitpid(self.swrk, 0)
-
-
-class CRIUException(Exception):
- """
- Exception class for handling and storing criu errors.
- """
- typ = None
- _str = None
-
- def __str__(self):
- return self._str
-
-
-class CRIUExceptionInternal(CRIUException):
- """
- Exception class for handling and storing internal errors.
- """
- def __init__(self, typ, s):
- self.typ = typ
- self._str = "%s failed with internal error: %s" % (rpc.criu_req_type.Name(self.typ), s)
-
-
-class CRIUExceptionExternal(CRIUException):
- """
- Exception class for handling and storing criu RPC errors.
- """
-
- def __init__(self, req_typ, resp_typ, errno):
- self.typ = req_typ
- self.resp_typ = resp_typ
- self.errno = errno
- self._str = self._gen_error_str()
-
- def _gen_error_str(self):
- s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), )
-
- if self.typ != self.resp_typ:
- s += "Unxecpected response type %d: " % (self.resp_typ, )
-
- s += "Error(%d): " % (self.errno, )
-
- if self.errno == errno.EBADRQC:
- s += "Bad options"
-
- if self.typ == rpc.DUMP:
- if self.errno == errno.ESRCH:
- s += "No process with such pid"
-
- if self.typ == rpc.RESTORE:
- if self.errno == errno.EEXIST:
- s += "Process with requested pid already exists"
-
- s += "Unknown"
-
- return s
-
-
-class criu:
- """
- Call criu through RPC.
- """
- opts = None #CRIU options in pb format
-
- _comm = None #Communication method
-
- def __init__(self):
- self.use_binary('criu')
- self.opts = rpc.criu_opts()
-
- def use_sk(self, sk_name):
- """
- Access criu using unix socket which that belongs to criu service daemon.
- """
- self._comm = _criu_comm_sk(sk_name)
-
- def use_fd(self, fd):
- """
- Access criu using provided fd.
- """
- self._comm = _criu_comm_fd(fd)
-
- def use_binary(self, bin_name):
- """
- Access criu by execing it using provided path to criu binary.
- """
- self._comm = _criu_comm_bin(bin_name)
-
- def _send_req_and_recv_resp(self, req):
- """
- As simple as send request and receive response.
- """
- # In case of self-dump we need to spawn criu swrk detached
- # from our current process, as criu has a hard time separating
- # process resources from its own if criu is located in a same
- # process tree it is trying to dump.
- daemon = False
- if req.type == rpc.DUMP and not req.opts.HasField('pid'):
- daemon = True
-
- try:
- s = self._comm.connect(daemon)
-
- s.send(req.SerializeToString())
-
- buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK)))
-
- self._comm.disconnect()
-
- resp = rpc.criu_resp()
- resp.ParseFromString(buf)
- except Exception as e:
- raise CRIUExceptionInternal(req.type, str(e))
-
- return resp
-
- def check(self):
- """
- Checks whether the kernel support is up-to-date.
- """
- req = rpc.criu_req()
- req.type = rpc.CHECK
-
- resp = self._send_req_and_recv_resp(req)
-
- if not resp.success:
- raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)
-
- def dump(self):
- """
- Checkpoint a process/tree identified by opts.pid.
- """
- req = rpc.criu_req()
- req.type = rpc.DUMP
- req.opts.MergeFrom(self.opts)
-
- resp = self._send_req_and_recv_resp(req)
-
- if not resp.success:
- raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)
-
- return resp.dump
-
- def restore(self):
- """
- Restore a process/tree.
- """
- req = rpc.criu_req()
- req.type = rpc.RESTORE
- req.opts.MergeFrom(self.opts)
-
- resp = self._send_req_and_recv_resp(req)
-
- if not resp.success:
- raise CRIUExceptionExternal(req.type, resp.type, resp.errno)
-
- return resp.restore
diff --git a/lib/py/images/.gitignore b/lib/py/images/.gitignore
deleted file mode 100644
index 234bfe9f6f33..000000000000
--- a/lib/py/images/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.pyc
-*_pb2.py
-magic.py
-pb.py
diff --git a/lib/py/images/Makefile b/lib/py/images/Makefile
deleted file mode 100644
index c8a748e5c02c..000000000000
--- a/lib/py/images/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-all: pb.py images magic.py
-
-.PHONY: all images clean pb.py
-
-proto := $(filter-out $(SRC_DIR)/images/rpc.proto, $(sort $(wildcard $(SRC_DIR)/images/*.proto)))
-proto-py-modules := $(foreach m,$(proto),$(subst -,_,$(notdir $(m:.proto=_pb2))))
-
-# We don't need rpc_pb2.py here, as it is not related to the images.
-# Unfortunately, we can't drop ugly _pb2 suffixes here, because
-# some _pb2 files depend on others _pb2 files.
-images:
- $(Q) protoc -I=$(SRC_DIR)/images -I=/usr/include/ --python_out=./ $(proto)
-
-magic.py: $(SRC_DIR)/scripts/magic-gen.py $(SRC_DIR)/criu/include/magic.h
- $(call msg-gen, $@)
- $(Q) python $^ $@
-
-pb.py: images
- $(Q) echo "# Autogenerated. Do not edit!" > $@
- $(Q) for m in $(proto-py-modules); do \
- echo "from $$m import *" >> $@ ;\
- done
-
-clean:
- $(Q) $(RM) ./*_pb2.py ./*.pyc magic.py pb.py
diff --git a/lib/py/images/__init__.py b/lib/py/images/__init__.py
deleted file mode 100644
index 379943b977e1..000000000000
--- a/lib/py/images/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from magic import *
-from images import *
-from pb import *
diff --git a/lib/py/images/images.py b/lib/py/images/images.py
deleted file mode 100644
index d4e883f11006..000000000000
--- a/lib/py/images/images.py
+++ /dev/null
@@ -1,479 +0,0 @@
-#!/bin/env python
-
-# This file contains methods to deal with criu images.
-#
-# According to http://criu.org/Images, criu images can be described
-# with such IOW:
-#
-# IMAGE_FILE ::= MAGIC { ENTRY }
-# ENTRY ::= SIZE PAYLOAD [ EXTRA ]
-# PAYLOAD ::= "message encoded in ProtocolBuffer format"
-# EXTRA ::= "arbitrary blob, depends on the PAYLOAD contents"
-#
-# MAGIC ::= "32 bit integer"
-# SIZE ::= "32 bit integer, equals the PAYLOAD length"
-#
-# Images v1.1 NOTE: MAGIC now consist of 2 32 bit integers, first one is
-# MAGIC_COMMON or MAGIC_SERVICE and the second one is same as MAGIC
-# in images V1.0. We don't keep "first" magic in json images.
-#
-# In order to convert images to human-readable format, we use dict(json).
-# Using json not only allows us to easily read\write images, but also
-# to use a great variety of tools out there to manipulate them.
-# It also allows us to clearly describe criu images structure.
-#
-# Using dict(json) format, criu images can be described like:
-#
-# {
-# 'magic' : 'FOO',
-# 'entries' : [
-# entry,
-# ...
-# ]
-# }
-#
-# Entry, in its turn, could be described as:
-#
-# {
-# pb_msg,
-# 'extra' : extra_msg
-# }
-#
-import io
-import google
-import struct
-import os
-import sys
-import json
-import pb2dict
-import array
-
-import magic
-from pb import *
-
-#
-# Predefined hardcoded constants
-sizeof_u16 = 2
-sizeof_u32 = 4
-sizeof_u64 = 8
-
-# A helper for rounding
-def round_up(x,y):
- return (((x - 1) | (y - 1)) + 1)
-
-class MagicException(Exception):
- def __init__(self, magic):
- self.magic = magic
-
-# Generic class to handle loading/dumping criu images entries from/to bin
-# format to/from dict(json).
-class entry_handler:
- """
- Generic class to handle loading/dumping criu images
- entries from/to bin format to/from dict(json).
- """
- def __init__(self, payload, extra_handler=None):
- """
- Sets payload class and extra handler class.
- """
- self.payload = payload
- self.extra_handler = extra_handler
-
- def load(self, f, pretty = False):
- """
- Convert criu image entries from binary format to dict(json).
- Takes a file-like object and returnes a list with entries in
- dict(json) format.
- """
- entries = []
-
- while True:
- entry = {}
-
- # Read payload
- pb = self.payload()
- buf = f.read(4)
- if buf == '':
- break
- size, = struct.unpack('i', buf)
- pb.ParseFromString(f.read(size))
- entry = pb2dict.pb2dict(pb, pretty)
-
- # Read extra
- if self.extra_handler:
- entry['extra'] = self.extra_handler.load(f, pb)
-
- entries.append(entry)
-
- return entries
-
- def loads(self, s, pretty = False):
- """
- Same as load(), but takes a string as an argument.
- """
- f = io.BytesIO(s)
- return self.load(f, pretty)
-
- def dump(self, entries, f):
- """
- Convert criu image entries from dict(json) format to binary.
- Takes a list of entries and a file-like object to write entries
- in binary format to.
- """
- for entry in entries:
- extra = entry.pop('extra', None)
-
- # Write payload
- pb = self.payload()
- pb2dict.dict2pb(entry, pb)
- pb_str = pb.SerializeToString()
- size = len(pb_str)
- f.write(struct.pack('i', size))
- f.write(pb_str)
-
- # Write extra
- if self.extra_handler and extra:
- self.extra_handler.dump(extra, f, pb)
-
- def dumps(self, entries):
- """
- Same as dump(), but doesn't take file-like object and just
- returns a string.
- """
- f = io.BytesIO('')
- self.dump(entries, f)
- return f.read()
-
- def count(self, f):
- """
- Counts the number of top-level object in the image file
- """
- entries = 0
-
- while True:
- buf = f.read(4)
- if buf == '':
- break
- size, = struct.unpack('i', buf)
- f.seek(size, 1)
- entries += 1
-
- return entries
-
-# Special handler for pagemap.img
-class pagemap_handler:
- """
- Special entry handler for pagemap.img, which is unique in a way
- that it has a header of pagemap_head type followed by entries
- of pagemap_entry type.
- """
- def load(self, f, pretty = False):
- entries = []
-
- pb = pagemap_head()
- while True:
- buf = f.read(4)
- if buf == '':
- break
- size, = struct.unpack('i', buf)
- pb.ParseFromString(f.read(size))
- entries.append(pb2dict.pb2dict(pb, pretty))
-
- pb = pagemap_entry()
-
- return entries
-
- def loads(self, s, pretty = False):
- f = io.BytesIO(s)
- return self.load(f, pretty)
-
- def dump(self, entries, f):
- pb = pagemap_head()
- for item in entries:
- pb2dict.dict2pb(item, pb)
- pb_str = pb.SerializeToString()
- size = len(pb_str)
- f.write(struct.pack('i', size))
- f.write(pb_str)
-
- pb = pagemap_entry()
-
- def dumps(self, entries):
- f = io.BytesIO('')
- self.dump(entries, f)
- return f.read()
-
- def count(self, f):
- return entry_handler(None).count(f) - 1
-
-
-# In following extra handlers we use base64 encoding
-# to store binary data. Even though, the nature
-# of base64 is that it increases the total size,
-# it doesn't really matter, because our images
-# do not store big amounts of binary data. They
-# are negligible comparing to pages size.
-class pipes_data_extra_handler:
- def load(self, f, pload):
- size = pload.bytes
- data = f.read(size)
- return data.encode('base64')
-
- def dump(self, extra, f, pload):
- data = extra.decode('base64')
- f.write(data)
-
-class sk_queues_extra_handler:
- def load(self, f, pload):
- size = pload.length
- data = f.read(size)
- return data.encode('base64')
-
- def dump(self, extra, f, pb):
- data = extra.decode('base64')
- f.write(data)
-
-class ghost_file_extra_handler:
- def load(self, f, pb):
- data = f.read()
- return data.encode('base64')
-
- def dump(self, extra, f, pb):
- data = extra.decode('base64')
- f.write(data)
-
-class tcp_stream_extra_handler:
- def load(self, f, pb):
- d = {}
-
- inq = f.read(pb.inq_len)
- outq = f.read(pb.outq_len)
-
- d['inq'] = inq.encode('base64')
- d['outq'] = outq.encode('base64')
-
- return d
-
- def dump(self, extra, f, pb):
- inq = extra['inq'].decode('base64')
- outq = extra['outq'].decode('base64')
-
- f.write(inq)
- f.write(outq)
-
-class ipc_sem_set_handler:
- def load(self, f, pb):
- entry = pb2dict.pb2dict(pb)
- size = sizeof_u16 * entry['nsems']
- rounded = round_up(size, sizeof_u64)
- s = array.array('H')
- if s.itemsize != sizeof_u16:
- raise Exception("Array size mismatch")
- s.fromstring(f.read(size))
- f.seek(rounded - size, 1)
- return s.tolist()
-
- def dump(self, extra, f, pb):
- entry = pb2dict.pb2dict(pb)
- size = sizeof_u16 * entry['nsems']
- rounded = round_up(size, sizeof_u64)
- s = array.array('H')
- if s.itemsize != sizeof_u16:
- raise Exception("Array size mismatch")
- s.fromlist(extra)
- if len(s) != entry['nsems']:
- raise Exception("Number of semaphores mismatch")
- f.write(s.tostring())
- f.write('\0' * (rounded - size))
-
-class ipc_msg_queue_handler:
- def load(self, f, pb):
- entry = pb2dict.pb2dict(pb)
- messages = []
- for x in range (0, entry['qnum']):
- buf = f.read(4)
- if buf == '':
- break
- size, = struct.unpack('i', buf)
- msg = ipc_msg()
- msg.ParseFromString(f.read(size))
- rounded = round_up(msg.msize, sizeof_u64)
- data = f.read(msg.msize)
- f.seek(rounded - msg.msize, 1)
- messages.append(pb2dict.pb2dict(msg))
- messages.append(data.encode('base64'))
- return messages
-
- def dump(self, extra, f, pb):
- entry = pb2dict.pb2dict(pb)
- for i in range (0, len(extra), 2):
- msg = ipc_msg()
- pb2dict.dict2pb(extra[i], msg)
- msg_str = msg.SerializeToString()
- size = len(msg_str)
- f.write(struct.pack('i', size))
- f.write(msg_str)
- rounded = round_up(msg.msize, sizeof_u64)
- data = extra[i + 1].decode('base64')
- f.write(data[:msg.msize])
- f.write('\0' * (rounded - msg.msize))
-
-class ipc_shm_handler:
- def load(self, f, pb):
- entry = pb2dict.pb2dict(pb)
- size = entry['size']
- data = f.read(size)
- rounded = round_up(size, sizeof_u32)
- f.seek(rounded - size, 1)
- return data.encode('base64')
-
- def dump(self, extra, f, pb):
- entry = pb2dict.pb2dict(pb)
- size = entry['size']
- data = extra.decode('base64')
- rounded = round_up(size, sizeof_u32)
- f.write(data[:size])
- f.write('\0' * (rounded - size))
-
-handlers = {
- 'INVENTORY' : entry_handler(inventory_entry),
- 'CORE' : entry_handler(core_entry),
- 'IDS' : entry_handler(task_kobj_ids_entry),
- 'CREDS' : entry_handler(creds_entry),
- 'UTSNS' : entry_handler(utsns_entry),
- 'IPC_VAR' : entry_handler(ipc_var_entry),
- 'FS' : entry_handler(fs_entry),
- 'GHOST_FILE' : entry_handler(ghost_file_entry, ghost_file_extra_handler()),
- 'MM' : entry_handler(mm_entry),
- 'CGROUP' : entry_handler(cgroup_entry),
- 'TCP_STREAM' : entry_handler(tcp_stream_entry, tcp_stream_extra_handler()),
- 'STATS' : entry_handler(stats_entry),
- 'PAGEMAP' : pagemap_handler(), # Special one
- 'PSTREE' : entry_handler(pstree_entry),
- 'REG_FILES' : entry_handler(reg_file_entry),
- 'NS_FILES' : entry_handler(ns_file_entry),
- 'EVENTFD_FILE' : entry_handler(eventfd_file_entry),
- 'EVENTPOLL_FILE' : entry_handler(eventpoll_file_entry),
- 'EVENTPOLL_TFD' : entry_handler(eventpoll_tfd_entry),
- 'SIGNALFD' : entry_handler(signalfd_entry),
- 'TIMERFD' : entry_handler(timerfd_entry),
- 'INOTIFY_FILE' : entry_handler(inotify_file_entry),
- 'INOTIFY_WD' : entry_handler(inotify_wd_entry),
- 'FANOTIFY_FILE' : entry_handler(fanotify_file_entry),
- 'FANOTIFY_MARK' : entry_handler(fanotify_mark_entry),
- 'VMAS' : entry_handler(vma_entry),
- 'PIPES' : entry_handler(pipe_entry),
- 'FIFO' : entry_handler(fifo_entry),
- 'SIGACT' : entry_handler(sa_entry),
- 'NETLINK_SK' : entry_handler(netlink_sk_entry),
- 'REMAP_FPATH' : entry_handler(remap_file_path_entry),
- 'MNTS' : entry_handler(mnt_entry),
- 'TTY_FILES' : entry_handler(tty_file_entry),
- 'TTY_INFO' : entry_handler(tty_info_entry),
- 'RLIMIT' : entry_handler(rlimit_entry),
- 'TUNFILE' : entry_handler(tunfile_entry),
- 'EXT_FILES' : entry_handler(ext_file_entry),
- 'IRMAP_CACHE' : entry_handler(irmap_cache_entry),
- 'FILE_LOCKS' : entry_handler(file_lock_entry),
- 'FDINFO' : entry_handler(fdinfo_entry),
- 'UNIXSK' : entry_handler(unix_sk_entry),
- 'INETSK' : entry_handler(inet_sk_entry),
- 'PACKETSK' : entry_handler(packet_sock_entry),
- 'ITIMERS' : entry_handler(itimer_entry),
- 'POSIX_TIMERS' : entry_handler(posix_timer_entry),
- 'NETDEV' : entry_handler(net_device_entry),
- 'PIPES_DATA' : entry_handler(pipe_data_entry, pipes_data_extra_handler()),
- 'FIFO_DATA' : entry_handler(pipe_data_entry, pipes_data_extra_handler()),
- 'SK_QUEUES' : entry_handler(sk_packet_entry, sk_queues_extra_handler()),
- 'IPCNS_SHM' : entry_handler(ipc_shm_entry, ipc_shm_handler()),
- 'IPCNS_SEM' : entry_handler(ipc_sem_entry, ipc_sem_set_handler()),
- 'IPCNS_MSG' : entry_handler(ipc_msg_entry, ipc_msg_queue_handler()),
- 'NETNS' : entry_handler(netns_entry),
- 'USERNS' : entry_handler(userns_entry),
- 'SECCOMP' : entry_handler(seccomp_entry),
- }
-
-def __rhandler(f):
- # Images v1.1 NOTE: First read "first" magic.
- img_magic, = struct.unpack('i', f.read(4))
- if img_magic in (magic.by_name['IMG_COMMON'], magic.by_name['IMG_SERVICE']):
- img_magic, = struct.unpack('i', f.read(4))
-
- try:
- m = magic.by_val[img_magic]
- except:
- raise MagicException(img_magic)
-
- try:
- handler = handlers[m]
- except:
- raise Exception("No handler found for image with magic " + m)
-
- return m, handler
-
-def load(f, pretty = False):
- """
- Convert criu image from binary format to dict(json).
- Takes a file-like object to read criu image from.
- Returns criu image in dict(json) format.
- """
- image = {}
-
- m, handler = __rhandler(f)
-
- image['magic'] = m
- image['entries'] = handler.load(f, pretty)
-
- return image
-
-def info(f):
- res = {}
-
- m, handler = __rhandler(f)
-
- res['magic'] = m
- res['count'] = handler.count(f)
-
- return res
-
-def loads(s, pretty = False):
- """
- Same as load(), but takes a string.
- """
- f = io.BytesIO(s)
- return load(f, pretty)
-
-def dump(img, f):
- """
- Convert criu image from dict(json) format to binary.
- Takes an image in dict(json) format and file-like
- object to write to.
- """
- m = img['magic']
- magic_val = magic.by_name[img['magic']]
-
- # Images v1.1 NOTE: use "second" magic to identify what "first"
- # should be written.
- if m != 'INVENTORY':
- if m in ('STATS', 'IRMAP_CACHE'):
- f.write(struct.pack('i', magic.by_name['IMG_SERVICE']))
- else:
- f.write(struct.pack('i', magic.by_name['IMG_COMMON']))
-
- f.write(struct.pack('i', magic_val))
-
- try:
- handler = handlers[m]
- except:
- raise Exception("No handler found for image with such magic")
-
- handler.dump(img['entries'], f)
-
-def dumps(img):
- """
- Same as dump(), but takes only an image and returns
- a string.
- """
- f = io.BytesIO('')
- dump(img, f)
- return f.getvalue()
diff --git a/lib/py/images/pb2dict.py b/lib/py/images/pb2dict.py
deleted file mode 100644
index 177cda3154a3..000000000000
--- a/lib/py/images/pb2dict.py
+++ /dev/null
@@ -1,276 +0,0 @@
-from google.protobuf.descriptor import FieldDescriptor as FD
-import opts_pb2
-import ipaddr
-import socket
-import collections
-import os
-
-# pb2dict and dict2pb are methods to convert pb to/from dict.
-# Inspired by:
-# protobuf-to-dict - https://github.com/benhodgson/protobuf-to-dict
-# protobuf-json - https://code.google.com/p/protobuf-json/
-# protobuf source - https://code.google.com/p/protobuf/
-# Both protobuf-to-dict/json do not fit here because of several reasons,
-# here are some of them:
-# - both have a common bug in treating optional field with empty
-# repeated inside.
-# - protobuf-to-json is not avalible in pip or in any other python
-# repo, so it is hard to distribute and we can't rely on it.
-# - both do not treat enums in a way we would like to. They convert
-# protobuf enum to int, but we need a string here, because it is
-# much more informative. BTW, protobuf text_format converts pb
-# enums to string value too. (i.e. "march : x86_64" is better then
-# "march : 1").
-
-
-_basic_cast = {
- FD.TYPE_FIXED64 : long,
- FD.TYPE_FIXED32 : int,
- FD.TYPE_SFIXED64 : long,
- FD.TYPE_SFIXED32 : int,
-
- FD.TYPE_INT64 : long,
- FD.TYPE_UINT64 : long,
- FD.TYPE_SINT64 : long,
-
- FD.TYPE_INT32 : int,
- FD.TYPE_UINT32 : int,
- FD.TYPE_SINT32 : int,
-
- FD.TYPE_BOOL : bool,
-
- FD.TYPE_STRING : unicode
-}
-
-def _marked_as_hex(field):
- return field.GetOptions().Extensions[opts_pb2.criu].hex
-
-def _marked_as_ip(field):
- return field.GetOptions().Extensions[opts_pb2.criu].ipadd
-
-def _marked_as_flags(field):
- return field.GetOptions().Extensions[opts_pb2.criu].flags
-
-def _marked_as_dev(field):
- return field.GetOptions().Extensions[opts_pb2.criu].dev
-
-def _marked_as_odev(field):
- return field.GetOptions().Extensions[opts_pb2.criu].odev
-
-mmap_prot_map = [
- ('PROT_READ', 0x1),
- ('PROT_WRITE', 0x2),
- ('PROT_EXEC', 0x4),
-];
-
-mmap_flags_map = [
- ('MAP_SHARED', 0x1),
- ('MAP_PRIVATE', 0x2),
- ('MAP_ANON', 0x20),
- ('MAP_GROWSDOWN', 0x0100),
-];
-
-mmap_status_map = [
- ('VMA_AREA_NONE', 0 << 0),
- ('VMA_AREA_REGULAR', 1 << 0),
- ('VMA_AREA_STACK', 1 << 1),
- ('VMA_AREA_VSYSCALL', 1 << 2),
- ('VMA_AREA_VDSO', 1 << 3),
- ('VMA_AREA_HEAP', 1 << 5),
-
- ('VMA_FILE_PRIVATE', 1 << 6),
- ('VMA_FILE_SHARED', 1 << 7),
- ('VMA_ANON_SHARED', 1 << 8),
- ('VMA_ANON_PRIVATE', 1 << 9),
-
- ('VMA_AREA_SYSVIPC', 1 << 10),
- ('VMA_AREA_SOCKET', 1 << 11),
- ('VMA_AREA_VVAR', 1 << 12),
- ('VMA_AREA_AIORING', 1 << 13),
-
- ('VMA_UNSUPP', 1 << 31),
-];
-
-rfile_flags_map = [
- ('O_WRONLY', 01),
- ('O_RDWR', 02),
- ('O_APPEND', 02000),
- ('O_DIRECT', 040000),
- ('O_LARGEFILE', 0100000),
-];
-
-flags_maps = {
- 'mmap.prot' : mmap_prot_map,
- 'mmap.flags' : mmap_flags_map,
- 'mmap.status' : mmap_status_map,
- 'rfile.flags' : rfile_flags_map,
-}
-
-def map_flags(value, flags_map):
- bs = map(lambda x: x[0], filter(lambda x: value & x[1], flags_map))
- value &= ~sum(map(lambda x: x[1], flags_map))
- if value:
- bs.append("0x%x" % value)
- return " | ".join(bs)
-
-def unmap_flags(value, flags_map):
- if value == '':
- return 0
-
- bd = dict(flags_map)
- return sum(map(lambda x: int(str(bd.get(x, x)), 0), map(lambda x: x.strip(), value.split('|'))))
-
-kern_minorbits = 20 # This is how kernel encodes dev_t in new format
-
-def decode_dev(field, value):
- if _marked_as_odev(field):
- return "%d:%d" % (os.major(value), os.minor(value))
- else:
- return "%d:%d" % (value >> kern_minorbits, value & ((1 << kern_minorbits) - 1))
-
-def encode_dev(field, value):
- dev = map(lambda x: int(x), value.split(':'))
- if _marked_as_odev(field):
- return os.makedev(dev[0], dev[1])
- else:
- return dev[0] << kern_minorbits | dev[1]
-
-def is_string(value):
- return isinstance(value, unicode) or isinstance(value, str)
-
-def _pb2dict_cast(field, value, pretty = False, is_hex = False):
- if not is_hex:
- is_hex = _marked_as_hex(field)
-
- if field.type == FD.TYPE_MESSAGE:
- return pb2dict(value, pretty, is_hex)
- elif field.type == FD.TYPE_BYTES:
- return value.encode('base64')
- elif field.type == FD.TYPE_ENUM:
- return field.enum_type.values_by_number.get(value, None).name
- elif field.type in _basic_cast:
- cast = _basic_cast[field.type]
- if pretty and (cast == int or cast == long):
- if is_hex:
- # Fields that have (criu).hex = true option set
- # should be stored in hex string format.
- return "0x%x" % value
-
- if _marked_as_dev(field):
- return decode_dev(field, value)
-
- flags = _marked_as_flags(field)
- if flags:
- try:
- flags_map = flags_maps[flags]
- except:
- return "0x%x" % value # flags are better seen as hex anyway
- else:
- return map_flags(value, flags_map)
-
- return cast(value)
- else:
- raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
-
-def pb2dict(pb, pretty = False, is_hex = False):
- """
- Convert protobuf msg to dictionary.
- Takes a protobuf message and returns a dict.
- """
- d = collections.OrderedDict() if pretty else {}
- for field, value in pb.ListFields():
- if field.label == FD.LABEL_REPEATED:
- d_val = []
- if pretty and _marked_as_ip(field):
- if len(value) == 1:
- v = socket.ntohl(value[0])
- addr = ipaddr.IPv4Address(v)
- else:
- v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
- (socket.ntohl(value[1]) << (32 * 2)) + \
- (socket.ntohl(value[2]) << (32 * 1)) + \
- (socket.ntohl(value[3]))
- addr = ipaddr.IPv6Address(v)
-
- d_val.append(addr.compressed)
- else:
- for v in value:
- d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
- else:
- d_val = _pb2dict_cast(field, value, pretty, is_hex)
-
- d[field.name] = d_val
- return d
-
-def _dict2pb_cast(field, value):
- # Not considering TYPE_MESSAGE here, as repeated
- # and non-repeated messages need special treatment
- # in this case, and are hadled separately.
- if field.type == FD.TYPE_BYTES:
- return value.decode('base64')
- elif field.type == FD.TYPE_ENUM:
- return field.enum_type.values_by_name.get(value, None).number
- elif field.type in _basic_cast:
- cast = _basic_cast[field.type]
- if (cast == int or cast == long) and is_string(value):
- if _marked_as_dev(field):
- return encode_dev(field, value)
-
- flags = _marked_as_flags(field)
- if flags:
- try:
- flags_map = flags_maps[flags]
- except:
- pass # Try to use plain string cast
- else:
- return unmap_flags(value, flags_map)
-
- # Some int or long fields might be stored as hex
- # strings. See _pb2dict_cast.
- return cast(value, 0)
- else:
- return cast(value)
- else:
- raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
-
-def dict2pb(d, pb):
- """
- Convert dictionary to protobuf msg.
- Takes dict and protobuf message to be merged into.
- """
- for field in pb.DESCRIPTOR.fields:
- if field.name not in d:
- continue
- value = d[field.name]
- if field.label == FD.LABEL_REPEATED:
- pb_val = getattr(pb, field.name, None)
- if is_string(value[0]) and _marked_as_ip(field):
- val = ipaddr.IPAddress(value[0])
- if val.version == 4:
- pb_val.append(socket.htonl(int(val)))
- elif val.version == 6:
- ival = int(val)
- pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF))
- pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF))
- pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF))
- pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF))
- else:
- raise Exception("Unknown IP address version %d" % val.version)
- continue
-
- for v in value:
- if field.type == FD.TYPE_MESSAGE:
- dict2pb(v, pb_val.add())
- else:
- pb_val.append(_dict2pb_cast(field, v))
- else:
- if field.type == FD.TYPE_MESSAGE:
- # SetInParent method acts just like has_* = true in C,
- # and helps to properly treat cases when we have optional
- # field with empty repeated inside.
- getattr(pb, field.name).SetInParent()
-
- dict2pb(value, getattr(pb, field.name, None))
- else:
- setattr(pb, field.name, _dict2pb_cast(field, value))
- return pb
diff --git a/lib/pycriu/.gitignore b/lib/pycriu/.gitignore
new file mode 100644
index 000000000000..8d503da7f031
--- /dev/null
+++ b/lib/pycriu/.gitignore
@@ -0,0 +1,3 @@
+*_pb2.py
+*.pyc
+rpc.py
diff --git a/lib/pycriu/Makefile b/lib/pycriu/Makefile
new file mode 100644
index 000000000000..582cc93dea84
--- /dev/null
+++ b/lib/pycriu/Makefile
@@ -0,0 +1,16 @@
+all: images rpc.py
+
+.PHONY: all images clean
+
+images:
+ $(Q) $(MAKE) -C images all
+
+# rpc_pb2.py doesn't depend on any other file, so
+# it is safe to rename it, dropping ugly _pb2 suffix.
+rpc.py:
+ $(Q) protoc -I=$(SRC_DIR)/images/ --python_out=./ $(SRC_DIR)/images/$(@:.py=.proto)
+ $(Q) mv $(@:.py=_pb2.py) $@
+
+clean:
+ $(Q) $(MAKE) -C images clean
+ $(Q) $(RM) rpc.py *.pyc
diff --git a/lib/pycriu/__init__.py b/lib/pycriu/__init__.py
new file mode 100644
index 000000000000..7de62838cf2f
--- /dev/null
+++ b/lib/pycriu/__init__.py
@@ -0,0 +1,3 @@
+import rpc
+import images
+from criu import *
diff --git a/lib/pycriu/criu.py b/lib/pycriu/criu.py
new file mode 100644
index 000000000000..84dcefed25e1
--- /dev/null
+++ b/lib/pycriu/criu.py
@@ -0,0 +1,282 @@
+# Same as libcriu for C.
+
+import socket
+import errno
+import subprocess
+import fcntl
+import os
+import signal
+import sys
+import struct
+
+import rpc
+
+class _criu_comm:
+ """
+ Base class for communication classes.
+ """
+ COMM_SK = 0
+ COMM_FD = 1
+ COMM_BIN = 2
+ comm_type = None
+ comm = None
+ sk = None
+
+ def connect(self, daemon):
+ """
+ Connect to criu and return socket object.
+ daemon -- is for whether or not criu should daemonize if executing criu from binary(comm_bin).
+ """
+ pass
+
+ def disconnect(self):
+ """
+ Disconnect from criu.
+ """
+ pass
+
+
+class _criu_comm_sk(_criu_comm):
+ """
+ Communication class for unix socket.
+ """
+ def __init__(self, sk_path):
+ self.comm_type = self.COMM_SK
+ self.comm = sk_path
+
+ def connect(self, daemon):
+ self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+ self.sk.connect(self.comm)
+
+ return self.sk
+
+ def disconnect(self):
+ self.sk.close()
+
+
+class _criu_comm_fd(_criu_comm):
+ """
+ Commnunication class for file descriptor.
+ """
+ def __init__(self, fd):
+ self.comm_type = self.COMM_FD
+ self.comm = fd
+
+ def connect(self, daemon):
+ self.sk = socket.fromfd(self.comm, socket.AF_UNIX, socket.SOCK_SEQPACKET)
+
+ return self.sk
+
+ def disconnect(self):
+ self.sk.close()
+
+class _criu_comm_bin(_criu_comm):
+ """
+ Communication class for binary.
+ """
+ def __init__(self, bin_path):
+ self.comm_type = self.COMM_BIN
+ self.comm = bin_path
+ self.swrk = None
+ self.daemon = None
+
+ def connect(self, daemon):
+ # Kind of the same thing we do in libcriu
+ css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+ flags = fcntl.fcntl(css[1], fcntl.F_GETFD)
+ fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+ self.daemon = daemon
+
+ p = os.fork()
+
+ if p == 0:
+ def exec_criu():
+ os.close(0)
+ os.close(1)
+ os.close(2)
+
+ css[0].send(struct.pack('i', os.getpid()))
+ os.execv(self.comm, [self.comm, 'swrk', "%d" % css[0].fileno()])
+ os._exit(1)
+
+ if daemon:
+ # Python has no daemon(3) alternative,
+ # so we need to mimic it ourself.
+ p = os.fork()
+
+ if p == 0:
+ os.setsid()
+
+ exec_criu()
+ else:
+ os._exit(0)
+ else:
+ exec_criu()
+
+ css[0].close()
+ self.swrk = struct.unpack('i', css[1].recv(4))[0]
+ self.sk = css[1]
+
+ return self.sk
+
+ def disconnect(self):
+ self.sk.close()
+ if not self.daemon:
+ os.waitpid(self.swrk, 0)
+
+
+class CRIUException(Exception):
+ """
+ Exception class for handling and storing criu errors.
+ """
+ typ = None
+ _str = None
+
+ def __str__(self):
+ return self._str
+
+
+class CRIUExceptionInternal(CRIUException):
+ """
+ Exception class for handling and storing internal errors.
+ """
+ def __init__(self, typ, s):
+ self.typ = typ
+ self._str = "%s failed with internal error: %s" % (rpc.criu_req_type.Name(self.typ), s)
+
+
+class CRIUExceptionExternal(CRIUException):
+ """
+ Exception class for handling and storing criu RPC errors.
+ """
+
+ def __init__(self, req_typ, resp_typ, errno):
+ self.typ = req_typ
+ self.resp_typ = resp_typ
+ self.errno = errno
+ self._str = self._gen_error_str()
+
+ def _gen_error_str(self):
+ s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), )
+
+ if self.typ != self.resp_typ:
+ s += "Unxecpected response type %d: " % (self.resp_typ, )
+
+ s += "Error(%d): " % (self.errno, )
+
+ if self.errno == errno.EBADRQC:
+ s += "Bad options"
+
+ if self.typ == rpc.DUMP:
+ if self.errno == errno.ESRCH:
+ s += "No process with such pid"
+
+ if self.typ == rpc.RESTORE:
+ if self.errno == errno.EEXIST:
+ s += "Process with requested pid already exists"
+
+ s += "Unknown"
+
+ return s
+
+
+class criu:
+ """
+ Call criu through RPC.
+ """
+ opts = None #CRIU options in pb format
+
+ _comm = None #Communication method
+
+ def __init__(self):
+ self.use_binary('criu')
+ self.opts = rpc.criu_opts()
+
+ def use_sk(self, sk_name):
+ """
+ Access criu using unix socket which that belongs to criu service daemon.
+ """
+ self._comm = _criu_comm_sk(sk_name)
+
+ def use_fd(self, fd):
+ """
+ Access criu using provided fd.
+ """
+ self._comm = _criu_comm_fd(fd)
+
+ def use_binary(self, bin_name):
+ """
+ Access criu by execing it using provided path to criu binary.
+ """
+ self._comm = _criu_comm_bin(bin_name)
+
+ def _send_req_and_recv_resp(self, req):
+ """
+ As simple as send request and receive response.
+ """
+ # In case of self-dump we need to spawn criu swrk detached
+ # from our current process, as criu has a hard time separating
+ # process resources from its own if criu is located in a same
+ # process tree it is trying to dump.
+ daemon = False
+ if req.type == rpc.DUMP and not req.opts.HasField('pid'):
+ daemon = True
+
+ try:
+ s = self._comm.connect(daemon)
+
+ s.send(req.SerializeToString())
+
+ buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK)))
+
+ self._comm.disconnect()
+
+ resp = rpc.criu_resp()
+ resp.ParseFromString(buf)
+ except Exception as e:
+ raise CRIUExceptionInternal(req.type, str(e))
+
+ return resp
+
+ def check(self):
+ """
+ Checks whether the kernel support is up-to-date.
+ """
+ req = rpc.criu_req()
+ req.type = rpc.CHECK
+
+ resp = self._send_req_and_recv_resp(req)
+
+ if not resp.success:
+ raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)
+
+ def dump(self):
+ """
+ Checkpoint a process/tree identified by opts.pid.
+ """
+ req = rpc.criu_req()
+ req.type = rpc.DUMP
+ req.opts.MergeFrom(self.opts)
+
+ resp = self._send_req_and_recv_resp(req)
+
+ if not resp.success:
+ raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)
+
+ return resp.dump
+
+ def restore(self):
+ """
+ Restore a process/tree.
+ """
+ req = rpc.criu_req()
+ req.type = rpc.RESTORE
+ req.opts.MergeFrom(self.opts)
+
+ resp = self._send_req_and_recv_resp(req)
+
+ if not resp.success:
+ raise CRIUExceptionExternal(req.type, resp.type, resp.errno)
+
+ return resp.restore
diff --git a/lib/pycriu/images/.gitignore b/lib/pycriu/images/.gitignore
new file mode 100644
index 000000000000..234bfe9f6f33
--- /dev/null
+++ b/lib/pycriu/images/.gitignore
@@ -0,0 +1,4 @@
+*.pyc
+*_pb2.py
+magic.py
+pb.py
diff --git a/lib/pycriu/images/Makefile b/lib/pycriu/images/Makefile
new file mode 100644
index 000000000000..c8a748e5c02c
--- /dev/null
+++ b/lib/pycriu/images/Makefile
@@ -0,0 +1,25 @@
+all: pb.py images magic.py
+
+.PHONY: all images clean pb.py
+
+proto := $(filter-out $(SRC_DIR)/images/rpc.proto, $(sort $(wildcard $(SRC_DIR)/images/*.proto)))
+proto-py-modules := $(foreach m,$(proto),$(subst -,_,$(notdir $(m:.proto=_pb2))))
+
+# We don't need rpc_pb2.py here, as it is not related to the images.
+# Unfortunately, we can't drop ugly _pb2 suffixes here, because
+# some _pb2 files depend on others _pb2 files.
+images:
+ $(Q) protoc -I=$(SRC_DIR)/images -I=/usr/include/ --python_out=./ $(proto)
+
+magic.py: $(SRC_DIR)/scripts/magic-gen.py $(SRC_DIR)/criu/include/magic.h
+ $(call msg-gen, $@)
+ $(Q) python $^ $@
+
+pb.py: images
+ $(Q) echo "# Autogenerated. Do not edit!" > $@
+ $(Q) for m in $(proto-py-modules); do \
+ echo "from $$m import *" >> $@ ;\
+ done
+
+clean:
+ $(Q) $(RM) ./*_pb2.py ./*.pyc magic.py pb.py
diff --git a/lib/pycriu/images/__init__.py b/lib/pycriu/images/__init__.py
new file mode 100644
index 000000000000..379943b977e1
--- /dev/null
+++ b/lib/pycriu/images/__init__.py
@@ -0,0 +1,3 @@
+from magic import *
+from images import *
+from pb import *
diff --git a/lib/pycriu/images/images.py b/lib/pycriu/images/images.py
new file mode 100644
index 000000000000..d4e883f11006
--- /dev/null
+++ b/lib/pycriu/images/images.py
@@ -0,0 +1,479 @@
+#!/bin/env python
+
+# This file contains methods to deal with criu images.
+#
+# According to http://criu.org/Images, criu images can be described
+# with such IOW:
+#
+# IMAGE_FILE ::= MAGIC { ENTRY }
+# ENTRY ::= SIZE PAYLOAD [ EXTRA ]
+# PAYLOAD ::= "message encoded in ProtocolBuffer format"
+# EXTRA ::= "arbitrary blob, depends on the PAYLOAD contents"
+#
+# MAGIC ::= "32 bit integer"
+# SIZE ::= "32 bit integer, equals the PAYLOAD length"
+#
+# Images v1.1 NOTE: MAGIC now consist of 2 32 bit integers, first one is
+# MAGIC_COMMON or MAGIC_SERVICE and the second one is same as MAGIC
+# in images V1.0. We don't keep "first" magic in json images.
+#
+# In order to convert images to human-readable format, we use dict(json).
+# Using json not only allows us to easily read\write images, but also
+# to use a great variety of tools out there to manipulate them.
+# It also allows us to clearly describe criu images structure.
+#
+# Using dict(json) format, criu images can be described like:
+#
+# {
+# 'magic' : 'FOO',
+# 'entries' : [
+# entry,
+# ...
+# ]
+# }
+#
+# Entry, in its turn, could be described as:
+#
+# {
+# pb_msg,
+# 'extra' : extra_msg
+# }
+#
+import io
+import google
+import struct
+import os
+import sys
+import json
+import pb2dict
+import array
+
+import magic
+from pb import *
+
+#
+# Predefined hardcoded constants
+sizeof_u16 = 2
+sizeof_u32 = 4
+sizeof_u64 = 8
+
+# A helper for rounding
+def round_up(x,y):
+ return (((x - 1) | (y - 1)) + 1)
+
+class MagicException(Exception):
+ def __init__(self, magic):
+ self.magic = magic
+
+# Generic class to handle loading/dumping criu images entries from/to bin
+# format to/from dict(json).
+class entry_handler:
+ """
+ Generic class to handle loading/dumping criu images
+ entries from/to bin format to/from dict(json).
+ """
+ def __init__(self, payload, extra_handler=None):
+ """
+ Sets payload class and extra handler class.
+ """
+ self.payload = payload
+ self.extra_handler = extra_handler
+
+ def load(self, f, pretty = False):
+ """
+ Convert criu image entries from binary format to dict(json).
+ Takes a file-like object and returnes a list with entries in
+ dict(json) format.
+ """
+ entries = []
+
+ while True:
+ entry = {}
+
+ # Read payload
+ pb = self.payload()
+ buf = f.read(4)
+ if buf == '':
+ break
+ size, = struct.unpack('i', buf)
+ pb.ParseFromString(f.read(size))
+ entry = pb2dict.pb2dict(pb, pretty)
+
+ # Read extra
+ if self.extra_handler:
+ entry['extra'] = self.extra_handler.load(f, pb)
+
+ entries.append(entry)
+
+ return entries
+
+ def loads(self, s, pretty = False):
+ """
+ Same as load(), but takes a string as an argument.
+ """
+ f = io.BytesIO(s)
+ return self.load(f, pretty)
+
+ def dump(self, entries, f):
+ """
+ Convert criu image entries from dict(json) format to binary.
+ Takes a list of entries and a file-like object to write entries
+ in binary format to.
+ """
+ for entry in entries:
+ extra = entry.pop('extra', None)
+
+ # Write payload
+ pb = self.payload()
+ pb2dict.dict2pb(entry, pb)
+ pb_str = pb.SerializeToString()
+ size = len(pb_str)
+ f.write(struct.pack('i', size))
+ f.write(pb_str)
+
+ # Write extra
+ if self.extra_handler and extra:
+ self.extra_handler.dump(extra, f, pb)
+
+ def dumps(self, entries):
+ """
+ Same as dump(), but doesn't take file-like object and just
+ returns a string.
+ """
+ f = io.BytesIO('')
+ self.dump(entries, f)
+ return f.read()
+
+ def count(self, f):
+ """
+ Counts the number of top-level object in the image file
+ """
+ entries = 0
+
+ while True:
+ buf = f.read(4)
+ if buf == '':
+ break
+ size, = struct.unpack('i', buf)
+ f.seek(size, 1)
+ entries += 1
+
+ return entries
+
+# Special handler for pagemap.img
+class pagemap_handler:
+ """
+ Special entry handler for pagemap.img, which is unique in a way
+ that it has a header of pagemap_head type followed by entries
+ of pagemap_entry type.
+ """
+ def load(self, f, pretty = False):
+ entries = []
+
+ pb = pagemap_head()
+ while True:
+ buf = f.read(4)
+ if buf == '':
+ break
+ size, = struct.unpack('i', buf)
+ pb.ParseFromString(f.read(size))
+ entries.append(pb2dict.pb2dict(pb, pretty))
+
+ pb = pagemap_entry()
+
+ return entries
+
+ def loads(self, s, pretty = False):
+ f = io.BytesIO(s)
+ return self.load(f, pretty)
+
+ def dump(self, entries, f):
+ pb = pagemap_head()
+ for item in entries:
+ pb2dict.dict2pb(item, pb)
+ pb_str = pb.SerializeToString()
+ size = len(pb_str)
+ f.write(struct.pack('i', size))
+ f.write(pb_str)
+
+ pb = pagemap_entry()
+
+ def dumps(self, entries):
+ f = io.BytesIO('')
+ self.dump(entries, f)
+ return f.read()
+
+ def count(self, f):
+ return entry_handler(None).count(f) - 1
+
+
+# In following extra handlers we use base64 encoding
+# to store binary data. Even though, the nature
+# of base64 is that it increases the total size,
+# it doesn't really matter, because our images
+# do not store big amounts of binary data. They
+# are negligible comparing to pages size.
+class pipes_data_extra_handler:
+ def load(self, f, pload):
+ size = pload.bytes
+ data = f.read(size)
+ return data.encode('base64')
+
+ def dump(self, extra, f, pload):
+ data = extra.decode('base64')
+ f.write(data)
+
+class sk_queues_extra_handler:
+ def load(self, f, pload):
+ size = pload.length
+ data = f.read(size)
+ return data.encode('base64')
+
+ def dump(self, extra, f, pb):
+ data = extra.decode('base64')
+ f.write(data)
+
+class ghost_file_extra_handler:
+ def load(self, f, pb):
+ data = f.read()
+ return data.encode('base64')
+
+ def dump(self, extra, f, pb):
+ data = extra.decode('base64')
+ f.write(data)
+
+class tcp_stream_extra_handler:
+ def load(self, f, pb):
+ d = {}
+
+ inq = f.read(pb.inq_len)
+ outq = f.read(pb.outq_len)
+
+ d['inq'] = inq.encode('base64')
+ d['outq'] = outq.encode('base64')
+
+ return d
+
+ def dump(self, extra, f, pb):
+ inq = extra['inq'].decode('base64')
+ outq = extra['outq'].decode('base64')
+
+ f.write(inq)
+ f.write(outq)
+
+class ipc_sem_set_handler:
+ def load(self, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ size = sizeof_u16 * entry['nsems']
+ rounded = round_up(size, sizeof_u64)
+ s = array.array('H')
+ if s.itemsize != sizeof_u16:
+ raise Exception("Array size mismatch")
+ s.fromstring(f.read(size))
+ f.seek(rounded - size, 1)
+ return s.tolist()
+
+ def dump(self, extra, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ size = sizeof_u16 * entry['nsems']
+ rounded = round_up(size, sizeof_u64)
+ s = array.array('H')
+ if s.itemsize != sizeof_u16:
+ raise Exception("Array size mismatch")
+ s.fromlist(extra)
+ if len(s) != entry['nsems']:
+ raise Exception("Number of semaphores mismatch")
+ f.write(s.tostring())
+ f.write('\0' * (rounded - size))
+
+class ipc_msg_queue_handler:
+ def load(self, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ messages = []
+ for x in range (0, entry['qnum']):
+ buf = f.read(4)
+ if buf == '':
+ break
+ size, = struct.unpack('i', buf)
+ msg = ipc_msg()
+ msg.ParseFromString(f.read(size))
+ rounded = round_up(msg.msize, sizeof_u64)
+ data = f.read(msg.msize)
+ f.seek(rounded - msg.msize, 1)
+ messages.append(pb2dict.pb2dict(msg))
+ messages.append(data.encode('base64'))
+ return messages
+
+ def dump(self, extra, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ for i in range (0, len(extra), 2):
+ msg = ipc_msg()
+ pb2dict.dict2pb(extra[i], msg)
+ msg_str = msg.SerializeToString()
+ size = len(msg_str)
+ f.write(struct.pack('i', size))
+ f.write(msg_str)
+ rounded = round_up(msg.msize, sizeof_u64)
+ data = extra[i + 1].decode('base64')
+ f.write(data[:msg.msize])
+ f.write('\0' * (rounded - msg.msize))
+
+class ipc_shm_handler:
+ def load(self, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ size = entry['size']
+ data = f.read(size)
+ rounded = round_up(size, sizeof_u32)
+ f.seek(rounded - size, 1)
+ return data.encode('base64')
+
+ def dump(self, extra, f, pb):
+ entry = pb2dict.pb2dict(pb)
+ size = entry['size']
+ data = extra.decode('base64')
+ rounded = round_up(size, sizeof_u32)
+ f.write(data[:size])
+ f.write('\0' * (rounded - size))
+
+handlers = {
+ 'INVENTORY' : entry_handler(inventory_entry),
+ 'CORE' : entry_handler(core_entry),
+ 'IDS' : entry_handler(task_kobj_ids_entry),
+ 'CREDS' : entry_handler(creds_entry),
+ 'UTSNS' : entry_handler(utsns_entry),
+ 'IPC_VAR' : entry_handler(ipc_var_entry),
+ 'FS' : entry_handler(fs_entry),
+ 'GHOST_FILE' : entry_handler(ghost_file_entry, ghost_file_extra_handler()),
+ 'MM' : entry_handler(mm_entry),
+ 'CGROUP' : entry_handler(cgroup_entry),
+ 'TCP_STREAM' : entry_handler(tcp_stream_entry, tcp_stream_extra_handler()),
+ 'STATS' : entry_handler(stats_entry),
+ 'PAGEMAP' : pagemap_handler(), # Special one
+ 'PSTREE' : entry_handler(pstree_entry),
+ 'REG_FILES' : entry_handler(reg_file_entry),
+ 'NS_FILES' : entry_handler(ns_file_entry),
+ 'EVENTFD_FILE' : entry_handler(eventfd_file_entry),
+ 'EVENTPOLL_FILE' : entry_handler(eventpoll_file_entry),
+ 'EVENTPOLL_TFD' : entry_handler(eventpoll_tfd_entry),
+ 'SIGNALFD' : entry_handler(signalfd_entry),
+ 'TIMERFD' : entry_handler(timerfd_entry),
+ 'INOTIFY_FILE' : entry_handler(inotify_file_entry),
+ 'INOTIFY_WD' : entry_handler(inotify_wd_entry),
+ 'FANOTIFY_FILE' : entry_handler(fanotify_file_entry),
+ 'FANOTIFY_MARK' : entry_handler(fanotify_mark_entry),
+ 'VMAS' : entry_handler(vma_entry),
+ 'PIPES' : entry_handler(pipe_entry),
+ 'FIFO' : entry_handler(fifo_entry),
+ 'SIGACT' : entry_handler(sa_entry),
+ 'NETLINK_SK' : entry_handler(netlink_sk_entry),
+ 'REMAP_FPATH' : entry_handler(remap_file_path_entry),
+ 'MNTS' : entry_handler(mnt_entry),
+ 'TTY_FILES' : entry_handler(tty_file_entry),
+ 'TTY_INFO' : entry_handler(tty_info_entry),
+ 'RLIMIT' : entry_handler(rlimit_entry),
+ 'TUNFILE' : entry_handler(tunfile_entry),
+ 'EXT_FILES' : entry_handler(ext_file_entry),
+ 'IRMAP_CACHE' : entry_handler(irmap_cache_entry),
+ 'FILE_LOCKS' : entry_handler(file_lock_entry),
+ 'FDINFO' : entry_handler(fdinfo_entry),
+ 'UNIXSK' : entry_handler(unix_sk_entry),
+ 'INETSK' : entry_handler(inet_sk_entry),
+ 'PACKETSK' : entry_handler(packet_sock_entry),
+ 'ITIMERS' : entry_handler(itimer_entry),
+ 'POSIX_TIMERS' : entry_handler(posix_timer_entry),
+ 'NETDEV' : entry_handler(net_device_entry),
+ 'PIPES_DATA' : entry_handler(pipe_data_entry, pipes_data_extra_handler()),
+ 'FIFO_DATA' : entry_handler(pipe_data_entry, pipes_data_extra_handler()),
+ 'SK_QUEUES' : entry_handler(sk_packet_entry, sk_queues_extra_handler()),
+ 'IPCNS_SHM' : entry_handler(ipc_shm_entry, ipc_shm_handler()),
+ 'IPCNS_SEM' : entry_handler(ipc_sem_entry, ipc_sem_set_handler()),
+ 'IPCNS_MSG' : entry_handler(ipc_msg_entry, ipc_msg_queue_handler()),
+ 'NETNS' : entry_handler(netns_entry),
+ 'USERNS' : entry_handler(userns_entry),
+ 'SECCOMP' : entry_handler(seccomp_entry),
+ }
+
+def __rhandler(f):
+ # Images v1.1 NOTE: First read "first" magic.
+ img_magic, = struct.unpack('i', f.read(4))
+ if img_magic in (magic.by_name['IMG_COMMON'], magic.by_name['IMG_SERVICE']):
+ img_magic, = struct.unpack('i', f.read(4))
+
+ try:
+ m = magic.by_val[img_magic]
+ except:
+ raise MagicException(img_magic)
+
+ try:
+ handler = handlers[m]
+ except:
+ raise Exception("No handler found for image with magic " + m)
+
+ return m, handler
+
+def load(f, pretty = False):
+ """
+ Convert criu image from binary format to dict(json).
+ Takes a file-like object to read criu image from.
+ Returns criu image in dict(json) format.
+ """
+ image = {}
+
+ m, handler = __rhandler(f)
+
+ image['magic'] = m
+ image['entries'] = handler.load(f, pretty)
+
+ return image
+
+def info(f):
+ res = {}
+
+ m, handler = __rhandler(f)
+
+ res['magic'] = m
+ res['count'] = handler.count(f)
+
+ return res
+
+def loads(s, pretty = False):
+ """
+ Same as load(), but takes a string.
+ """
+ f = io.BytesIO(s)
+ return load(f, pretty)
+
+def dump(img, f):
+ """
+ Convert criu image from dict(json) format to binary.
+ Takes an image in dict(json) format and file-like
+ object to write to.
+ """
+ m = img['magic']
+ magic_val = magic.by_name[img['magic']]
+
+ # Images v1.1 NOTE: use "second" magic to identify what "first"
+ # should be written.
+ if m != 'INVENTORY':
+ if m in ('STATS', 'IRMAP_CACHE'):
+ f.write(struct.pack('i', magic.by_name['IMG_SERVICE']))
+ else:
+ f.write(struct.pack('i', magic.by_name['IMG_COMMON']))
+
+ f.write(struct.pack('i', magic_val))
+
+ try:
+ handler = handlers[m]
+ except:
+ raise Exception("No handler found for image with such magic")
+
+ handler.dump(img['entries'], f)
+
+def dumps(img):
+ """
+ Same as dump(), but takes only an image and returns
+ a string.
+ """
+ f = io.BytesIO('')
+ dump(img, f)
+ return f.getvalue()
diff --git a/lib/pycriu/images/pb2dict.py b/lib/pycriu/images/pb2dict.py
new file mode 100644
index 000000000000..177cda3154a3
--- /dev/null
+++ b/lib/pycriu/images/pb2dict.py
@@ -0,0 +1,276 @@
+from google.protobuf.descriptor import FieldDescriptor as FD
+import opts_pb2
+import ipaddr
+import socket
+import collections
+import os
+
+# pb2dict and dict2pb are methods to convert pb to/from dict.
+# Inspired by:
+# protobuf-to-dict - https://github.com/benhodgson/protobuf-to-dict
+# protobuf-json - https://code.google.com/p/protobuf-json/
+# protobuf source - https://code.google.com/p/protobuf/
+# Both protobuf-to-dict/json do not fit here because of several reasons,
+# here are some of them:
+# - both have a common bug in treating optional field with empty
+# repeated inside.
+# - protobuf-to-json is not avalible in pip or in any other python
+# repo, so it is hard to distribute and we can't rely on it.
+# - both do not treat enums in a way we would like to. They convert
+# protobuf enum to int, but we need a string here, because it is
+# much more informative. BTW, protobuf text_format converts pb
+# enums to string value too. (i.e. "march : x86_64" is better then
+# "march : 1").
+
+
+_basic_cast = {
+ FD.TYPE_FIXED64 : long,
+ FD.TYPE_FIXED32 : int,
+ FD.TYPE_SFIXED64 : long,
+ FD.TYPE_SFIXED32 : int,
+
+ FD.TYPE_INT64 : long,
+ FD.TYPE_UINT64 : long,
+ FD.TYPE_SINT64 : long,
+
+ FD.TYPE_INT32 : int,
+ FD.TYPE_UINT32 : int,
+ FD.TYPE_SINT32 : int,
+
+ FD.TYPE_BOOL : bool,
+
+ FD.TYPE_STRING : unicode
+}
+
+def _marked_as_hex(field):
+ return field.GetOptions().Extensions[opts_pb2.criu].hex
+
+def _marked_as_ip(field):
+ return field.GetOptions().Extensions[opts_pb2.criu].ipadd
+
+def _marked_as_flags(field):
+ return field.GetOptions().Extensions[opts_pb2.criu].flags
+
+def _marked_as_dev(field):
+ return field.GetOptions().Extensions[opts_pb2.criu].dev
+
+def _marked_as_odev(field):
+ return field.GetOptions().Extensions[opts_pb2.criu].odev
+
+mmap_prot_map = [
+ ('PROT_READ', 0x1),
+ ('PROT_WRITE', 0x2),
+ ('PROT_EXEC', 0x4),
+];
+
+mmap_flags_map = [
+ ('MAP_SHARED', 0x1),
+ ('MAP_PRIVATE', 0x2),
+ ('MAP_ANON', 0x20),
+ ('MAP_GROWSDOWN', 0x0100),
+];
+
+mmap_status_map = [
+ ('VMA_AREA_NONE', 0 << 0),
+ ('VMA_AREA_REGULAR', 1 << 0),
+ ('VMA_AREA_STACK', 1 << 1),
+ ('VMA_AREA_VSYSCALL', 1 << 2),
+ ('VMA_AREA_VDSO', 1 << 3),
+ ('VMA_AREA_HEAP', 1 << 5),
+
+ ('VMA_FILE_PRIVATE', 1 << 6),
+ ('VMA_FILE_SHARED', 1 << 7),
+ ('VMA_ANON_SHARED', 1 << 8),
+ ('VMA_ANON_PRIVATE', 1 << 9),
+
+ ('VMA_AREA_SYSVIPC', 1 << 10),
+ ('VMA_AREA_SOCKET', 1 << 11),
+ ('VMA_AREA_VVAR', 1 << 12),
+ ('VMA_AREA_AIORING', 1 << 13),
+
+ ('VMA_UNSUPP', 1 << 31),
+];
+
+rfile_flags_map = [
+ ('O_WRONLY', 01),
+ ('O_RDWR', 02),
+ ('O_APPEND', 02000),
+ ('O_DIRECT', 040000),
+ ('O_LARGEFILE', 0100000),
+];
+
+flags_maps = {
+ 'mmap.prot' : mmap_prot_map,
+ 'mmap.flags' : mmap_flags_map,
+ 'mmap.status' : mmap_status_map,
+ 'rfile.flags' : rfile_flags_map,
+}
+
+def map_flags(value, flags_map):
+ bs = map(lambda x: x[0], filter(lambda x: value & x[1], flags_map))
+ value &= ~sum(map(lambda x: x[1], flags_map))
+ if value:
+ bs.append("0x%x" % value)
+ return " | ".join(bs)
+
+def unmap_flags(value, flags_map):
+ if value == '':
+ return 0
+
+ bd = dict(flags_map)
+ return sum(map(lambda x: int(str(bd.get(x, x)), 0), map(lambda x: x.strip(), value.split('|'))))
+
+kern_minorbits = 20 # This is how kernel encodes dev_t in new format
+
+def decode_dev(field, value):
+ if _marked_as_odev(field):
+ return "%d:%d" % (os.major(value), os.minor(value))
+ else:
+ return "%d:%d" % (value >> kern_minorbits, value & ((1 << kern_minorbits) - 1))
+
+def encode_dev(field, value):
+ dev = map(lambda x: int(x), value.split(':'))
+ if _marked_as_odev(field):
+ return os.makedev(dev[0], dev[1])
+ else:
+ return dev[0] << kern_minorbits | dev[1]
+
+def is_string(value):
+ return isinstance(value, unicode) or isinstance(value, str)
+
+def _pb2dict_cast(field, value, pretty = False, is_hex = False):
+ if not is_hex:
+ is_hex = _marked_as_hex(field)
+
+ if field.type == FD.TYPE_MESSAGE:
+ return pb2dict(value, pretty, is_hex)
+ elif field.type == FD.TYPE_BYTES:
+ return value.encode('base64')
+ elif field.type == FD.TYPE_ENUM:
+ return field.enum_type.values_by_number.get(value, None).name
+ elif field.type in _basic_cast:
+ cast = _basic_cast[field.type]
+ if pretty and (cast == int or cast == long):
+ if is_hex:
+ # Fields that have (criu).hex = true option set
+ # should be stored in hex string format.
+ return "0x%x" % value
+
+ if _marked_as_dev(field):
+ return decode_dev(field, value)
+
+ flags = _marked_as_flags(field)
+ if flags:
+ try:
+ flags_map = flags_maps[flags]
+ except:
+ return "0x%x" % value # flags are better seen as hex anyway
+ else:
+ return map_flags(value, flags_map)
+
+ return cast(value)
+ else:
+ raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
+
+def pb2dict(pb, pretty = False, is_hex = False):
+ """
+ Convert protobuf msg to dictionary.
+ Takes a protobuf message and returns a dict.
+ """
+ d = collections.OrderedDict() if pretty else {}
+ for field, value in pb.ListFields():
+ if field.label == FD.LABEL_REPEATED:
+ d_val = []
+ if pretty and _marked_as_ip(field):
+ if len(value) == 1:
+ v = socket.ntohl(value[0])
+ addr = ipaddr.IPv4Address(v)
+ else:
+ v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
+ (socket.ntohl(value[1]) << (32 * 2)) + \
+ (socket.ntohl(value[2]) << (32 * 1)) + \
+ (socket.ntohl(value[3]))
+ addr = ipaddr.IPv6Address(v)
+
+ d_val.append(addr.compressed)
+ else:
+ for v in value:
+ d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
+ else:
+ d_val = _pb2dict_cast(field, value, pretty, is_hex)
+
+ d[field.name] = d_val
+ return d
+
+def _dict2pb_cast(field, value):
+ # Not considering TYPE_MESSAGE here, as repeated
+ # and non-repeated messages need special treatment
+ # in this case, and are hadled separately.
+ if field.type == FD.TYPE_BYTES:
+ return value.decode('base64')
+ elif field.type == FD.TYPE_ENUM:
+ return field.enum_type.values_by_name.get(value, None).number
+ elif field.type in _basic_cast:
+ cast = _basic_cast[field.type]
+ if (cast == int or cast == long) and is_string(value):
+ if _marked_as_dev(field):
+ return encode_dev(field, value)
+
+ flags = _marked_as_flags(field)
+ if flags:
+ try:
+ flags_map = flags_maps[flags]
+ except:
+ pass # Try to use plain string cast
+ else:
+ return unmap_flags(value, flags_map)
+
+ # Some int or long fields might be stored as hex
+ # strings. See _pb2dict_cast.
+ return cast(value, 0)
+ else:
+ return cast(value)
+ else:
+ raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
+
+def dict2pb(d, pb):
+ """
+ Convert dictionary to protobuf msg.
+ Takes dict and protobuf message to be merged into.
+ """
+ for field in pb.DESCRIPTOR.fields:
+ if field.name not in d:
+ continue
+ value = d[field.name]
+ if field.label == FD.LABEL_REPEATED:
+ pb_val = getattr(pb, field.name, None)
+ if is_string(value[0]) and _marked_as_ip(field):
+ val = ipaddr.IPAddress(value[0])
+ if val.version == 4:
+ pb_val.append(socket.htonl(int(val)))
+ elif val.version == 6:
+ ival = int(val)
+ pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF))
+ pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF))
+ pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF))
+ pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF))
+ else:
+ raise Exception("Unknown IP address version %d" % val.version)
+ continue
+
+ for v in value:
+ if field.type == FD.TYPE_MESSAGE:
+ dict2pb(v, pb_val.add())
+ else:
+ pb_val.append(_dict2pb_cast(field, v))
+ else:
+ if field.type == FD.TYPE_MESSAGE:
+ # SetInParent method acts just like has_* = true in C,
+ # and helps to properly treat cases when we have optional
+ # field with empty repeated inside.
+ getattr(pb, field.name).SetInParent()
+
+ dict2pb(value, getattr(pb, field.name, None))
+ else:
+ setattr(pb, field.name, _dict2pb_cast(field, value))
+ return pb
diff --git a/scripts/crit-setup.py b/scripts/crit-setup.py
index 38e3647f1508..fbdc66518e40 100644
--- a/scripts/crit-setup.py
+++ b/scripts/crit-setup.py
@@ -6,6 +6,7 @@ setup(name = "crit",
author = "CRIU team",
author_email = "criu at openvz.org",
url = "https://github.com/xemul/criu",
+ package_dir = {'': "lib"},
packages = ["pycriu", "pycriu.images"],
- scripts = ["crit"]
+ scripts = ["crit/crit"]
)
--
2.5.0
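
To see how the pieces fit, here is a rough sketch of scripts/crit-setup.py with the
hunk above applied. Only the lines visible in the diff are verbatim; the distutils
import is an assumption, and fields the patch does not touch (version, description)
are left out:

    # Editor's sketch of scripts/crit-setup.py after this patch -- only the
    # fields shown in the hunk above are taken from the diff; the import line
    # is an assumption.
    from distutils.core import setup

    setup(name = "crit",
          author = "CRIU team",
          author_email = "criu at openvz.org",   # address as rendered by the list archive
          url = "https://github.com/xemul/criu",
          # package_dir tells distutils that top-level packages live under lib/,
          # so "pycriu" resolves to lib/pycriu and "pycriu.images" to
          # lib/pycriu/images -- which is why lib/py is renamed to lib/pycriu.
          package_dir = {'': "lib"},
          packages = ["pycriu", "pycriu.images"],
          # the crit entry point now lives at crit/crit, matching the new
          # "install-crit: crit/crit" dependency in Makefile.install.
          scripts = ["crit/crit"])

With package_dir in place, the existing install-crit recipe
(`python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX)`)
run from the source root can locate the pycriu package under lib/, which is
what makes the install target work again.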