[CRIU] [PATCH 02/20] ARM: added ARM-specific files.

alekskartashov at parallels.com
Wed Dec 12 08:34:12 EST 2012


From: Alexander Kartashov <alekskartashov at parallels.com>

Signed-off-by: Alexander Kartashov <alekskartashov at parallels.com>
---
 arch/arm/Makefile                |   42 +++++++++
 arch/arm/Makefile.inc            |    9 ++
 arch/arm/arch-types.h            |  111 +++++++++++++++++++++++
 arch/arm/arch_bitops.h           |   30 ++++++
 arch/arm/arch_cr_dump.h          |  124 +++++++++++++++++++++++++
 arch/arm/arch_parasite.h         |   28 ++++++
 arch/arm/arch_parasite_syscall.h |   77 ++++++++++++++++
 arch/arm/arch_restorer.h         |   24 +++++
 arch/arm/atomic.h                |   51 +++++++++++
 arch/arm/linkage.h               |   24 +++++
 arch/arm/memcpy_64.h             |   20 ++++
 arch/arm/parasite-head.S         |   24 +++++
 arch/arm/processor-flags.h       |    3 +
 arch/arm/restorer_private.h      |  178 ++++++++++++++++++++++++++++++++++++
 arch/arm/syscall-aux.S           |    9 ++
 arch/arm/syscall-aux.h           |    8 ++
 arch/arm/syscall-common.S        |   52 +++++++++++
 arch/arm/uidiv.S                 |  186 ++++++++++++++++++++++++++++++++++++++
 18 files changed, 1000 insertions(+)
 create mode 100644 arch/arm/Makefile
 create mode 100644 arch/arm/Makefile.inc
 create mode 100644 arch/arm/arch-types.h
 create mode 100644 arch/arm/arch_bitops.h
 create mode 100644 arch/arm/arch_cr_dump.h
 create mode 100644 arch/arm/arch_parasite.h
 create mode 100644 arch/arm/arch_parasite_syscall.h
 create mode 100644 arch/arm/arch_restorer.h
 create mode 100644 arch/arm/atomic.h
 create mode 100644 arch/arm/linkage.h
 create mode 100644 arch/arm/memcpy_64.h
 create mode 100644 arch/arm/parasite-head.S
 create mode 100644 arch/arm/processor-flags.h
 create mode 100644 arch/arm/restorer_private.h
 create mode 100644 arch/arm/syscall-aux.S
 create mode 100644 arch/arm/syscall-aux.h
 create mode 100644 arch/arm/syscall-common.S
 create mode 100644 arch/arm/uidiv.S

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
new file mode 100644
index 0000000..4e1a955
--- /dev/null
+++ b/arch/arm/Makefile
@@ -0,0 +1,42 @@
+SYS-DEF		:= $(SRC_DIR)/include/syscall.def
+SYS-ASM-COMMON	:= syscall-common.S
+SYS-TYPES	:= $(SRC_DIR)/include/syscall-types.h
+
+SYS-CODES	:= $(SRC_DIR)/include/syscall-codes.h
+SYS-PROTO	:= $(SRC_DIR)/include/syscall.h
+
+SYS-ASM		:= syscalls.S
+SYS-GEN		:= $(SRC_DIR)/gen-syscalls.pl
+
+SYS-OBJ		:= syscalls.o
+
+CFLAGS		+= -c -fpie -Wstrict-prototypes -Wa,--noexecstack -D__ASSEMBLY__ -nostdlib -fomit-frame-pointer -I$(shell pwd)
+
+.DEFAULT_GOAL	:= arm
+
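+# gen-syscalls.pl turns syscall.def into the syscall code list, the prototypes and the assembly stubs.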
+$(SYS-ASM): $(SYS-GEN) $(SYS-DEF) $(SYS-ASM-COMMON) $(SYS-TYPES)
+	$(E) "  GEN     " $@
+	$(Q) perl			\
+		$(SYS-GEN)		\
+		$(SYS-DEF)		\
+		$(SYS-CODES)		\
+		$(SYS-PROTO)		\
+		$(SYS-ASM)		\
+		$(SYS-ASM-COMMON)	\
+		$(SYS-TYPES)		\
+		$(ARCH_BITS)
+
+%.o: %.S
+	$(E) "  CC      " $@
+	$(Q) $(CC) $(CFLAGS)  $^ -o $@
+
+arm: $(SYS-OBJ) parasite-head.o
+
+clean:
+	$(E) "  CLEAN SYSCALLS"
+	$(Q) $(RM) -f $(SYS-ASM)
+	$(Q) $(RM) -f $(SYS-CODES)
+	$(Q) $(RM) -f $(SYS-PROTO)
+	$(Q) $(RM) -f $(SYS-OBJ)
+
+.PHONY: clean arm
diff --git a/arch/arm/Makefile.inc b/arch/arm/Makefile.inc
new file mode 100644
index 0000000..5613d87
--- /dev/null
+++ b/arch/arm/Makefile.inc
@@ -0,0 +1,9 @@
+DEFINES   += -DCONFIG_ARM -DCONFIG_HAS_TLS -DARCH_NEED_FP
+ARCH_BITS := 32
+
+CC		:= $(GCC_PREFIX)gcc
+LD		:= $(GCC_PREFIX)ld
+OBJCOPY := $(GCC_PREFIX)objcopy
+LDARCH	:= arm
+
+CFLAGS += -Wno-error=int-to-pointer-cast -Wno-error=pointer-to-int-cast -march=armv7-a
diff --git a/arch/arm/arch-types.h b/arch/arm/arch-types.h
new file mode 100644
index 0000000..ec7259b
--- /dev/null
+++ b/arch/arm/arch-types.h
@@ -0,0 +1,111 @@
+#ifndef ARM_ARCH_H_
+#define ARM_ARCH_H_
+
+#define PRIs PRIu32
+
+/*
+ * Copied from the kernel header arch/arm/include/asm/ptrace.h
+ *
+ * A thread ARM CPU context
+ */
+
+typedef struct {
+        long uregs[18];
+} user_regs_struct_t;
+
+#define ARM_cpsr        uregs[16]
+#define ARM_pc          uregs[15]
+#define ARM_lr          uregs[14]
+#define ARM_sp          uregs[13]
+#define ARM_ip          uregs[12]
+#define ARM_fp          uregs[11]
+#define ARM_r10         uregs[10]
+#define ARM_r9          uregs[9]
+#define ARM_r8          uregs[8]
+#define ARM_r7          uregs[7]
+#define ARM_r6          uregs[6]
+#define ARM_r5          uregs[5]
+#define ARM_r4          uregs[4]
+#define ARM_r3          uregs[3]
+#define ARM_r2          uregs[2]
+#define ARM_r1          uregs[1]
+#define ARM_r0          uregs[0]
+#define ARM_ORIG_r0     uregs[17]
+
+#define REG_IP(regs)  regs.ARM_pc
+#define REG_RES(regs) regs.ARM_r0
+
+
+// Copied from arch/arm/include/asm/user.h
+
+struct user_vfp {
+	unsigned long long fpregs[32];
+	unsigned long fpscr;
+};
+
+struct user_vfp_exc {
+	unsigned long   fpexc;
+	unsigned long   fpinst;
+	unsigned long   fpinst2;
+};
+
+
+/*
+ * PSR bits
+ */
+#define USR26_MODE      0x00000000
+#define FIQ26_MODE      0x00000001
+#define IRQ26_MODE      0x00000002
+#define SVC26_MODE      0x00000003
+#define USR_MODE        0x00000010
+#define FIQ_MODE        0x00000011
+#define IRQ_MODE        0x00000012
+#define SVC_MODE        0x00000013
+#define ABT_MODE        0x00000017
+#define UND_MODE        0x0000001b
+#define SYSTEM_MODE     0x0000001f
+#define MODE32_BIT      0x00000010
+#define MODE_MASK       0x0000001f
+#define PSR_T_BIT       0x00000020
+#define PSR_F_BIT       0x00000040
+#define PSR_I_BIT       0x00000080
+#define PSR_A_BIT       0x00000100
+#define PSR_E_BIT       0x00000200
+#define PSR_J_BIT       0x01000000
+#define PSR_Q_BIT       0x08000000
+#define PSR_V_BIT       0x10000000
+#define PSR_C_BIT       0x20000000
+#define PSR_Z_BIT       0x40000000
+#define PSR_N_BIT       0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f           0xff000000      /* Flags                */
+#define PSR_s           0x00ff0000      /* Status               */
+#define PSR_x           0x0000ff00      /* Extension            */
+#define PSR_c           0x000000ff      /* Control              */
+
+#define _NSIG_BPW 32
+#define TASK_SIZE 0x7f000000
+
+#define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__ARM
+
+#define CORE_THREAD_INFO(core) core->ti_arm
+#define CORE_GPREGS(core) (core->ti_arm->gpregs)
+#define CORE_TLS(core) (core)->ti_arm->tls
+
+#define AT_VECTOR_SIZE 20
+
+typedef uint64_t auxv_t;
+
+#define SIGFRAME_OFFSET 0
+
+#define UserRegsEntry    UserArmRegsEntry
+
+typedef struct {
+	struct user_vfp     vfp;
+	struct user_vfp_exc vfp_exc;
+} UserFPState;
+
+#endif
diff --git a/arch/arm/arch_bitops.h b/arch/arm/arch_bitops.h
new file mode 100644
index 0000000..27ca4b3
--- /dev/null
+++ b/arch/arm/arch_bitops.h
@@ -0,0 +1,30 @@
+#ifndef ARM_BITOPS_H_
+#define ARM_BITOPS_H_
+
+static inline void set_bit(int nr, volatile unsigned long *addr) {
+	*addr |= (1 << nr);
+}
+
+static inline int test_bit(int nr, volatile const unsigned long *addr) {
+	return (*addr & (1 << nr)) ? -1 : 0;
+}
+
+static inline void clear_bit(int nr, volatile unsigned long *addr) {
+	*addr &= ~(1 << nr);
+}
+
+/* Return the index of the least significant set bit (undefined for word == 0). */
+static inline unsigned long __ffs(unsigned long word) {
+	unsigned int p = 0;
+
+	for (; p < 8*sizeof(word); ++p) {
+		if (word & 1) {
+			break;
+		}
+
+		word >>= 1;
+	}
+
+	return p;
+}
+
+#endif
diff --git a/arch/arm/arch_cr_dump.h b/arch/arm/arch_cr_dump.h
new file mode 100644
index 0000000..f8a09d6
--- /dev/null
+++ b/arch/arm/arch_cr_dump.h
@@ -0,0 +1,124 @@
+#ifndef CR_DUMP_ARM_H_
+#define CR_DUMP_ARM_H_
+
+#define TI_SP(core) ((core)->ti_arm->gpregs->sp)
+
+#include "../protobuf/core.pb-c.h"
+
+#define assign_reg(dst, src, e)		dst->e = (__typeof__(dst->e))src.ARM_##e
+
+static int get_task_regs(pid_t pid, CoreEntry *core, const struct parasite_ctl *ctl)
+{
+	user_regs_struct_t regs = {{-1}};
+	struct user_vfp vfp;
+
+	int ret = -1;
+
+	pr_info("Dumping GP/FPU registers ... ");
+
+	if (ctl)
+		regs = ctl->regs_orig;
+	else {
+		if (ptrace(PTRACE_GETREGS, pid, NULL, &regs)) {
+			pr_err("Can't obtain GP registers for %d\n", pid);
+			goto err;
+		}
+	}
+
+	if (ptrace(PTRACE_GETFPREGS, pid, NULL, &vfp)) {
+		pr_err("Can't obtain FPU registers for %d\n", pid);
+		goto err;
+	}
+
+	/* Did we come from a system call? */
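+	/* If so and the syscall has to be restarted, rewind the PC by one
+	 * instruction (4 bytes) so the SVC is re-issued on resume, mirroring
+	 * the kernel's signal-restart logic. */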
+	if ((int)regs.ARM_ORIG_r0 >= 0) {
+		/* Restart the system call */
+		switch ((long)(int)regs.ARM_r0) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs.ARM_r0 = regs.ARM_ORIG_r0;
+			regs.ARM_pc -= 4;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs.ARM_r0 = __NR_restart_syscall;
+			regs.ARM_pc -= 4;
+			break;
+		}
+	}
+
+
+	// Save the ARM CPU state
+
+	assign_reg(core->ti_arm->gpregs, regs, r0);
+	assign_reg(core->ti_arm->gpregs, regs, r1);
+	assign_reg(core->ti_arm->gpregs, regs, r2);
+	assign_reg(core->ti_arm->gpregs, regs, r3);
+	assign_reg(core->ti_arm->gpregs, regs, r4);
+	assign_reg(core->ti_arm->gpregs, regs, r5);
+	assign_reg(core->ti_arm->gpregs, regs, r6);
+	assign_reg(core->ti_arm->gpregs, regs, r7);
+	assign_reg(core->ti_arm->gpregs, regs, r8);
+	assign_reg(core->ti_arm->gpregs, regs, r9);
+	assign_reg(core->ti_arm->gpregs, regs, r10);
+	assign_reg(core->ti_arm->gpregs, regs, fp);
+	assign_reg(core->ti_arm->gpregs, regs, ip);
+	assign_reg(core->ti_arm->gpregs, regs, sp);
+	assign_reg(core->ti_arm->gpregs, regs, lr);
+	assign_reg(core->ti_arm->gpregs, regs, pc);
+	assign_reg(core->ti_arm->gpregs, regs, cpsr);
+	core->ti_arm->gpregs->orig_r0 = regs.ARM_ORIG_r0;
+
+
+	// Save the VFP state
+
+	memcpy(CORE_THREAD_INFO(core)->fpstate->vfp_regs, &vfp.fpregs, sizeof(vfp.fpregs));
+	CORE_THREAD_INFO(core)->fpstate->fpscr = vfp.fpscr;
+
+	ret = 0;
+
+err:
+	return ret;
+}
+
+static int arch_alloc_thread_info(CoreEntry *core) {
+	ThreadInfoArm *ti_arm;
+	UserArmRegsEntry *gpregs;
+	UserArmVfpstateEntry *fpstate;
+
+	ti_arm = xmalloc(sizeof(*ti_arm));
+	thread_info_arm__init(ti_arm);
+
+	gpregs = xmalloc(sizeof(*gpregs));
+	user_arm_regs_entry__init(gpregs);
+	ti_arm->gpregs = gpregs;
+
+	fpstate = xmalloc(sizeof(*fpstate));
+	user_arm_vfpstate_entry__init(fpstate);
+	fpstate->vfp_regs = xmalloc(32*sizeof(unsigned long long));
+	fpstate->n_vfp_regs = 32;
+	ti_arm->fpstate = fpstate;
+
+	core->ti_arm = ti_arm;
+
+	return 0;
+}
+
+static void core_entry_free(CoreEntry *core)
+{
+	if (core) {
+		if (CORE_THREAD_INFO(core)) {
+			if (CORE_THREAD_INFO(core)->fpstate) {
+				xfree(CORE_THREAD_INFO(core)->fpstate->vfp_regs);
+				xfree(CORE_THREAD_INFO(core)->fpstate);
+			}
+			xfree(CORE_THREAD_INFO(core)->gpregs);
+		}
+		xfree(CORE_THREAD_INFO(core));
+		xfree(core->thread_core);
+		xfree(core->tc);
+		xfree(core->ids);
+	}
+}
+
+#endif
diff --git a/arch/arm/arch_parasite.h b/arch/arm/arch_parasite.h
new file mode 100644
index 0000000..97165bf
--- /dev/null
+++ b/arch/arm/arch_parasite.h
@@ -0,0 +1,28 @@
+#ifndef ARM_PARASITE_H__
+#define ARM_PARASITE_H__
+
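+/*
+ * Read the calling thread's TLS value via the kuser_get_tls helper
+ * that the kernel maps into every process at 0xffff0fe0.
+ */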
+static uint32_t get_tls(void) {
+	uint32_t res;
+
+	asm (
+	     "adr %%r1, kuser_get_tls   \n"
+	     "ldr %%r1, [%%r1]          \n"
+	     "push { %%r7, %%lr }       \n"
+	     "blx %%r1                  \n"
+	     "pop { %%r7, %%lr }        \n"
+	     "mov %0, %%r0              \n"
+	     "b   core_store_tls_done   \n"
+
+	     "kuser_get_tls:            \n"
+	     ".word 0xffff0fe0          \n"
+
+	     "core_store_tls_done:      \n"
+	     :"=r"(res)
+	     :
+	     : "r0", "r1", "memory"
+	     );
+
+	return res;
+}
+
+#endif
diff --git a/arch/arm/arch_parasite_syscall.h b/arch/arm/arch_parasite_syscall.h
new file mode 100644
index 0000000..0f384c7
--- /dev/null
+++ b/arch/arm/arch_parasite_syscall.h
@@ -0,0 +1,77 @@
+#ifndef ARM_PARASITE_SYSCALL_H_
+#define ARM_PARASITE_SYSCALL_H_
+
+#define ARCH_SI_TRAP TRAP_BRKPT
+
+static int __parasite_execute(struct parasite_ctl *ctl, pid_t pid, user_regs_struct_t *regs);
+
+/*
+ * Injected syscall instruction
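+ * (SVC #0 followed by UDF #32, which raises SIGTRAP so the tracer regains control)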
+ */
+
+static const char code_syscall[] = {
+        0x00, 0x00, 0x00, 0xef,         /* SVC #0  */
+        0xf0, 0x01, 0xf0, 0xe7          /* UDF #32 */
+};
+
+
+/*
+ * The ARM-specific parasite setup
+ */
+
+static void parasite_setup_regs(unsigned long new_ip, user_regs_struct_t *regs)
+{
+	regs->ARM_pc = new_ip;
+
+	/* Avoid end of syscall processing */
+	regs->ARM_ORIG_r0 = -1;
+
+	/* Make sure flags are in known state */
+	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+}
+
+static void *mmap_seized(struct parasite_ctl *ctl,
+			 void *addr, size_t length, int prot,
+			 int flags, int fd, off_t offset)
+{
+	user_regs_struct_t regs = ctl->regs_orig;
+	void *map = NULL;
+	int ret;
+
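+	/* mmap2 takes its offset argument in 4096-byte pages,
+	 * hence the offset >> 12 below */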
+	regs.ARM_r7 = (unsigned long)__NR_mmap2;     /* mmap	*/
+	regs.ARM_r0 = (unsigned long)addr;	     /* @addr	*/
+	regs.ARM_r1 = (unsigned long)length;	     /* @length	*/
+	regs.ARM_r2 = (unsigned long)prot; 	     /* @prot	*/
+	regs.ARM_r3 = (unsigned long)flags;	     /* @flags	*/
+	regs.ARM_r4 = (unsigned long)fd;	     /* @fd	*/
+	regs.ARM_r5 = (unsigned long)(offset >> 12); /* @offset	*/
+	parasite_setup_regs(ctl->syscall_ip, &regs);
+
+	ret = __parasite_execute(ctl, ctl->pid, &regs);
+	if (ret)
+		goto err;
+
+	if ((long)regs.ARM_r0 > 0)
+		map = (void *)regs.ARM_r0;
+err:
+	return map;
+}
+
+static int munmap_seized(struct parasite_ctl *ctl, void *addr, size_t length)
+{
+	user_regs_struct_t regs = ctl->regs_orig;
+	int ret;
+
+	regs.ARM_r7 = (unsigned long)__NR_munmap;	/* munmap	*/
+	regs.ARM_r0 = (unsigned long)addr;		/* @addr	*/
+	regs.ARM_r1 = (unsigned long)length;	        /* @length	*/
+	parasite_setup_regs(ctl->syscall_ip, &regs);
+
+	ret = __parasite_execute(ctl, ctl->pid, &regs);
+	if (!ret)
+		ret = (int)regs.ARM_r0;
+
+	return ret;
+}
+
+#endif
diff --git a/arch/arm/arch_restorer.h b/arch/arm/arch_restorer.h
new file mode 100644
index 0000000..7c8ed71
--- /dev/null
+++ b/arch/arm/arch_restorer.h
@@ -0,0 +1,24 @@
+#ifndef ARM_RESTORER_H_
+#define ARM_RESTORER_H_
+
+// Copied from arch/arm/kernel/signal.c
+
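+/*
+ * Switch to the new stack and jump into the restorer blob,
+ * passing task_args in r0.
+ */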
+#define jump_to_restorer_blob(new_sp, restore_task_exec_start,		\
+			      task_args)				\
+	asm volatile(							\
+		     "mov %%sp, %%%0				    \n" \
+		     "mov %%r1, %%%1				    \n" \
+		     "mov %%r0, %%%2				    \n" \
+		     "bx  %%r1				            \n"	\
+		     :							\
+		     : "r"(new_sp),					\
+		       "r"(restore_task_exec_start),			\
+		       "r"(task_args)					\
+		     : "sp", "r0", "r1", "memory")
+
+
+static void get_core_tls(CoreEntry *core, struct task_restore_core_args *args) {
+	args->tls = core->ti_arm->tls;
+}
+
+#endif
diff --git a/arch/arm/atomic.h b/arch/arm/atomic.h
new file mode 100644
index 0000000..0576d4c
--- /dev/null
+++ b/arch/arm/atomic.h
@@ -0,0 +1,51 @@
+#ifndef ARM_ATOMIC_H_
+#define ARM_ATOMIC_H_
+
+typedef struct {
+	u32 counter;
+} atomic_t;
+
+#define atomic_set(mem,v) ((mem)->counter = (v))
+
+#define atomic_get(v)	((v)->counter)
+
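+/*
+ * ldrex/strex retry loops, as in the kernel's ARM atomics;
+ * they require ARMv6+ (the build passes -march=armv7-a).
+ */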
+static inline unsigned int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	unsigned int result;
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result - i;
+}
+
+static inline unsigned int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result + i;
+}
+
+#define atomic_inc(v) (atomic_add_return(1, v))
+#define atomic_dec(v) (atomic_sub_return(1, v))
+
+#endif
diff --git a/arch/arm/linkage.h b/arch/arm/linkage.h
new file mode 100644
index 0000000..c80703a
--- /dev/null
+++ b/arch/arm/linkage.h
@@ -0,0 +1,24 @@
+#ifndef LINKAGE_H_
+#define LINKAGE_H_
+
+#ifdef __ASSEMBLY__
+
+#define __ALIGN		.align 4, 0x00
+#define __ALIGN_STR	".align 4, 0x00"
+
+#define GLOBAL(name)		\
+	.globl name;		\
+	name:
+
+#define ENTRY(name)		\
+	.globl name;		\
+	.type name, #function;	\
+	__ALIGN;		\
+	name:
+
+#define END(sym)		\
+	.size sym, . - sym
+
+#endif  /* __ASSEMBLY__ */
+
+#endif /* LINKAGE_H_ */
diff --git a/arch/arm/memcpy_64.h b/arch/arm/memcpy_64.h
new file mode 100644
index 0000000..3dc8f34
--- /dev/null
+++ b/arch/arm/memcpy_64.h
@@ -0,0 +1,20 @@
+#ifndef __MEMCPY_ARM_H__
+#define __MEMCPY_ARM_H__
+
+#include "compiler.h"
+#include "types.h"
+
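+/*
+ * Simple byte-wise copy for the parasite/restorer blobs,
+ * which are built with -nostdlib.
+ */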
+static always_inline void *builtin_memcpy(void *to, const void *from, unsigned int n)
+{
+	int i;
+	unsigned char *cto = to;
+	const unsigned char *cfrom = from;
+
+	for (i = 0; i < n; ++i, ++cto, ++cfrom) {
+		*cto = *cfrom;
+	}
+
+	return to;
+}
+
+#endif
diff --git a/arch/arm/parasite-head.S b/arch/arm/parasite-head.S
new file mode 100644
index 0000000..51e0564
--- /dev/null
+++ b/arch/arm/parasite-head.S
@@ -0,0 +1,24 @@
+#include "linkage.h"
+#include "parasite.h"
+
+	.section .head.text, "ax"
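+	@ Parasite blob entry point: switch to the embedded stack, load the
+	@ command word into r0 and the argument area address into r1, then
+	@ call parasite_service.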
+ENTRY(__export_parasite_head_start)
+	adr	%sp, __export_parasite_stack
+	adr	%r0, __export_parasite_cmd
+	ldr	%r0, [%r0]
+	adr	%r1, __export_parasite_args
+	bl	parasite_service
+	.byte	0xf0, 0x01, 0xf0, 0xe7		@ the instruction UDF #32 generates the signal SIGTRAP in Linux
+
+__export_parasite_cmd:
+	.long 0
+__export_parasite_args:
+	.long 0
+	.space PARASITE_ARG_SIZE,0
+	.space PARASITE_STACK_SIZE,0
+
+        .space 228, 0
+
+__export_parasite_stack:
+	.long 0
+END(__export_parasite_head_start)
diff --git a/arch/arm/processor-flags.h b/arch/arm/processor-flags.h
new file mode 100644
index 0000000..80a5b1c
--- /dev/null
+++ b/arch/arm/processor-flags.h
@@ -0,0 +1,3 @@
+#ifndef ARM_PROCESSOR_FLAGS_H
+#define ARM_PROCESSOR_FLAGS_H
+#endif
diff --git a/arch/arm/restorer_private.h b/arch/arm/restorer_private.h
new file mode 100644
index 0000000..b5c9ecd
--- /dev/null
+++ b/arch/arm/restorer_private.h
@@ -0,0 +1,178 @@
+#ifndef ARM_RESTORER_PRIVATE_H_
+#define ARM_RESTORER_PRIVATE_H_
+
+// Copied from arch/arm/include/asm/sigcontext.h
+
+struct rt_sigcontext {
+	unsigned long trap_no;
+	unsigned long error_code;
+	unsigned long oldmask;
+	unsigned long arm_r0;
+	unsigned long arm_r1;
+	unsigned long arm_r2;
+	unsigned long arm_r3;
+	unsigned long arm_r4;
+	unsigned long arm_r5;
+	unsigned long arm_r6;
+	unsigned long arm_r7;
+	unsigned long arm_r8;
+	unsigned long arm_r9;
+	unsigned long arm_r10;
+	unsigned long arm_fp;
+	unsigned long arm_ip;
+	unsigned long arm_sp;
+	unsigned long arm_lr;
+	unsigned long arm_pc;
+	unsigned long arm_cpsr;
+	unsigned long fault_address;
+};
+
+// Copied from arch/arm/include/asm/ucontext.h
+
+#define VFP_MAGIC               0x56465001
+#define VFP_STORAGE_SIZE        sizeof(struct vfp_sigframe)
+
+struct vfp_sigframe {
+	unsigned long           magic;
+	unsigned long           size;
+	struct user_vfp         ufp;
+	struct user_vfp_exc     ufp_exc;
+};
+
+struct aux_sigframe {
+	/*
+	struct crunch_sigframe  crunch;
+        struct iwmmxt_sigframe  iwmmxt;
+	*/
+
+	struct vfp_sigframe     vfp;
+	unsigned long           end_magic;
+} __attribute__((__aligned__(8)));
+
+#include <sigframe.h>
+
+struct sigframe {
+	struct rt_ucontext uc;
+	unsigned long retcode[2];
+};
+
+struct rt_sigframe {
+	struct rt_siginfo info;
+	struct sigframe sig;
+};
+
+#define RT_SIGFRAME_UC(rt_sigframe) rt_sigframe->sig.uc
+
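+/*
+ * Place clone_restore_fn and &thread_args[i] on the new thread's stack,
+ * issue the clone syscall, and in the child pop that pair and jump to the
+ * restore function with its argument in r0.  Note that the macro expects
+ * the loop index `i` to be in scope at the expansion site.
+ */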
+#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, 	\
+			     thread_args, clone_restore_fn)		\
+	asm volatile(							\
+		     "clone_emul:				\n"	\
+		     "ldr %%r1, %2				\n"	\
+		     "sub %%r1, #16			        \n"	\
+		     "mov %%r0, %%%6				\n"	\
+		     "str %%r0, [%%r1, #4]			\n"	\
+		     "mov %%r0, %%%5				\n"	\
+		     "str %%r0, [%%r1]			        \n"	\
+		     "mov %%r0, %%%1				\n"	\
+		     "mov %%r2, %%%3				\n"	\
+		     "mov %%r3, %%%4				\n"	\
+		     "mov %%r7, #"__stringify(__NR_clone)"	\n"	\
+		     "svc #0				        \n"	\
+									\
+		     "cmp %%r0, #0			        \n"	\
+		     "beq thread_run				\n"	\
+									\
+		     "mov %%%0, %%r0				\n"	\
+		     "b   clone_end				\n"	\
+									\
+		     "thread_run:				\n"	\
+		     "pop { %%r1 }			        \n"	\
+		     "pop { %%r0 }			        \n"	\
+		     "bx  %%r1				        \n"	\
+									\
+		     "clone_end:				\n"	\
+		     : "=r"(ret)					\
+		     : "r"(clone_flags),				\
+		       "m"(new_sp),					\
+		       "r"(&parent_tid),				\
+		       "r"(&thread_args[i].pid),			\
+		       "r"(clone_restore_fn),				\
+		       "r"(&thread_args[i])				\
+		     : "r0", "r1", "r2", "r3", "memory")
+
+
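+/*
+ * Point sp at the prepared rt_sigframe and invoke rt_sigreturn so the
+ * kernel restores the task state from it.
+ */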
+#define ARCH_RT_SIGRETURN(new_sp)					\
+	asm volatile(							\
+		     "mov %%sp, %0				    \n"	\
+		     "mov %%r7,  #"__stringify(__NR_rt_sigreturn)"  \n" \
+		     "svc #0					    \n"	\
+		     :							\
+		     : "r"(new_sp)					\
+		     : "sp","memory")
+
+#define ARCH_FAIL_CORE_RESTORE					\
+	asm volatile(						\
+		     "mov %%sp, %0			    \n"	\
+		     "mov %%r0, #0			    \n"	\
+		     "bx  %%r0			            \n"	\
+		     :						\
+		     : "r"(ret)					\
+		     : "memory")
+
+
+static int restore_gpregs(struct rt_sigframe *f, UserArmRegsEntry *r) {
+#define CPREG1(d)       f->sig.uc.uc_mcontext.arm_##d = r->d
+#define CPREG2(d, s)    f->sig.uc.uc_mcontext.arm_##d = r->s
+
+	CPREG1(r0);
+	CPREG1(r1);
+	CPREG1(r2);
+	CPREG1(r3);
+	CPREG1(r4);
+	CPREG1(r5);
+	CPREG1(r6);
+	CPREG1(r7);
+	CPREG1(r8);
+	CPREG1(r9);
+	CPREG1(r10);
+	CPREG1(fp);
+	CPREG1(ip);
+	CPREG1(sp);
+	CPREG1(lr);
+	CPREG1(pc);
+	CPREG1(cpsr);
+
+#undef CPREG1
+#undef CPREG2
+
+	return 0;
+}
+
+static int restore_fpregs(struct rt_sigframe *f, UserFPState *fpstate) {
+	struct aux_sigframe *aux = (struct aux_sigframe *)&f->sig.uc.uc_regspace;
+
+	aux->vfp.magic = VFP_MAGIC;
+	aux->vfp.size = VFP_STORAGE_SIZE;
+
+	builtin_memcpy(&aux->vfp.ufp, &fpstate->vfp, sizeof(struct user_vfp));
+
+	return 0;
+}
+
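+/*
+ * Set the TLS register via the ARM private syscall __ARM_NR_set_tls:
+ * the asm below builds its number, (15 << 16) + 5 == 0x0f0005, in r7.
+ */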
+static void restore_tls(uint32_t tls) {
+	asm (
+	     "push { %%r7 }  \n"
+	     "mov %%r7, #15  \n"
+	     "lsl %%r7, #16  \n"
+	     "mov %%r0, #5   \n"
+	     "add %%r7, %%r0 \n"
+	     "mov %%r0, %0   \n"
+	     "svc #0         \n"
+	     "pop { %%r7 }   \n"
+	     :
+	     : "r"(tls)
+	     : "r0"
+	     );
+}
+
+#endif
diff --git a/arch/arm/syscall-aux.S b/arch/arm/syscall-aux.S
new file mode 100644
index 0000000..77e1397
--- /dev/null
+++ b/arch/arm/syscall-aux.S
@@ -0,0 +1,9 @@
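+        @ sys_mmap wrapper: load the 5th and 6th arguments from the stack,
+        @ convert the byte offset to 4096-byte pages and invoke mmap2 (192).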
+        ENTRY(sys_mmap)
+        push    { %r4, %r5, %r7 }
+        ldr     %r4, [%sp, #12]
+        ldr     %r5, [%sp, #16]
+        lsr     %r5, #12
+        do_sys  192
+        pop     { %r4, %r5, %r7 }
+        bx      %lr
+        END(sys_mmap)
\ No newline at end of file
diff --git a/arch/arm/syscall-aux.h b/arch/arm/syscall-aux.h
new file mode 100644
index 0000000..ec8c2d3
--- /dev/null
+++ b/arch/arm/syscall-aux.h
@@ -0,0 +1,8 @@
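+/* mmap2 and the ARM private syscall numbers, as defined in the kernel's asm/unistd.h */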
+#define __NR_mmap2 192
+
+#define __ARM_NR_BASE                   0x0f0000
+#define __ARM_NR_breakpoint             (__ARM_NR_BASE+1)
+#define __ARM_NR_cacheflush             (__ARM_NR_BASE+2)
+#define __ARM_NR_usr26                  (__ARM_NR_BASE+3)
+#define __ARM_NR_usr32                  (__ARM_NR_BASE+4)
+#define __ARM_NR_set_tls                (__ARM_NR_BASE+5)
diff --git a/arch/arm/syscall-common.S b/arch/arm/syscall-common.S
new file mode 100644
index 0000000..68ec893
--- /dev/null
+++ b/arch/arm/syscall-common.S
@@ -0,0 +1,52 @@
+#include "linkage.h"
+
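+        // r7 carries the syscall number in the ARM EABI convention, so the
+        // wrappers below save and restore it around the SVC.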
+        .macro mov_r7 imm
+        mov %r7, #\imm
+        .endm
+
+
+        // Call the kernel
+
+        .macro do_sys opcode
+        movw    %r7, #\opcode
+        svc     #0
+        .endm
+
+
+        // a syscall with 0-4 arguments
+
+        .macro syscall0 name, opcode
+        ENTRY(\name)
+        push    { %r7 }
+        do_sys  \opcode
+        pop     { %r7 }
+        bx      %lr
+        END(\name)
+        .endm
+
+
+        // a syscall with 5 arguments
+
+        .macro syscall5 name, opcode
+        ENTRY(\name)
+        push    { %r4, %r7 }
+        ldr     %r4, [%sp, #8]
+        do_sys  \opcode
+        pop     { %r4, %r7 }
+        bx      %lr
+        END(\name)
+        .endm
+
+
+        // a syscall with 6 arguments
+
+        .macro syscall6 name, opcode
+        ENTRY(\name)
+        push    { %r4, %r5, %r7 }
+        ldr     %r4, [%sp, #12]
+        ldr     %r5, [%sp, #16]
+        do_sys  \opcode
+        pop     { %r4, %r5, %r7 }
+        bx      %lr
+        END(\name)
+        .endm
diff --git a/arch/arm/uidiv.S b/arch/arm/uidiv.S
new file mode 100644
index 0000000..e77f610
--- /dev/null
+++ b/arch/arm/uidiv.S
@@ -0,0 +1,186 @@
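+@ Unsigned integer division (__aeabi_uidiv) in Thumb, adapted from the libgcc
+@ lib1funcs routines; needed since the blobs are not linked against libgcc.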
+.globl __aeabi_uidiv
+
+work		.req	r4	@ XXXX is this safe ?
+dividend	.req	r0
+divisor		.req	r1
+overdone	.req	r2
+result		.req	r2
+curbit		.req	r3
+
+#define	LSYM(x) x
+
+.macro THUMB_DIV_MOD_BODY modulo
+	@ Load the constant 0x10000000 into our work register.
+	mov	work, #1
+	lsl	work, #28
+LSYM(Loop1):
+	@ Unless the divisor is very big, shift it up in multiples of
+	@ four bits, since this is the amount of unwinding in the main
+	@ division loop.  Continue shifting until the divisor is
+	@ larger than the dividend.
+	cmp	divisor, work
+	bhs	LSYM(Lbignum)
+	cmp	divisor, dividend
+	bhs	LSYM(Lbignum)
+	lsl	divisor, #4
+	lsl	curbit,  #4
+	b	LSYM(Loop1)
+LSYM(Lbignum):
+	@ Set work to 0x80000000
+	lsl	work, #3
+LSYM(Loop2):
+	@ For very big divisors, we must shift it a bit at a time, or
+	@ we will be in danger of overflowing.
+	cmp	divisor, work
+	bhs	LSYM(Loop3)
+	cmp	divisor, dividend
+	bhs	LSYM(Loop3)
+	lsl	divisor, #1
+	lsl	curbit,  #1
+	b	LSYM(Loop2)
+LSYM(Loop3):
+	@ Test for possible subtractions ...
+  .if \modulo
+	@ ... On the final pass, this may subtract too much from the dividend,
+	@ so keep track of which subtractions are done, we can fix them up
+	@ afterwards.
+	mov	overdone, #0
+	cmp	dividend, divisor
+	blo	LSYM(Lover1)
+	sub	dividend, dividend, divisor
+LSYM(Lover1):
+	lsr	work, divisor, #1
+	cmp	dividend, work
+	blo	LSYM(Lover2)
+	sub	dividend, dividend, work
+	mov	ip, curbit
+	mov	work, #1
+	ror	curbit, work
+	orr	overdone, curbit
+	mov	curbit, ip
+LSYM(Lover2):
+	lsr	work, divisor, #2
+	cmp	dividend, work
+	blo	LSYM(Lover3)
+	sub	dividend, dividend, work
+	mov	ip, curbit
+	mov	work, #2
+	ror	curbit, work
+	orr	overdone, curbit
+	mov	curbit, ip
+LSYM(Lover3):
+	lsr	work, divisor, #3
+	cmp	dividend, work
+	blo	LSYM(Lover4)
+	sub	dividend, dividend, work
+	mov	ip, curbit
+	mov	work, #3
+	ror	curbit, work
+	orr	overdone, curbit
+	mov	curbit, ip
+LSYM(Lover4):
+	mov	ip, curbit
+  .else
+	@ ... and note which bits are done in the result.  On the final pass,
+	@ this may subtract too much from the dividend, but the result will be ok,
+	@ since the "bit" will have been shifted out at the bottom.
+	cmp	dividend, divisor
+	blo	LSYM(Lover1)
+	sub	dividend, dividend, divisor
+	orr	result, result, curbit
+LSYM(Lover1):
+	lsr	work, divisor, #1
+	cmp	dividend, work
+	blo	LSYM(Lover2)
+	sub	dividend, dividend, work
+	lsr	work, curbit, #1
+	orr	result, work
+LSYM(Lover2):
+	lsr	work, divisor, #2
+	cmp	dividend, work
+	blo	LSYM(Lover3)
+	sub	dividend, dividend, work
+	lsr	work, curbit, #2
+	orr	result, work
+LSYM(Lover3):
+	lsr	work, divisor, #3
+	cmp	dividend, work
+	blo	LSYM(Lover4)
+	sub	dividend, dividend, work
+	lsr	work, curbit, #3
+	orr	result, work
+LSYM(Lover4):
+  .endif
+
+	cmp	dividend, #0			@ Early termination?
+	beq	LSYM(Lover5)
+	lsr	curbit,  #4			@ No, any more bits to do?
+	beq	LSYM(Lover5)
+	lsr	divisor, #4
+	b	LSYM(Loop3)
+LSYM(Lover5):
+  .if \modulo
+	@ Any subtractions that we should not have done will be recorded in
+	@ the top three bits of "overdone".  Exactly which were not needed
+	@ are governed by the position of the bit, stored in ip.
+	mov	work, #0xe
+	lsl	work, #28
+	and	overdone, work
+	beq	LSYM(Lgot_result)
+
+	@ If we terminated early, because dividend became zero, then the
+	@ bit in ip will not be in the bottom nibble, and we should not
+	@ perform the additions below.  We must test for this though
+	@ (rather relying upon the TSTs to prevent the additions) since
+	@ the bit in ip could be in the top two bits which might then match
+	@ with one of the smaller RORs.
+	mov	curbit, ip
+	mov	work, #0x7
+	tst	curbit, work
+	beq	LSYM(Lgot_result)
+
+	mov	curbit, ip
+	mov	work, #3
+	ror	curbit, work
+	tst	overdone, curbit
+	beq	LSYM(Lover6)
+	lsr	work, divisor, #3
+	add	dividend, work
+LSYM(Lover6):
+	mov	curbit, ip
+	mov	work, #2
+	ror	curbit, work
+	tst	overdone, curbit
+	beq	LSYM(Lover7)
+	lsr	work, divisor, #2
+	add	dividend, work
+LSYM(Lover7):
+	mov	curbit, ip
+	mov	work, #1
+	ror	curbit, work
+	tst	overdone, curbit
+	beq	LSYM(Lgot_result)
+	lsr	work, divisor, #1
+	add	dividend, work
+  .endif
+LSYM(Lgot_result):
+.endm
+
+
+	.thumb
+	.text
+
+__aeabi_uidiv:
+	mov	curbit, #1
+	mov	result, #0
+
+	push	{ work }
+	cmp	dividend, divisor
+	blo	LSYM(Lgot_result)
+
+	THUMB_DIV_MOD_BODY 0
+
+	mov	r0, result
+	pop	{ work }
+
+	bx      lr
-- 
1.7.9.5


