[CRIU] [PATCH 08/20] restorer: introduced the multiarch support into the restorer.

Pavel Emelyanov xemul at parallels.com
Wed Dec 12 10:57:25 EST 2012


> @@ -66,7 +62,16 @@ struct thread_restore_args {
>  	struct restore_mem_zone		mem_zone;
>  
>  	int				pid;
> -	UserX86RegsEntry		gpregs;
> +	UserRegsEntry		        gpregs;
> +
> +#ifdef ARCH_NEED_FP
> +	UserFPState			fpstate;
> +#endif

The FPU state fetching for x86 is currently being done by Cyrill. Can we
make the first ARM port without FPU support, so that it stays symmetrical with x86?

> +
> +#ifdef CONFIG_HAS_TLS
> +	u32				tls;
> +#endif
> +
>  	u64				clear_tid_addr;
>  
>  	bool				has_futex;
> @@ -116,107 +121,12 @@ struct task_restore_core_args {
>  	u32				mm_saved_auxv_size;
>  	char				comm[TASK_COMM_LEN];
>  	TaskKobjIdsEntry		ids;
> +	uint32_t                        tls;

Why is this field here? The master thread's tls will be in thread_restore_args.

>  	int				*rst_tcp_socks;
>  	int				rst_tcp_socks_size;
>  } __aligned(sizeof(long));
>  

> @@ -232,6 +199,9 @@ long __export_restore_thread(struct thread_restore_args *args)
>  
>  	restore_creds(&args->ta->creds);
>  
> +#ifdef CONFIG_HAS_TLS
> +	restore_tls(args->tls);
> +#endif

This particular place (and some similar ones) should be implemented the other way
around. Introduce an arch-specific routine called arch_restore_tls() and make it a
no-op for x86. This will eliminate preprocessor directives in the middle of a function.

>  
>  	pr_info("%ld: Restored\n", sys_gettid());
>  
> @@ -240,15 +210,9 @@ long __export_restore_thread(struct thread_restore_args *args)
>  
>  	futex_dec_and_wake(&thread_inprogress);
>  
> -	new_sp = (long)rt_sigframe + 8;
> -	asm volatile(
> -		"movq %0, %%rax					\n"
> -		"movq %%rax, %%rsp				\n"
> -		"movl $"__stringify(__NR_rt_sigreturn)", %%eax	\n"
> -		"syscall					\n"
> -		:
> -		: "r"(new_sp)
> -		: "rax","rsp","memory");
> +	new_sp = (long)rt_sigframe + SIGFRAME_OFFSET;
> +	ARCH_RT_SIGRETURN(new_sp);

I'd make arch_rt_sigreturn accept the rt_sigframe pointer and do the new_sp calculation
within it. This would shrink the two arch-specific macros into one.

> +
>  core_restore_end:
>  	pr_err("Restorer abnormal termination for %ld\n", sys_getpid());
>  	sys_exit_group(1);
> @@ -448,6 +412,10 @@ long __export_restore_task(struct task_restore_core_args *args)
>  			}
>  		}
>  
> +		if (vma_entry->end >= TASK_SIZE) {
> +			continue;
> +		}
> +
>  		if (vma_entry->end > premmapped_end) {
>  			if (vma_entry->start < premmapped_end)
>  				addr = premmapped_end;
> @@ -470,6 +438,10 @@ long __export_restore_task(struct task_restore_core_args *args)
>  		if (!vma_priv(vma_entry))
>  			continue;
>  
> +		if (vma_entry->end >= TASK_SIZE) {
> +			continue;
> +		}
> +
>  		if (vma_entry->start > vma_entry->shmid)
>  			break;
>  
> @@ -487,6 +459,10 @@ long __export_restore_task(struct task_restore_core_args *args)
>  		if (!vma_priv(vma_entry))
>  			continue;
>  
> +		if (vma_entry->start > TASK_SIZE) {
> +			continue;
> +		}
> +
>  		if (vma_entry->start < vma_entry->shmid)
>  			break;
>  

The above 3 hunks look like a non-arch-specific bug fix. Is that the case?

> @@ -678,41 +654,8 @@ long __export_restore_task(struct task_restore_core_args *args)
>  			 * thread will run with own stack and we must not
>  			 * have any additional instructions... oh, dear...
>  			 */
> -			asm volatile(
> -				"clone_emul:				\n"
> -				"movq %2, %%rsi				\n"
> -				"subq $16, %%rsi			\n"
> -				"movq %6, %%rdi				\n"
> -				"movq %%rdi, 8(%%rsi)			\n"
> -				"movq %5, %%rdi				\n"
> -				"movq %%rdi, 0(%%rsi)			\n"
> -				"movq %1, %%rdi				\n"
> -				"movq %3, %%rdx				\n"
> -				"movq %4, %%r10				\n"
> -				"movl $"__stringify(__NR_clone)", %%eax	\n"
> -				"syscall				\n"
> -
> -				"testq %%rax,%%rax			\n"
> -				"jz thread_run				\n"
> -
> -				"movq %%rax, %0				\n"
> -				"jmp clone_end				\n"
> -
> -				"thread_run:				\n"	/* new stack here */
> -				"xorq %%rbp, %%rbp			\n"	/* clear ABI frame pointer */
> -				"popq %%rax				\n"	/* clone_restore_fn  -- restore_thread */
> -				"popq %%rdi				\n"	/* arguments */
> -				"callq *%%rax				\n"
> -
> -				"clone_end:				\n"
> -				: "=r"(ret)
> -				:	"g"(clone_flags),
> -					"g"(new_sp),
> -					"g"(&parent_tid),
> -					"g"(&thread_args[i].pid),
> -					"g"(args->clone_restore_fn),
> -					"g"(&thread_args[i])
> -				: "rax", "rdi", "rsi", "rdx", "r10", "memory");
> +
> +			RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, thread_args, args->clone_restore_fn);
>  		}
>  
>  		ret = sys_flock(fd, LOCK_UN);
> @@ -766,28 +709,26 @@ long __export_restore_task(struct task_restore_core_args *args)
>  
>  	ret = sys_munmap(args->task_entries, TASK_ENTRIES_SIZE);
>  	if (ret < 0) {
> -		ret = ((long)__LINE__ << 32) | -ret;
> +		ret = ((long)__LINE__ << 16) | ((-ret) & 0xffff);

What is this?

>  		goto core_restore_failed;
>  	}
>  
>  	/*
>  	 * Sigframe stack.
>  	 */
> -	new_sp = (long)rt_sigframe + 8;
> +	new_sp = (long)rt_sigframe + SIGFRAME_OFFSET;
>  


More information about the CRIU mailing list