[CRIU] [PATCH] restore: add action scripts setup-namespaces

Andrew Vagin avagin at parallels.com
Fri May 31 06:28:58 EDT 2013


On Fri, May 31, 2013 at 01:55:49PM +0400, Pavel Emelyanov wrote:
> On 05/31/2013 11:24 AM, Andrey Vagin wrote:
> > After creating namespaces we may need to apply some configuration.
> > For example, uid and gid maps should be applied at this point.
> 
> This comment doesn't explain why we need the socketpair. Apart from that,
> can we avoid using socketpairs and re-use the restore stages instead? E.g.,
> call the scripts before switching away from the "fork" stage?

Thanks for the advice. The socketpair is not required here; I have
re-sent this patch.
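
The restore stages mentioned above are the synchronization points that
crtools and every restored task already pass through; they are built on
futexes over a stage word kept in shared memory. A conceptual sketch of
such a stage barrier, assuming a shared stage word; this illustrates the
idea only and is not the crtools implementation:

#include <limits.h>
#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>

/* The stage word lives in memory shared by crtools and all restored
 * tasks; tasks block on it until the coordinator advances the stage. */
static int futex(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Task side: sleep while the stage word still holds `cur`. */
static void stage_wait(int *stage, int cur)
{
	while (__atomic_load_n(stage, __ATOMIC_ACQUIRE) == cur)
		futex(stage, FUTEX_WAIT, cur);
}

/* Coordinator side: e.g. run the setup-namespaces scripts between the
 * "fork" stage and the next one, then release all waiting tasks. */
static void stage_switch(int *stage, int next)
{
	__atomic_store_n(stage, next, __ATOMIC_RELEASE);
	futex(stage, FUTEX_WAKE, INT_MAX);
}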
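
As for why an external hook is needed at exactly this point: the uid and
gid maps of a freshly created user namespace are filled in through
/proc/<pid>/uid_map and /proc/<pid>/gid_map, and crtools itself cannot
know which mappings the user wants, so the decision is delegated to a
setup-namespaces script. A minimal sketch of such a helper follows; it is
illustrative only, and both how the script learns the root task's pid
(argv[1] below) and the mapping values are assumptions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>

/* Write one mapping line into /proc/<pid>/uid_map or /proc/<pid>/gid_map.
 * crtools pauses the root task so that a script can do this before the
 * restored tasks continue running. */
static int write_id_map(pid_t pid, const char *file, const char *map)
{
	char path[PATH_MAX];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/proc/%d/%s", (int)pid, file);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, map, strlen(map)) != (ssize_t)strlen(map)) {
		perror("write");
		ret = -1;
	}
	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	pid_t pid;

	if (argc < 2)
		return 1;
	pid = atoi(argv[1]);	/* pid of the root restored task (assumed) */

	/* Example mapping: id 0 inside the namespace is id 1000 outside. */
	if (write_id_map(pid, "uid_map", "0 1000 1") ||
	    write_id_map(pid, "gid_map", "0 1000 1"))
		return 1;

	return 0;
}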

> 
> > Signed-off-by: Andrey Vagin <avagin at openvz.org>
> > ---
> >  cr-restore.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 56 insertions(+), 1 deletion(-)
> > 
> > diff --git a/cr-restore.c b/cr-restore.c
> > index 70c5e68..d17fb20 100644
> > --- a/cr-restore.c
> > +++ b/cr-restore.c
> > @@ -818,6 +818,7 @@ struct cr_clone_arg {
> >  	char stack_ptr[0];
> >  	struct pstree_item *item;
> >  	unsigned long clone_flags;
> > +	int ns_signal_socks[2];
> >  	int fd;
> >  
> >  	CoreEntry *core;
> > @@ -841,7 +842,7 @@ static void write_pidfile(char *pfname, int pid)
> >  static inline int fork_with_pid(struct pstree_item *item)
> >  {
> >  	int ret = -1, fd;
> > -	struct cr_clone_arg ca;
> > +	struct cr_clone_arg ca = {.ns_signal_socks = {-1, -1}};
> >  	pid_t pid = item->pid.virt;
> >  
> >  	if (item->state != TASK_HELPER) {
> > @@ -860,6 +861,14 @@ static inline int fork_with_pid(struct pstree_item *item)
> >  	} else
> >  		ca.core = NULL;
> >  
> > +	if (item->parent == NULL) {
> > +		ret = socketpair(PF_UNIX, SOCK_STREAM, 0, ca.ns_signal_socks);
> > +		if (ret == -1) {
> > +			pr_perror("Can't create socket pair");
> > +			goto err_unlock;
> > +		}
> > +	}
> > +
> >  	ca.item = item;
> >  	ca.clone_flags = item->rst->clone_flags;
> >  
> > @@ -916,7 +925,30 @@ err_unlock:
> >  
> >  		close(ca.fd);
> >  	}
> > +
> > +	if (item->parent == NULL) {
> > +		char c = 0;
> > +
> > +		ret = -1;
> > +		close_safe(&ca.ns_signal_socks[1]);
> > +		if (read(ca.ns_signal_socks[0], &c, sizeof(c)) != sizeof(c)) {
> > +			pr_perror("Can't read from ns signal sock");
> > +			goto err;
> > +		}
> > +
> > +		if (run_scripts("setup-namespaces"))
> > +			goto err;
> > +
> > +		if (write(ca.ns_signal_socks[0], &c, sizeof(c)) != sizeof(c)) {
> > +			pr_perror("Unable to write to ns signal sock");
> > +			goto err;
> > +		}
> > +		close_safe(&ca.ns_signal_socks[0]);
> > +		ret = 0;
> > +	}
> >  err:
> > +	close_safe(&ca.ns_signal_socks[0]);
> > +	close_safe(&ca.ns_signal_socks[1]);
> >  	if (ca.core)
> >  		core_entry__free_unpacked(ca.core, NULL);
> >  	return ret;
> > @@ -931,6 +963,14 @@ static void sigchld_handler(int signal, siginfo_t *siginfo, void *data)
> >  
> >  	exit = (siginfo->si_code == CLD_EXITED);
> >  	status = siginfo->si_status;
> > +
> > +	/* The SIGCHLD may come from an action script, not a restored task */
> > +	if (!current && root_item->pid.real != pid) {
> > +		pid = waitpid(root_item->pid.real, &status, WNOHANG);
> > +		if (pid <= 0)
> > +			return;
> > +	}
> > +
> >  	if (!current || status)
> >  		goto err;
> >  
> > @@ -1077,6 +1117,7 @@ static int restore_task_with_children(void *_arg)
> >  
> >  	/* Restore root task */
> >  	if (current->parent == NULL) {
> > +		char c = 0; int sk;
> >  		if (collect_mount_info(getpid()))
> >  			exit(1);
> >  
> > @@ -1091,6 +1132,20 @@ static int restore_task_with_children(void *_arg)
> >  		if (mount_proc())
> >  			exit(1);
> >  
> > +		close(ca->ns_signal_socks[0]);
> > +		sk = ca->ns_signal_socks[1];
> > +
> > +		/* Signal crtools to execute action scripts setup-namespaces */
> > +		if (write(sk, &c, sizeof(c)) != sizeof(c)) {
> > +			pr_perror("Can't write to a ns signal socket");
> > +			exit(1);
> > +		}
> > +		if (read(sk, &c, sizeof(c)) != sizeof(c)) {
> > +			pr_perror("Can't read from a ns signal socket");
> > +			exit(1);
> > +		}
> > +		close(sk);
> > +
> >  		if (root_prepare_shared())
> >  			exit(1);
> >  	}
> > 
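
For reference, run_scripts() used in the patch is the existing crtools
action-scripts helper: every script registered on the command line with
--action-script is executed with the action name, and a non-zero exit
status from any of them fails the restore. A rough sketch of that
mechanism, assuming the action name reaches the scripts through a
CRTOOLS_SCRIPT_ACTION environment variable; this is an illustration, not
the actual crtools source:

#include <stdio.h>
#include <stdlib.h>

struct script {
	struct script *next;
	char *path;		/* collected from --action-script options */
};

static struct script *scripts;

/* Run every registered script with the action name (here
 * "setup-namespaces") exported, so one script can serve several hooks. */
int run_scripts(char *action)
{
	struct script *s;
	int ret = 0;

	if (setenv("CRTOOLS_SCRIPT_ACTION", action, 1))
		return -1;

	for (s = scripts; s; s = s->next)
		ret |= system(s->path);

	unsetenv("CRTOOLS_SCRIPT_ACTION");
	return ret;
}

This is also why the sigchld_handler() hunk is needed: the scripts run as
children of crtools, so when one of them exits the handler first checks
with waitpid(..., WNOHANG) whether the root task itself has died before
treating the SIGCHLD as a restore failure.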

