[Devel] [PATCH v8 6/9] ve/cgroup: unmark ve-root cgroups at container stop
Kirill Tkhai
ktkhai at virtuozzo.com
Thu Apr 16 14:09:50 MSK 2020
On 16.04.2020 13:06, Valeriy Vdovin wrote:
> Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
> ---
>  include/linux/cgroup.h |  1 +
>  kernel/cgroup.c        | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
>  kernel/ve/ve.c         |  2 ++
>  3 files changed, 51 insertions(+)
>
> diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
> index 0a42f93..1bd0fe7 100644
> --- a/include/linux/cgroup.h
> +++ b/include/linux/cgroup.h
> @@ -636,6 +636,7 @@ int cgroup_task_count(const struct cgroup *cgrp);
>
> #ifdef CONFIG_VE
> void cgroup_mark_ve_roots(struct ve_struct *ve);
> +void cgroup_unmark_ve_roots(struct ve_struct *ve);
> #endif
>
> /*
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index 42acbb4..14987d0 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -635,6 +635,35 @@ static struct css_set *find_css_set(
> }
>
> /*
> + * Walk each cgroup link of a given css_set and find a cgroup that
> + * is the child of cgroupfs_root in argument.
> + */
> +static struct cgroup *css_cgroup_from_root(struct css_set *css_set,
> +					   struct cgroupfs_root *root)
> +{
> +	struct cgroup *res = NULL;
> +
> +	BUG_ON(!mutex_is_locked(&cgroup_mutex));
> +	read_lock(&css_set_lock);
> +
> +	if (css_set == &init_css_set) {
> +		res = &root->top_cgroup;
> +	} else {
> +		struct cg_cgroup_link *link;
> +		list_for_each_entry(link, &css_set->cg_links, cg_link_list) {
> +			struct cgroup *c = link->cgrp;
> +			if (c->root == root) {
> +				res = c;
> +				break;
> +			}
> +		}
> +	}
> +	read_unlock(&css_set_lock);
> +	BUG_ON(!res);
> +	return res;
> +}
> +
> +/*
> * Return the cgroup for "task" from the given hierarchy. Must be
> * called with cgroup_mutex held.
> */
> @@ -4235,6 +4264,25 @@ void cgroup_mark_ve_roots(struct ve_struct *ve)
> 	mutex_unlock(&cgroup_mutex);
> }
>
> +void cgroup_unmark_ve_roots(struct ve_struct *ve)
> +{
> +	struct cgroup *cgrp;
> +	struct cgroupfs_root *root;
> +
> +	mutex_lock(&cgroup_mutex);
> +	for_each_active_root(root) {
> +		cgrp = css_cgroup_from_root(ve->root_css_set, root);
> +		BUG_ON(!rcu_dereference_protected(cgrp->ve_owner,
> +				lockdep_is_held(&cgroup_mutex)));
> +		rcu_assign_pointer(cgrp->ve_owner, NULL);
How can we access ve_owner when it is not defined yet? It is only introduced in the next patch.
Can we move the cgrp->ve_owner declaration and its assignment into this patch?
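
Something along these lines could be folded into this patch so the series stays bisectable (just a rough, untested sketch; the exact field placement and the marking site are assumptions inferred from how this hunk dereferences cgrp->ve_owner):

	/* include/linux/cgroup.h: new member inside struct cgroup */
	#ifdef CONFIG_VE
		/* VE owning this ve-root cgroup, NULL otherwise; RCU-protected */
		struct ve_struct __rcu *ve_owner;
	#endif

	/* kernel/cgroup.c: in cgroup_mark_ve_roots(), while cgroup_mutex is held */
	rcu_assign_pointer(cgrp->ve_owner, ve);

With that, cgroup_unmark_ve_roots() added here compiles on its own, and the next patch only needs to add the readers.
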
> +		clear_bit(CGRP_VE_ROOT, &cgrp->flags);
> +	}
> +	mutex_unlock(&cgroup_mutex);
> +	/* ve_owner == NULL will be visible */
> +	synchronize_rcu();
> +	flush_workqueue(ve->wq);
> +}
> +
> struct cgroup *cgroup_get_ve_root(struct cgroup *cgrp)
> {
> struct cgroup *ve_root = NULL;
> diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
> index 539bba1..0ad1a55 100644
> --- a/kernel/ve/ve.c
> +++ b/kernel/ve/ve.c
> @@ -618,6 +618,8 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
> 	if (!ve->ve_ns || ve->ve_ns->pid_ns != pid_ns)
> 		return;
>
> +	cgroup_unmark_ve_roots(ve);
> +
> 	ve_workqueue_stop(ve);
>
> 	/*
>