[Devel] [PATCH RHEL7 COMMIT] ve/cgroup: cleanup per_cgroot_data at cgroup mount destruction
Vasily Averin
vvs at virtuozzo.com
Thu Aug 6 08:32:21 MSK 2020
The commit is pushed to "branch-rh7-3.10.0-1127.18.2.vz7.163.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.18.2.el7
------>
commit 62c48c745b6df383a9fa315404c6f9b135f43201
Author: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
Date: Thu Aug 6 08:32:21 2020 +0300
ve/cgroup: cleanup per_cgroot_data at cgroup mount destruction
Implemented the function 've_cleanup_per_cgroot_data', which is called
in two resource-release cases.
1. At container stop, the container's init process goes through the
exit routines and calls ve_exit_ns. ve_exit_ns knows the ve from its
argument and can call ve_cleanup_per_cgroot_data with that ve.
2. At destruction of a cgroup mount point, 'cgroup_drop_root' is called
from the cgroup unmount code. The same code also runs in cgroup_mount
when an error occurs and cleanup is needed. In both cases
cgroup_drop_root calls 've_cleanup_per_cgroot_data'. These codepaths
know the cgroup and pass it as an argument.
've_cleanup_per_cgroot_data' releases the per-cgroup-root resources
stored in the ve. It expects at least one of its two arguments (ve or
cgroup) to be non-NULL and uses them to determine which type of cleanup
is needed. If cgroup is NULL, it cleans up the values for all cgroup
roots that the ve owns. If cgroup is non-NULL, it cleans up only the
values for that particular cgroup.
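As an illustration of that NULL-argument convention, here is a minimal
userspace sketch (not the kernel code: the struct layouts, the
singly-linked list and main() are simplified stand-ins, and the locking
and RCU that the real function takes are omitted):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel types, for illustration only. */
struct cgroup { int id; };

struct per_cgroot_data {
	struct cgroup *cgroot;
	struct per_cgroot_data *next;
};

struct ve_struct {
	struct per_cgroot_data *per_cgroot_list;
};

/*
 * Models the dispatch described above: cgrp == NULL frees every entry
 * (container stop via ve_exit_ns), cgrp != NULL frees only the entry
 * of that cgroup root (unmount via cgroup_drop_root).
 */
static void cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
{
	struct per_cgroot_data **pp = &ve->per_cgroot_list;

	while (*pp) {
		struct per_cgroot_data *data = *pp;

		if (!cgrp || data->cgroot == cgrp) {
			*pp = data->next;	/* unlink, like list_del_init() */
			free(data);		/* release, like per_cgroot_data_free() */
		} else {
			pp = &data->next;
		}
	}
}

int main(void)
{
	struct cgroup a = { 1 }, b = { 2 };
	struct ve_struct ve = { NULL };
	struct per_cgroot_data *d;

	d = malloc(sizeof(*d));
	d->cgroot = &a;
	d->next = ve.per_cgroot_list;
	ve.per_cgroot_list = d;

	d = malloc(sizeof(*d));
	d->cgroot = &b;
	d->next = ve.per_cgroot_list;
	ve.per_cgroot_list = d;

	cleanup_per_cgroot_data(&ve, &a);	/* cgroup unmount: drops only 'a' */
	cleanup_per_cgroot_data(&ve, NULL);	/* container stop: drops the rest */

	printf("list empty: %s\n", ve.per_cgroot_list ? "no" : "yes");
	return 0;
}

The patch below does the same filtering with list_for_each_entry_safe()
under ve->per_cgroot_list_lock.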
Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
---
 include/linux/ve.h |  2 ++
 kernel/cgroup.c    |  1 +
 kernel/ve/ve.c     | 30 +++++++++++++++++++++---------
 3 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/linux/ve.h b/include/linux/ve.h
index 5bf275f..2dcd7bb 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -220,6 +220,8 @@ int ve_set_release_agent_path(struct cgroup *cgroot,
 const char *ve_get_release_agent_path(struct cgroup *cgrp_root);
 
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp);
+
 extern struct ve_struct *get_ve(struct ve_struct *ve);
 extern void put_ve(struct ve_struct *ve);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 123724d..64cbc5d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1756,6 +1756,7 @@ static void cgroup_drop_root(struct cgroupfs_root *root)
 {
 	if (!root)
 		return;
 
+	ve_cleanup_per_cgroot_data(NULL, &root->top_cgroup);
 	BUG_ON(!root->hierarchy_id);
 	spin_lock(&hierarchy_id_lock);
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 8d78270..db26cbd4 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -745,21 +745,33 @@ err_list:
 	return err;
 }
 
-static void ve_per_cgroot_free(struct ve_struct *ve)
+static inline void per_cgroot_data_free(struct per_cgroot_data *data)
+{
+	struct cgroup_rcu_string *release_agent = data->release_agent_path;
+
+	RCU_INIT_POINTER(data->release_agent_path, NULL);
+	if (release_agent)
+		kfree_rcu(release_agent, rcu_head);
+	kfree(data);
+}
+
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
 {
 	struct per_cgroot_data *data, *saved;
-	struct cgroup_rcu_string *release_agent;
+	BUG_ON(!ve && !cgrp);
+	rcu_read_lock();
+	if (!ve)
+		ve = cgroup_get_ve_owner(cgrp);
 
 	raw_spin_lock(&ve->per_cgroot_list_lock);
 	list_for_each_entry_safe(data, saved, &ve->per_cgroot_list, list) {
-		release_agent = data->release_agent_path;
-		RCU_INIT_POINTER(data->release_agent_path, NULL);
-		if (release_agent)
-			kfree_rcu(release_agent, rcu_head);
-		list_del_init(&data->list);
-		kfree(data);
+		if (!cgrp || data->cgroot == cgrp) {
+			list_del_init(&data->list);
+			per_cgroot_data_free(data);
+		}
 	}
 	raw_spin_unlock(&ve->per_cgroot_list_lock);
+	rcu_read_unlock();
 }
 
 void ve_stop_ns(struct pid_namespace *pid_ns)
@@ -812,7 +824,7 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
 	ve_workqueue_stop(ve);
 
-	ve_per_cgroot_free(ve);
+	ve_cleanup_per_cgroot_data(ve, NULL);
 
 	/*
 	 * At this point all userspace tasks in container are dead.