[Devel] [PATCH RHEL8 COMMIT] ve/cgroup: Implement per-ve workqueue

Konstantin Khorenko khorenko at virtuozzo.com
Wed Mar 3 20:21:10 MSK 2021


The commit is pushed to "branch-rh8-4.18.0-240.1.1.vz8.5.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-240.1.1.vz8.5.5
------>
commit 20ae7b2db8dea234d8dd1e11ddbf9067daf94137
Author: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
Date:   Wed Mar 3 20:21:10 2021 +0300

    ve/cgroup: Implement per-ve workqueue
    
    Cherry-picked from vz7 commit
     0293870666c4 ("ve/cgroup: implemented per-ve workqueue.")
    
    Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
    Reviewed-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    
    =====================
    Patchset description:
    
    ve/cgroup: Port release_agent virtualization from vz7
    
    This patchset ports cgroup release_agent virtualization from vz7.
    
    Major challenges of porting are the differences between the vz7 and
    vz8 cgroup implementations:
    - transition of cgroups to kernfs
    - a slightly changed locking scheme, which relies on css_set_lock in
      places that previously relied on cgroup_mutex.
    
    A small number of patches have been ported without modification,
    but most of them required significant rework due to the factors
    described above.
    
    v1:
      - original patchset
    v2:
      - removed the port of CGRP_REMOVED, since VZ8 uses CSS_ONLINE for
        the same purpose
      - changed the ve_set(get)_release_agent_path signature to a more
        optimal one
      - added a ve->is_running check before calling the userspace
        executable
    v3:
      - use a goto after the ve->is_running check in the last patch
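    
    A minimal usage sketch (an assumption for illustration: "work" stands
    for a struct work_struct that a later patch in this series would set
    up, e.g. for release_agent handling):
    
        /*
         * Queuing container-scoped work on the per-ve workqueue ties its
         * lifetime to the VE (see ve_workqueue_start()/ve_workqueue_stop()
         * below) and lets it be frozen with the VE (WQ_FREEZABLE).
         */
        queue_work(ve->wq, &work);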
---
 include/linux/ve.h |  1 +
 kernel/ve/ve.c     | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 103d0a9044fc..d3c1ab840444 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -105,6 +105,7 @@ struct ve_struct {
 	unsigned long		aio_nr;
 	unsigned long		aio_max_nr;
 #endif
+	struct workqueue_struct	*wq;
 };
 
 struct ve_devmnt {
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index f7d605357d2e..25455264b225 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -388,6 +388,21 @@ static void ve_set_vdso_time(struct ve_struct *ve, u64 time)
 	*vdso_start_time = time;
 }
 
+static int ve_workqueue_start(struct ve_struct *ve)
+{
+	ve->wq = alloc_workqueue("ve_wq_%s",
+		WQ_SYSFS|WQ_FREEZABLE|WQ_UNBOUND, 8, ve->ve_name);
+
+	if (!ve->wq)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ve_workqueue_stop(struct ve_struct *ve)
+{
+	destroy_workqueue(ve->wq);
+}
+
 /* under ve->op_sem write-lock */
 static int ve_start_container(struct ve_struct *ve)
 {
@@ -443,6 +458,10 @@ static int ve_start_container(struct ve_struct *ve)
 	if (err)
 		goto err_umh;
 
+	err = ve_workqueue_start(ve);
+	if (err)
+		goto err_workqueue;
+
 	err = ve_hook_iterate_init(VE_SS_CHAIN, ve);
 	if (err < 0)
 		goto err_iterate;
@@ -458,6 +477,8 @@ static int ve_start_container(struct ve_struct *ve)
 	return 0;
 
 err_iterate:
+	ve_workqueue_stop(ve);
+err_workqueue:
 	ve_stop_umh(ve);
 err_umh:
 	ve_stop_kthreadd(ve);
@@ -523,6 +544,8 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
 
 	cgroup_unmark_ve_roots(ve);
 
+	ve_workqueue_stop(ve);
+
 	/*
 	 * At this point all userspace tasks in container are dead.
 	 */
@@ -1363,6 +1386,8 @@ static int __init ve_subsys_init(void)
 {
 	ve_cachep = KMEM_CACHE_USERCOPY(ve_struct, SLAB_PANIC, core_pattern);
 	list_add(&ve0.ve_list, &ve_list_head);
+	ve0.wq = alloc_workqueue("ve0_wq", WQ_FREEZABLE|WQ_UNBOUND, 8);
+	BUG_ON(!ve0.wq);
 	return 0;
 }
 late_initcall(ve_subsys_init);

