[Devel] [PATCH RHEL8 COMMIT] mm: assign id to every memcg-aware shrinker

Konstantin Khorenko khorenko at virtuozzo.com
Thu Apr 2 17:12:15 MSK 2020


The commit is pushed to "branch-rh8-4.18.0-80.1.2.vz8.3.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-80.1.2.vz8.3.4
------>
commit 2094ac124375fb67f1e0c00ae80d742a7229e9b4
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date:   Thu Apr 2 17:12:14 2020 +0300

    mm: assign id to every memcg-aware shrinker
    
    Introduce a shrinker::id number, which is used to enumerate memcg-aware
    shrinkers.  Numbering starts from 0, and the code tries to keep the ids
    as small as possible.
    
    This will be used to represent memcg-aware shrinkers in the memcg
    shrinkers map.
    
    Since all memcg-aware shrinkers are based on list_lru, which is
    per-memcg only when CONFIG_MEMCG_KMEM is enabled, the new functionality
    is placed under this config option.
    
    [ktkhai at virtuozzo.com: v9]
      Link: http://lkml.kernel.org/r/153112546435.4097.10607140323811756557.stgit@localhost.localdomain
    Link: http://lkml.kernel.org/r/153063054586.1818.6041047871606697364.stgit@localhost.localdomain
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    
    Acked-by: Vladimir Davydov <vdavydov.dev at gmail.com>
    Tested-by: Shakeel Butt <shakeelb at google.com>
    Cc: Al Viro <viro at zeniv.linux.org.uk>
    Cc: Andrey Ryabinin <aryabinin at virtuozzo.com>
    Cc: Chris Wilson <chris at chris-wilson.co.uk>
    Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
    Cc: Guenter Roeck <linux at roeck-us.net>
    Cc: "Huang, Ying" <ying.huang at intel.com>
    Cc: Johannes Weiner <hannes at cmpxchg.org>
    Cc: Josef Bacik <jbacik at fb.com>
    Cc: Li RongQing <lirongqing at baidu.com>
    Cc: Matthew Wilcox <willy at infradead.org>
    Cc: Matthias Kaehlcke <mka at chromium.org>
    Cc: Mel Gorman <mgorman at techsingularity.net>
    Cc: Michal Hocko <mhocko at kernel.org>
    Cc: Minchan Kim <minchan at kernel.org>
    Cc: Philippe Ombredanne <pombredanne at nexb.com>
    Cc: Roman Gushchin <guro at fb.com>
    Cc: Sahitya Tummala <stummala at codeaurora.org>
    Cc: Stephen Rothwell <sfr at canb.auug.org.au>
    Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
    Cc: Thomas Gleixner <tglx at linutronix.de>
    Cc: Waiman Long <longman at redhat.com>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    (cherry picked from commit b4c2b231c3ba155623591fb6301ed97b95e1c039)
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/linux/shrinker.h |  4 +++
 mm/vmscan.c              | 63 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 67 insertions(+)
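
For illustration (not part of the patch), a minimal sketch of a memcg-aware
shrinker that would receive an id under this change; the example_* names are
hypothetical placeholders:

/*
 * Minimal sketch, not part of the patch: a memcg-aware shrinker.
 * Setting SHRINKER_MEMCG_AWARE makes prealloc_shrinker() also call
 * prealloc_memcg_shrinker(), which stores an IDR-allocated number in
 * example_shrinker.id (CONFIG_MEMCG_KMEM only).
 */
#include <linux/shrinker.h>

static unsigned long example_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	/* Return the number of freeable objects for sc->memcg on sc->nid. */
	return 0;		/* placeholder: nothing cached yet */
}

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	/* Try to free up to sc->nr_to_scan objects; nothing to do here. */
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

/* register_shrinker(&example_shrinker) would assign example_shrinker.id. */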

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 8d912bc91bf5..cec7a37e8193 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -68,6 +68,10 @@ struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+	/* ID in shrinker_idr */
+	int id;
+#endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d99fb2be1c36..d14f954ab95b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -182,6 +182,50 @@ unsigned long vm_total_pages;
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_MEMCG_KMEM
+static DEFINE_IDR(shrinker_idr);
+static int shrinker_nr_max;
+
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id, ret = -ENOMEM;
+
+	down_write(&shrinker_rwsem);
+	/* This may call shrinker, so it must use down_read_trylock() */
+	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
+	if (id < 0)
+		goto unlock;
+
+	if (id >= shrinker_nr_max)
+		shrinker_nr_max = id + 1;
+	shrinker->id = id;
+	ret = 0;
+unlock:
+	up_write(&shrinker_rwsem);
+	return ret;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id = shrinker->id;
+
+	BUG_ON(id < 0);
+
+	down_write(&shrinker_rwsem);
+	idr_remove(&shrinker_idr, id);
+	up_write(&shrinker_rwsem);
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 #ifdef CONFIG_MEMCG
 static bool cgroup_reclaim(struct scan_control *sc)
 {
@@ -281,11 +325,28 @@ int prealloc_shrinker(struct shrinker *shrinker)
 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
+
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+		if (prealloc_memcg_shrinker(shrinker))
+			goto free_deferred;
+	}
+
 	return 0;
+
+free_deferred:
+	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
+	return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
+	if (!shrinker->nr_deferred)
+		return;
+
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
+
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
 }
@@ -315,6 +376,8 @@ void unregister_shrinker(struct shrinker *shrinker)
 {
 	if (!shrinker->nr_deferred)
 		return;
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		unregister_memcg_shrinker(shrinker);
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
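
For callers that use the preallocation API, a sketch of the expected lifecycle
with this change (example_init_caches() and example_shrinker are hypothetical,
following the sketch above):

/*
 * Sketch only: with SHRINKER_MEMCG_AWARE set, prealloc_shrinker() now also
 * reserves a shrinker id, and free_prealloced_shrinker() releases it again
 * if the caller bails out before registration.
 */
static int example_setup(void)
{
	int err;

	err = prealloc_shrinker(&example_shrinker);	/* allocates ->id too */
	if (err)
		return err;

	err = example_init_caches();			/* hypothetical setup */
	if (err) {
		free_prealloced_shrinker(&example_shrinker); /* drops the id */
		return err;
	}

	register_shrinker_prepared(&example_shrinker);
	return 0;
}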

