[Devel] [PATCH rh7 1/2] memcg: introduce for_each_kmemcg
Vladimir Davydov
vdavydov at virtuozzo.com
Tue Oct 27 08:08:05 PDT 2015
In our ancient kernel, in contrast to mainstream, for_each_mem_cgroup
does not iterate over offline cgroups, because offline cgroups are
supposed to die quickly after reparenting charges. However, it is
impossible to reparent kmem charges, so we introduced a hack - we link
all dead kmem-active memory cgroups in memcg->kmemcg_sharers list and
leave them hanging around. This works fine, except that one cannot iterate
over offline kmem-accounted cgroups, which is required by the following
patch.
To overcome this limitation, this patch introduces for_each_kmemcg
helper, which iterates over all kmem-accounted cgroups including offline
ones. It is ugly, just like the initial kmemcg_sharers hack, but it
seems to be better to introduce it instead of pulling all those
mainstream patches fixing cgroup core. Hopefully, they will be
backported by RH team sooner or later and we will drop this hack.
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
mm/memcontrol.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 26c12658600c..11a4c41fc079 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3099,6 +3099,64 @@ static DEFINE_MUTEX(set_limit_mutex);
#ifdef CONFIG_MEMCG_KMEM
static DEFINE_MUTEX(activate_kmem_mutex);
+static DECLARE_RWSEM(kmemcg_sharers_sem);
+
+/*
+ * Similar to mem_cgroup_iter except iterates over not only online cgroups, but
+ * also offline kmem-accounted cgroups.
+ */
+static struct mem_cgroup *kmemcg_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev)
+{
+ struct mem_cgroup *memcg = NULL;
+
+ /*
+ * Dead kmem-accounted cgroups are stored in the memcg->kmemcg_sharers
+ * list, which is protected by kmemcg_sharers_sem. Take it for reading
+ * when starting an iteration in order to guarantee the hierarchy will
+ * not change under us.
+ */
+ if (!prev)
+ down_read(&kmemcg_sharers_sem);
+
+ if (prev) {
+ /*
+ * Iterate over lists of dead kmem-accounted cgroups attached
+ * to this cgroup via kmemcg_sharers list. All cgroups but the
+ * online one have KMEM_ACCOUNTED_ACTIVE cleared, which we use
+ * as a marker of a round-trip.
+ *
+ * No need to take css reference, because the cgroup cannot be
+ * destroyed until we release kmemcg_sharers_sem.
+ */
+ memcg = list_next_entry(prev, kmemcg_sharers);
+ if (!memcg_kmem_is_active(memcg))
+ return memcg;
+ }
+
+ /*
+ * Select the next active kmem-accounted cgroup if any.
+ */
+ do {
+ memcg = mem_cgroup_iter(root, memcg, NULL);
+ } while (memcg && !memcg_kmem_is_active(memcg));
+
+ /*
+ * Release kmemcg_sharers_sem once we are done.
+ */
+ if (!memcg)
+ up_read(&kmemcg_sharers_sem);
+ return memcg;
+}
+
+/*
+ * It is not allowed to break the loop. If one needs this, kmemcg_iter_break is
+ * to be implemented.
+ */
+#define for_each_kmemcg_tree(iter, root) \
+ for (iter = kmemcg_iter(root, NULL); iter != NULL; \
+ iter = kmemcg_iter(root, iter))
+#define for_each_kmemcg(iter) for_each_kmemcg_tree(iter, NULL)
#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
@@ -6010,8 +6068,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
if (test_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags)) {
+ down_write(&kmemcg_sharers_sem);
list_del(&memcg->kmemcg_sharers);
memcg_destroy_kmem_caches(memcg);
+ up_write(&kmemcg_sharers_sem);
}
mem_cgroup_sockets_destroy(memcg);
}
@@ -6024,6 +6084,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
if (!memcg_kmem_is_active(memcg))
return;
+ down_write(&kmemcg_sharers_sem);
+
/*
* Clear the 'active' flag before clearing memcg_caches arrays entries.
* Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
@@ -6083,6 +6145,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
memcg_kmem_mark_dead(memcg);
+ up_write(&kmemcg_sharers_sem);
+
if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
return;
--
2.1.4
More information about the Devel
mailing list