[Devel] [PATCH RHEL7 COMMIT] tcache: Decrement removed from LRU pages out of __tcache_lru_del()
Konstantin Khorenko
khorenko at virtuozzo.com
Thu Aug 31 18:18:17 MSK 2017
The commit is pushed to "branch-rh7-3.10.0-514.26.1.vz7.35.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.26.1.vz7.35.5
------>
commit eb34be224b7ca575751cc6f9752a7f8171c5c4f7
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date: Thu Aug 31 18:18:17 2017 +0300
tcache: Decrement removed from LRU pages out of __tcache_lru_del()
Patchset description:
tcache: Manage LRU lists under per-filesystem lock
Changes to v2:
Disable irqs in tcache_lru_isolate() [9/10]
Move update_ni_rb_first() to "tcache: Cache rb_first() of reclaim tree in tcache_nodeinfo::rb_first".
Add spin_lock_init() for lockdep [2/10]
Kirill Tkhai (10):
tcache: Decrement removed from LRU pages out of __tcache_lru_del()
tcache: Add tcache_pool_nodeinfo::lock
tcache: Cleanup unused expression from tcache_lru_isolate()
tcache: Remove excess variable from tcache_lru_isolate()
tcache: Cache rb_first() of reclaim tree in tcache_nodeinfo::rb_first
tcache: Make tcache_lru_isolate() keep ni->lock less
tcache: Move erase-insert logic out of tcache_check_events()
tcache: Make tcache_nodeinfo::nr_pages atomic_long_t
tcache: Use ni->lock only for inserting and erasing from rbtree.
tcache: Move add/sub out of pni->lock
https://jira.sw.ru/browse/PSBM-69296
This patchset decreases CPU usage when writing big files in Containers.
======================
This patch description:
Move the subtraction out of __tcache_lru_del(); this will be used
in the next patches. Also, delete the ni argument of the function.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
Acked-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
mm/tcache.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/tcache.c b/mm/tcache.c
index 0bfbb69..9f296dc 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -274,11 +274,9 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
spin_unlock(&ni->lock);
}
-static void __tcache_lru_del(struct tcache_nodeinfo *ni,
- struct tcache_pool_nodeinfo *pni,
+static void __tcache_lru_del(struct tcache_pool_nodeinfo *pni,
struct page *page)
{
- ni->nr_pages--;
pni->nr_pages--;
list_del_init(&page->lru);
}
@@ -300,7 +298,8 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
if (unlikely(list_empty(&page->lru)))
goto out;
- __tcache_lru_del(ni, pni, page);
+ __tcache_lru_del(pni, page);
+ ni->nr_pages--;
if (reused)
pni->recent_gets++;
@@ -988,8 +987,7 @@ __tcache_insert_reclaim_node(struct tcache_nodeinfo *ni,
}
static noinline_for_stack int
-__tcache_lru_isolate(struct tcache_nodeinfo *ni,
- struct tcache_pool_nodeinfo *pni,
+__tcache_lru_isolate(struct tcache_pool_nodeinfo *pni,
struct page **pages, int nr_to_scan)
{
struct tcache_node *node;
@@ -1002,7 +1000,7 @@ __tcache_lru_isolate(struct tcache_nodeinfo *ni,
if (unlikely(!page_cache_get_speculative(page)))
continue;
- __tcache_lru_del(ni, pni, page);
+ __tcache_lru_del(pni, page);
/*
* A node can be destroyed only if all its pages have been
@@ -1041,7 +1039,8 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
if (!tcache_grab_pool(pni->pool))
goto again;
- nr = __tcache_lru_isolate(ni, pni, pages, nr_to_isolate);
+ nr = __tcache_lru_isolate(pni, pages, nr_to_isolate);
+ ni->nr_pages -= nr;
nr_isolated += nr;
nr_to_isolate -= nr;
@@ -1093,7 +1092,8 @@ tcache_try_to_reclaim_page(struct tcache_pool *pool, int nid)
local_irq_save(flags);
spin_lock(&ni->lock);
- ret = __tcache_lru_isolate(ni, pni, &page, 1);
+ ret = __tcache_lru_isolate(pni, &page, 1);
+ ni->nr_pages -= ret;
spin_unlock(&ni->lock);
if (!ret)
More information about the Devel
mailing list