[Devel] [PATCH RHEL7 COMMIT] tcache: Make tcache_nodeinfo::nr_pages atomic_long_t
Konstantin Khorenko
khorenko at virtuozzo.com
Thu Aug 31 18:18:21 MSK 2017
The commit is pushed to "branch-rh7-3.10.0-514.26.1.vz7.35.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.26.1.vz7.35.5
------>
commit 89f8a885e1deeff230554cf1c4dcd323fcbaa9ea
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date: Thu Aug 31 18:18:21 2017 +0300
tcache: Make tcache_nodeinfo::nr_pages atomic_long_t
This allows nr_pages to be changed without taking
tcache_nodeinfo::lock.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
Acked-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
mm/tcache.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
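[Note, not part of the applied patch: below is a minimal userspace sketch of the
pattern this commit switches to -- the per-node page counter becomes an atomic
that can be updated without holding the node lock, while the lock keeps
protecting the LRU list and reclaim tree. C11 atomics stand in for the kernel's
atomic_long_t API; the struct and function names are illustrative, not taken
from mm/tcache.c.]

#include <stdatomic.h>
#include <stdio.h>

struct nodeinfo {
	/* In the kernel a spinlock still guards the LRU list and the
	 * reclaim tree; only the counter is modelled here. */
	atomic_long nr_pages;
};

static void page_added(struct nodeinfo *ni)
{
	/* fast path: no lock needed just to bump the counter */
	atomic_fetch_add_explicit(&ni->nr_pages, 1, memory_order_relaxed);
}

static void page_removed(struct nodeinfo *ni)
{
	atomic_fetch_sub_explicit(&ni->nr_pages, 1, memory_order_relaxed);
}

static long shrink_count(struct nodeinfo *ni)
{
	long ret = atomic_load_explicit(&ni->nr_pages, memory_order_relaxed);

	/* a lockless reader may observe a transient negative value; clamp */
	return ret >= 0 ? ret : 0;
}

int main(void)
{
	struct nodeinfo ni = { .nr_pages = 0 };

	page_added(&ni);
	page_added(&ni);
	page_removed(&ni);
	printf("nr_pages = %ld\n", shrink_count(&ni));	/* prints 1 */
	return 0;
}

[The clamp in shrink_count() mirrors what tcache_shrink_count() does after the
patch: once updates no longer serialize on ni->lock, a reader can briefly see a
value that is behind or ahead of the true count, so negative results are
reported as zero.]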
diff --git a/mm/tcache.c b/mm/tcache.c
index 6962097..202834c 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -160,7 +160,7 @@ struct tcache_nodeinfo {
struct rb_node __rcu *rb_first;
/* total number of pages on all LRU lists corresponding to this node */
- unsigned long nr_pages;
+ atomic_long_t nr_pages;
} ____cacheline_aligned_in_smp;
/*
@@ -263,8 +263,7 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
spin_lock(&ni->lock);
spin_lock(&pni->lock);
-
- ni->nr_pages++;
+ atomic_long_inc(&ni->nr_pages);
pni->nr_pages++;
list_add_tail(&page->lru, &pni->lru);
@@ -310,7 +309,7 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
goto out;
__tcache_lru_del(pni, page);
- ni->nr_pages--;
+ atomic_long_dec(&ni->nr_pages);
if (reused)
pni->recent_gets++;
@@ -1073,7 +1072,7 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
if (!nr_isolated)
goto unlock;
- ni->nr_pages -= nr_isolated;
+ atomic_long_sub(nr_isolated, &ni->nr_pages);
if (!RB_EMPTY_NODE(rbn)) {
rb_erase(rbn, &ni->reclaim_tree);
@@ -1136,9 +1135,7 @@ tcache_try_to_reclaim_page(struct tcache_pool *pool, int nid)
if (!ret)
goto out;
- spin_lock(&ni->lock);
- ni->nr_pages -= ret;
- spin_unlock(&ni->lock);
+ atomic_long_dec(&ni->nr_pages);
if (!__tcache_reclaim_page(page))
page = NULL;
@@ -1163,7 +1160,12 @@ static struct page *tcache_alloc_page(struct tcache_pool *pool)
static unsigned long tcache_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- return tcache_nodeinfo[sc->nid].nr_pages;
+ atomic_long_t *nr_pages = &tcache_nodeinfo[sc->nid].nr_pages;
+ long ret;
+
+ ret = atomic_long_read(nr_pages);
+ WARN_ON(ret < 0);
+ return ret >= 0 ? ret : 0;
}
#define TCACHE_SCAN_BATCH 128UL
@@ -1380,6 +1382,7 @@ static int __init tcache_nodeinfo_init(void)
for (i = 0; i < nr_node_ids; i++) {
ni = &tcache_nodeinfo[i];
spin_lock_init(&ni->lock);
+ atomic_long_set(&ni->nr_pages, 0);
ni->reclaim_tree = RB_ROOT;
update_ni_rb_first(ni);
}