[Devel] [PATCH rh7 v2 09/10] tcache: Use ni->lock only for inserting and erasing from rbtree.

Kirill Tkhai ktkhai at virtuozzo.com
Wed Aug 16 14:53:17 MSK 2017


This patch completes the splitting of ni->lock into ni->lock and pni->lock.
Now the global ni->lock is taken only for inserting into and erasing from
tcache_nodeinfo::reclaim_tree, which happens only once per 1024 page
insertions or deletions. All other LRU operations take pni->lock, which is
per-filesystem (i.e., per-container) and thus does not affect other
containers.

Also, the lock order is changed to:

	spin_lock(&pni->lock);
	spin_lock(&ni->lock);

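So the fast path takes only the per-container pni->lock, and the global
ni->lock is nested inside it on the rare slow path. A condensed sketch of
the new tcache_lru_add() (the ni/pni lookup and counter updates are elided
here; see the diff below for the real code):

	static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
	{
		/* ni/pni lookup elided */
		spin_lock(&pni->lock);		/* per-container lock */
		/* ... update LRU list and per-pool counters ... */
		if (tcache_check_events(pni) || RB_EMPTY_NODE(&pni->reclaim_node)) {
			/* Slow path, once per 1024 ops: update global rbtree */
			spin_lock(&ni->lock);
			if (!RB_EMPTY_NODE(&pni->reclaim_node))
				rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
			__tcache_insert_reclaim_node(ni, pni);
			update_ni_rb_first(ni);
			spin_unlock(&ni->lock);
		}
		spin_unlock(&pni->lock);
	}
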
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 mm/tcache.c |   27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/mm/tcache.c b/mm/tcache.c
index f91c66690af..2b82a00c647 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -261,7 +261,6 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
 
-	spin_lock(&ni->lock);
 	spin_lock(&pni->lock);
 	atomic_long_inc(&ni->nr_pages);
 	pni->nr_pages++;
@@ -274,13 +273,14 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 	}
 
 	if (tcache_check_events(pni) || RB_EMPTY_NODE(&pni->reclaim_node)) {
+		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 		__tcache_insert_reclaim_node(ni, pni);
 		update_ni_rb_first(ni);
+		spin_unlock(&ni->lock);
 	}
 	spin_unlock(&pni->lock);
-	spin_unlock(&ni->lock);
 }
 
 static void __tcache_lru_del(struct tcache_pool_nodeinfo *pni,
@@ -301,7 +301,6 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
 
-	spin_lock(&ni->lock);
 	spin_lock(&pni->lock);
 
 	/* Raced with reclaimer? */
@@ -315,14 +314,15 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 		pni->recent_gets++;
 
 	if (tcache_check_events(pni)) {
+		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 		__tcache_insert_reclaim_node(ni, pni);
 		update_ni_rb_first(ni);
+		spin_unlock(&ni->lock);
 	}
 out:
 	spin_unlock(&pni->lock);
-	spin_unlock(&ni->lock);
 }
 
 static int tcache_create_pool(void)
@@ -1064,7 +1064,6 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	}
 	rcu_read_unlock();
 
-	spin_lock_irq(&ni->lock);
 	spin_lock(&pni->lock);
 	nr_isolated = __tcache_lru_isolate(pni, pages, nr_to_isolate);
 
@@ -1073,17 +1072,19 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 
 	atomic_long_sub(nr_isolated, &ni->nr_pages);
 
-	if (!RB_EMPTY_NODE(rbn)) {
-		rb_erase(rbn, &ni->reclaim_tree);
-		RB_CLEAR_NODE(rbn);
+	if (!RB_EMPTY_NODE(rbn) || !list_empty(&pni->lru)) {
+		spin_lock(&ni->lock);
+		if (!RB_EMPTY_NODE(rbn))
+			rb_erase(rbn, &ni->reclaim_tree);
+		if (!list_empty(&pni->lru))
+			__tcache_insert_reclaim_node(ni, pni);
+		else
+			RB_CLEAR_NODE(rbn);
+		update_ni_rb_first(ni);
+		spin_unlock(&ni->lock);
 	}
-	if (!list_empty(&pni->lru))
-		__tcache_insert_reclaim_node(ni, pni);
-	update_ni_rb_first(ni);
-
 unlock:
 	spin_unlock(&pni->lock);
-	spin_unlock_irq(&ni->lock);
 	tcache_put_pool(pni->pool);
 out:
 	return nr_isolated;


