[Devel] [PATCH RHEL7 COMMIT] tswap: use global lock to protect lru lists

Konstantin Khorenko khorenko at virtuozzo.com
Mon Jun 22 02:38:33 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-123.1.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-123.1.2.vz7.5.15
------>
commit e770171abd9e77f9838a450d1f7deea81b632aef
Author: Vladimir Davydov <vdavydov at parallels.com>
Date:   Mon Jun 22 13:38:32 2015 +0400

    tswap: use global lock to protect lru lists
    
    Patchset description:
    
    Patches 1 and 2 fix memory corruption caused by tswap:
      https://jira.sw.ru/browse/PSBM-34269
    Patch 5 fixes pinning a page in tswap.
    Patch 7 fixes indefinitely long stalls during tswap reclaim.
    Patches 3, 4, and 6 are cleanups.
    
    Vladimir Davydov (6):
      tswap: enable exclusive gets
      tswap: do not allocate a page on store if there is already one
      tswap: introduce tswap_delete_page helper
      tswap: shrink tswap page if swapcache page is uptodate
      tswap: use global lock to protect lru lists
      tswap: do not writeback pages on reclaim
    
    Weijie Yang (1):
      mm: frontswap: invalidate expired data on a dup-store failure
    
    ###############################################################
    This patch description:
    
    Currently, the lru lists are protected by per-node locks. However,
    there is not much point in that, because we contend for the global
    tswap_lock anyway on any load/store/lookup/shrink. So switch to the
    global lock everywhere for simplicity.
    
    Also, remove the list_empty() check from tswap_lru_del - it is
    useless now, because the shrinker no longer removes a page from the
    lru while leaving it in the tree.
    
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
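For reference, with the per-node lock dropped both lru helpers end up
running under tswap_lock taken by the caller and reduce to roughly the
following (this mirrors the hunks below):

    static void tswap_lru_add(struct page *page)
    {
    	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];

    	/* caller holds tswap_lock */
    	list_add_tail(&page->lru, &lru->list);
    	lru->nr_items++;
    }

    static void tswap_lru_del(struct page *page)
    {
    	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];

    	/* caller holds tswap_lock; the page is always on the lru here */
    	list_del(&page->lru);
    	lru->nr_items--;
    }

Since tswap_lru_add() and tswap_lru_del() are now called from
tswap_insert_page() and tswap_delete_page() with tswap_lock already
held, a page enters and leaves the lru together with its radix tree
entry, which is why the list_empty() check is no longer needed.
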
 mm/tswap.c | 36 ++++++++++++++----------------------
 1 file changed, 14 insertions(+), 22 deletions(-)

diff --git a/mm/tswap.c b/mm/tswap.c
index e4b27df..4b8cef9 100644
--- a/mm/tswap.c
+++ b/mm/tswap.c
@@ -19,7 +19,6 @@ static RADIX_TREE(tswap_page_tree, GFP_ATOMIC | __GFP_NOWARN);
 static DEFINE_SPINLOCK(tswap_lock);
 
 struct tswap_lru {
-	spinlock_t lock;
 	struct list_head list;
 	unsigned long nr_items;
 } ____cacheline_aligned_in_smp;
@@ -42,22 +41,16 @@ static void tswap_lru_add(struct page *page)
 {
 	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];
 
-	spin_lock(&lru->lock);
 	list_add_tail(&page->lru, &lru->list);
 	lru->nr_items++;
-	spin_unlock(&lru->lock);
 }
 
 static void tswap_lru_del(struct page *page)
 {
 	struct tswap_lru *lru = &tswap_lru_node[page_to_nid(page)];
 
-	spin_lock(&lru->lock);
-	if (!list_empty(&page->lru)) {
-		list_del_init(&page->lru);
-		lru->nr_items--;
-	}
-	spin_unlock(&lru->lock);
+	list_del(&page->lru);
+	lru->nr_items--;
 }
 
 static struct page *tswap_lookup_page(swp_entry_t entry)
@@ -78,11 +71,11 @@ static int tswap_insert_page(swp_entry_t entry, struct page *page)
 	set_page_private(page, entry.val);
 	spin_lock(&tswap_lock);
 	err = radix_tree_insert(&tswap_page_tree, entry.val, page);
-	if (!err)
+	if (!err) {
+		tswap_lru_add(page);
 		tswap_nr_pages++;
+	}
 	spin_unlock(&tswap_lock);
-	if (!err)
-		tswap_lru_add(page);
 	return err;
 }
 
@@ -92,13 +85,14 @@ static struct page *tswap_delete_page(swp_entry_t entry, struct page *expected)
 
 	spin_lock(&tswap_lock);
 	page = radix_tree_delete_item(&tswap_page_tree, entry.val, expected);
-	if (page)
+	if (page) {
+		tswap_lru_del(page);
 		tswap_nr_pages--;
+	}
 	spin_unlock(&tswap_lock);
 	if (page) {
 		BUG_ON(expected && page != expected);
 		BUG_ON(page_private(page) != entry.val);
-		tswap_lru_del(page);
 	}
 	return page;
 }
@@ -223,7 +217,7 @@ static unsigned long tswap_shrink_scan(struct shrinker *shrink,
 	struct tswap_lru *lru = &tswap_lru_node[sc->nid];
 	unsigned long nr_reclaimed = 0;
 
-	spin_lock(&lru->lock);
+	spin_lock(&tswap_lock);
 	while (sc->nr_to_scan-- > 0) {
 		struct page *page;
 
@@ -235,11 +229,11 @@ static unsigned long tswap_shrink_scan(struct shrinker *shrink,
 		 * other reclaiming threads */
 		if (!trylock_page(page)) {
 			list_move_tail(&page->lru, &lru->list);
-			cond_resched_lock(&lru->lock);
+			cond_resched_lock(&tswap_lock);
 			continue;
 		}
 		get_page(page);
-		spin_unlock(&lru->lock);
+		spin_unlock(&tswap_lock);
 
 		if (tswap_writeback_page(page) == 0)
 			nr_reclaimed++;
@@ -247,9 +241,9 @@ static unsigned long tswap_shrink_scan(struct shrinker *shrink,
 		put_page(page);
 
 		cond_resched();
-		spin_lock(&lru->lock);
+		spin_lock(&tswap_lock);
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&tswap_lock);
 
 	return nr_reclaimed;
 }
@@ -351,10 +345,8 @@ static int __init tswap_lru_init(void)
 	if (!tswap_lru_node)
 		return -ENOMEM;
 
-	for (i = 0; i < nr_node_ids; i++) {
-		spin_lock_init(&tswap_lru_node[i].lock);
+	for (i = 0; i < nr_node_ids; i++)
 		INIT_LIST_HEAD(&tswap_lru_node[i].list);
-	}
 	return 0;
 }
 


