[Devel] [PATCH RHEL7 COMMIT] mm: Add and use batched version of __tlb_remove_table()

Vasily Averin vvs at virtuozzo.com
Tue Jul 21 17:59:36 MSK 2020


The commit is pushed to "branch-rh7-3.10.0-1127.10.1.vz7.162.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.10.1.vz7.162.13
------>
commit 9a3ca2497cdbf4c80f822b2076e8b707b1297b22
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Tue Jul 21 17:59:36 2020 +0300

    mm: Add and use batched version of __tlb_remove_table()
    
    tlb_remove_table_rcu() removes tables one by one via
    __tlb_remove_table() -> free_page_and_swap_cache(table). Use the
    batched free_pages_and_swap_cache_nodrain() instead to remove all
    tables in one go. This reduces contention on the memcg counters,
    since they are decremented once per batch instead of once per page.
    
    https://jira.sw.ru/browse/PSBM-101300
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
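Note (not part of the patch): a minimal userspace sketch of the batching
idea, using a hypothetical charged_pages counter as a stand-in for the
real memcg page counters; none of the names below are kernel APIs. It
only shows that the batched path touches the contended counter once per
batch instead of once per page:

/* Illustrative only, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH_NR 512

/* Stand-in for a contended per-memcg page counter. */
static atomic_long charged_pages = 1024;

/* Old scheme: one atomic op (one cacheline bounce) per table page. */
static void uncharge_one_by_one(int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		atomic_fetch_sub(&charged_pages, 1);
}

/* Batched scheme: a single atomic op for the whole batch. */
static void uncharge_batched(int nr)
{
	atomic_fetch_sub(&charged_pages, nr);
}

int main(void)
{
	uncharge_one_by_one(BATCH_NR / 2);	/* 256 counter updates */
	uncharge_batched(BATCH_NR / 2);		/* 1 counter update    */
	printf("charged pages left: %ld\n", atomic_load(&charged_pages));
	return 0;
}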
 arch/x86/include/asm/tlb.h |  5 +++++
 include/linux/swap.h       |  1 +
 mm/memory.c                |  4 +---
 mm/swap_state.c            | 16 ++++++++++++++--
 4 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 79a4ca6a96063..64924a1bb1469 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -29,4 +29,9 @@ static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+static inline void __tlb_remove_tables(void **tables, int nr)
+{
+	free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
+}
+
 #endif /* _ASM_X86_TLB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9c48d29a6e054..c07cabc814e65 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -481,6 +481,7 @@ extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nodrain(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      struct vm_area_struct *vma,
 				      unsigned long addr);
diff --git a/mm/memory.c b/mm/memory.c
index 4370dd4008220..1f43e46a1f93c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -379,12 +379,10 @@ static void tlb_remove_table_one(void *table)
 static void tlb_remove_table_rcu(struct rcu_head *head)
 {
 	struct mmu_table_batch *batch;
-	int i;
 
 	batch = container_of(head, struct mmu_table_batch, rcu);
 
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
+	__tlb_remove_tables(batch->tables, batch->nr);
 
 	free_page((unsigned long)batch);
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5292312e1bd9d..58af0af81cc1d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -295,11 +295,13 @@ void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them.  They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+static void __free_pages_and_swap_cache(struct page **pages, int nr, bool drain)
 {
 	struct page **pagep = pages;
 
-	lru_add_drain();
+	if (drain)
+		lru_add_drain();
+
 	while (nr) {
 		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
@@ -319,6 +321,16 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 	}
 }
 
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, false);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel

