[Devel] [PATCH RH9 17/28] mm: Add and use batched version of __tlb_remove_table()

Andrey Zhadchenko andrey.zhadchenko at virtuozzo.com
Thu Oct 14 13:33:25 MSK 2021


From: Andrey Ryabinin <aryabinin at virtuozzo.com>

tlb_remove_table_rcu() frees tables one by one via
__tlb_remove_table() -> free_page_and_swap_cache(table). Add a batched
__tlb_remove_tables() that uses free_pages_and_swap_cache_nodrain() to
free the whole batch in one go. This reduces contention on the memory
cgroup counters, since they are now decremented once per batch instead
of once for each page individually.
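
For illustration only (not part of the patch): a minimal userspace
model of why batching the counter update matters, with a plain atomic
counter standing in for the real memory cgroup page counters. All
names in the sketch (charged_pages, free_tables_one_by_one,
free_tables_batched, NR_TABLES) are made up for the example.

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_TABLES 512

	/* Stand-in for a contended memcg page counter. */
	static _Atomic long charged_pages;

	/* Old path: one atomic RMW per freed table page. */
	static void free_tables_one_by_one(int nr)
	{
		for (int i = 0; i < nr; i++)
			atomic_fetch_sub(&charged_pages, 1);
	}

	/* Batched path: a single atomic RMW covers the whole batch. */
	static void free_tables_batched(int nr)
	{
		atomic_fetch_sub(&charged_pages, nr);
	}

	int main(void)
	{
		atomic_store(&charged_pages, 2 * NR_TABLES);

		free_tables_one_by_one(NR_TABLES);	/* NR_TABLES atomic ops */
		free_tables_batched(NR_TABLES);		/* 1 atomic op */

		printf("charged pages left: %ld\n",
		       atomic_load(&charged_pages));
		return 0;
	}

In the patch, the same effect comes from handing the whole tables[]
array to free_pages_and_swap_cache_nodrain(), which releases the batch
through a single release_pages() call instead of calling
free_page_and_swap_cache() once per table.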

https://jira.sw.ru/browse/PSBM-101300
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>

(cherry picked from vz7 commit 9a3ca2497cdb ("mm: Add and use batched version of
__tlb_remove_table()"))

https://jira.sw.ru/browse/PSBM-127854
Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>

(cherry picked from vz8 commit 006557b2c3f6a6197fc480fea3155fe2f6856187)
Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
---
 arch/x86/include/asm/tlb.h |  5 +++++
 include/linux/swap.h       |  1 +
 mm/mmu_gather.c            |  6 +-----
 mm/swap_state.c            | 16 ++++++++++++++--
 4 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 1bfe979..f2a21d8 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -37,4 +37,9 @@ static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+static inline void __tlb_remove_tables(void **tables, int nr)
+{
+	free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
+}
+
 #endif /* _ASM_X86_TLB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 68c1863..f712ad1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -454,6 +454,7 @@ extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
 extern void free_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nodrain(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      struct vm_area_struct *vma,
 				      unsigned long addr);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 1b98374..2faa0d59 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -95,11 +95,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 static void __tlb_remove_table_free(struct mmu_table_batch *batch)
 {
-	int i;
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
+	__tlb_remove_tables(batch->tables, batch->nr);
 	free_page((unsigned long)batch);
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7e3104d..52df32e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -305,17 +305,29 @@ void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them.  They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+void __free_pages_and_swap_cache(struct page **pages, int nr, bool drain)
 {
 	struct page **pagep = pages;
 	int i;
 
-	lru_add_drain();
+	if (drain)
+		lru_add_drain();
+
 	for (i = 0; i < nr; i++)
 		free_swap_cache(pagep[i]);
 	release_pages(pagep, nr);
 }
 
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, false);
+}
+
 static inline bool swap_use_vma_readahead(void)
 {
 	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
-- 
1.8.3.1


