[Devel] [PATCH RHEL9 COMMIT] mm: Add and use batched version of __tlb_remove_table()
Konstantin Khorenko
khorenko at virtuozzo.com
Wed Oct 20 11:40:43 MSK 2021
The commit is pushed to "branch-rh9-5.14.vz9.1.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh9-5.14.0-4.vz9.10.12
------>
commit ab337b41675eec3fe7e97f1efd155677d77d7890
Author: Andrey Ryabinin <ryabinin.a.a at gmail.com>
Date: Wed Oct 20 11:40:43 2021 +0300
mm: Add and use batched version of __tlb_remove_table()
tlb_remove_table_rcu() removes tables one by one via
__tlb_remove_table() -> free_page_and_swap_cache(table). Use the
batched free_pages_and_swap_cache_nodrain() instead to remove all
tables in one go. This reduces contention on the memory cgroup
counters, since they are decremented once per batch instead of once
for each page individually.
https://jira.sw.ru/browse/PSBM-101300
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
(cherry-picked from vz7 commit 9a3ca2497cdb ("mm: Add and use batched version of
__tlb_remove_table()"))
https://jira.sw.ru/browse/PSBM-127854
Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
(cherry picked from vz8 commit 006557b2c3f6a6197fc480fea3155fe2f6856187)
Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
---
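The diff below touches three files; pieced together, the new freeing path
looks roughly like this (a sketch assembled from the hunks that follow,
x86 arch hook shown):

/* mm/mmu_gather.c: free a whole batch of page-table pages at once */
static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
        __tlb_remove_tables(batch->tables, batch->nr);
        free_page((unsigned long)batch);
}

/* arch/x86/include/asm/tlb.h: arch hook, hands the array to the mm helper */
static inline void __tlb_remove_tables(void **tables, int nr)
{
        free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
}

/* mm/swap_state.c: drop the swap cache entries, then release all pages
 * with a single release_pages() call, so the memory cgroup counters are
 * updated per batch rather than per page; lru_add_drain() is skipped. */
void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
{
        __free_pages_and_swap_cache(pages, nr, false);
}
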
 arch/x86/include/asm/tlb.h |  5 +++++
 include/linux/swap.h       |  1 +
 mm/mmu_gather.c            |  6 +-----
 mm/swap_state.c            | 16 ++++++++++++++--
 4 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 1bfe979bb9bc..f2a21d87e0e8 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -37,4 +37,9 @@ static inline void __tlb_remove_table(void *table)
         free_page_and_swap_cache(table);
 }
+static inline void __tlb_remove_tables(void **tables, int nr)
+{
+        free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
+}
+
 #endif /* _ASM_X86_TLB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 68c186361397..f712ad1f0510 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -454,6 +454,7 @@ extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
 extern void free_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nodrain(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
                                       struct vm_area_struct *vma,
                                       unsigned long addr);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 1b9837419bf9..2faa0d59aeca 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -95,11 +95,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 static void __tlb_remove_table_free(struct mmu_table_batch *batch)
 {
-        int i;
-
-        for (i = 0; i < batch->nr; i++)
-                __tlb_remove_table(batch->tables[i]);
-
+        __tlb_remove_tables(batch->tables, batch->nr);
         free_page((unsigned long)batch);
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7e3104d27d4f..52df32e65624 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -305,17 +305,29 @@ void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them. They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+void __free_pages_and_swap_cache(struct page **pages, int nr, bool drain)
 {
         struct page **pagep = pages;
         int i;
-        lru_add_drain();
+        if (drain)
+                lru_add_drain();
+
         for (i = 0; i < nr; i++)
                 free_swap_cache(pagep[i]);
         release_pages(pagep, nr);
 }
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+        __free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
+{
+        __free_pages_and_swap_cache(pages, nr, false);
+}
+
 static inline bool swap_use_vma_readahead(void)
 {
         return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);