[Devel] [PATCH RHEL8 COMMIT] ms/asm-generic/tlb: Track freeing of page-table directories in struct mmu_gather
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Jun 21 20:01:19 MSK 2021
The commit is pushed to "branch-rh8-4.18.0-240.1.1.vz8.5.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-240.1.1.vz8.5.47
------>
commit 61853005887350303d40f2d8468905114d449066
Author: Peter Zijlstra <peterz at infradead.org>
Date: Mon Jun 21 20:01:19 2021 +0300
ms/asm-generic/tlb: Track freeing of page-table directories in struct mmu_gather
Some architectures require different TLB invalidation instructions
depending on whether it is only the last-level of page table being
changed, or whether there are also changes to the intermediate
(directory) entries higher up the tree.
Add a new bit to the flags bitfield in struct mmu_gather so that the
architecture code can operate accordingly if it's the intermediate
levels being invalidated.
Acked-by: Nicholas Piggin <npiggin at gmail.com>
Signed-off-by: Peter Zijlstra <peterz at infradead.org>
Signed-off-by: Will Deacon <will.deacon at arm.com>
https://jira.sw.ru/browse/PSBM-101300
(cherry picked from ms commit 22a61c3c4f1379ef8b0ce0d5cb78baf3178950e2)
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
(cherry picked from vz7 commit ec73eb60720c3c92f3f6388a1b1988b895c1e53f)
https://jira.sw.ru/browse/PSBM-127854
Signed-off-by: Valeriy Vdovin <valeriy.vdovin at virtuozzo.com>
---
include/asm-generic/tlb.h | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 81db3c11bb11..ef44ee9c8b42 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -108,13 +108,22 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	/* we are in the middle of an operation to clear
-	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
-	/* we have performed an operation which
-	 * requires a complete flush of the tlb */
-				need_flush_all : 1;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
 
 	struct mmu_gather_batch	*active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
@@ -148,6 +157,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 		tlb->start = TASK_SIZE;
 		tlb->end = 0;
 	}
+	tlb->freed_tables = 0;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -289,6 +299,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 #endif
@@ -297,6 +308,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 #endif
@@ -306,6 +318,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -316,6 +329,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
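
For illustration only, not part of the patch: a minimal sketch of how architecture code could consult the new freed_tables bit when choosing how broadly to invalidate. The example_tlb_flush() function and the example_flush_range_*() helpers are hypothetical placeholders for real architecture-specific invalidation primitives; only the mmu_gather fields (fullmm, freed_tables, start, end) come from the patched header.

static inline void example_tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->freed_tables) {
		/*
		 * Intermediate (directory) levels were freed, or the
		 * whole mm is going away: cached page-table walks must
		 * be invalidated along with the last-level entries.
		 */
		example_flush_range_all_levels(tlb->start, tlb->end);
	} else {
		/* Only last-level (leaf) entries changed. */
		example_flush_range_last_level(tlb->start, tlb->end);
	}
}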