[Devel] [PATCH rh7 3/7] asm-generic/tlb: Track which levels of the page tables have been cleared
Andrey Ryabinin
aryabinin at virtuozzo.com
Mon Jul 13 16:09:34 MSK 2020
From: Will Deacon <will.deacon at arm.com>
It is common for architectures with hugepage support to require only a
single TLB invalidation operation per hugepage during unmap(), rather than
iterating through the mapping at a PAGE_SIZE increment. Currently,
however, the level in the page table where the unmap() operation occurs
is not stored in the mmu_gather structure, therefore forcing
architectures to issue additional TLB invalidation operations or to give
up and over-invalidate by e.g. invalidating the entire TLB.
Ideally, we could add an interval rbtree to the mmu_gather structure,
which would allow us to associate the correct mapping granule with the
various sub-mappings within the range being invalidated. However, this
is costly in terms of book-keeping and memory management, so instead we
approximate by keeping track of the page table levels that are cleared
and provide a means to query the smallest granule required for invalidation.
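
As a sketch of the intended use (illustrative only, not part of this
patch: my_arch_tlb_flush() and my_arch_flush_page() are hypothetical
stand-ins for an architecture's real hooks), a flush implementation
can derive its invalidation stride from the tracked levels:

/*
 * Sketch: consume the new query in an arch flush hook.
 * my_arch_flush_page() stands in for the arch's per-entry
 * TLB invalidate primitive.
 */
static inline void my_arch_tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);	/* entire address space is going away */
		return;
	}

	/* One invalidation per entry at the smallest cleared granule. */
	for (addr = tlb->start; addr < tlb->end; addr += stride)
		my_arch_flush_page(addr);
}

For a single 2M hugepage this loop issues one invalidation instead of
the 512 page-sized operations a fixed PAGE_SIZE stride would generate.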
Acked-by: Peter Zijlstra (Intel) <peterz at infradead.org>
Acked-by: Nicholas Piggin <npiggin at gmail.com>
Signed-off-by: Will Deacon <will.deacon at arm.com>
https://jira.sw.ru/browse/PSBM-101300
(cherry picked from commit a6d60245d6d9b1caf66b0d94419988c4836980af)
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/asm-generic/tlb.h | 41 ++++++++++++++++++++++++++++++++++++++---
 mm/memory.c               |  4 +++-
 2 files changed, 41 insertions(+), 4 deletions(-)
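
A quick walk-through of the new bookkeeping (example only; the caller
below is hypothetical and x86-64 values are assumed, where PMD_SHIFT
is 21):

/*
 * Zapping a single 2M THP records a PMD-level clear, so the
 * smallest granule the architecture must invalidate at is 2M
 * rather than 4K.
 */
static void example_thp_zap(struct mmu_gather *tlb, pmd_t *pmdp,
			    unsigned long addr)
{
	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);	/* sets tlb->cleared_pmds */

	VM_BUG_ON(tlb_get_unmap_shift(tlb) != PMD_SHIFT);
	VM_BUG_ON(tlb_get_unmap_size(tlb) != HPAGE_PMD_SIZE);
}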
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index bb8fd22ec20b..e667f8e557d0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -113,6 +113,14 @@ struct mmu_gather {
 	 */
 	unsigned int		freed_tables : 1;
 
+	/*
+	 * at which levels have we cleared entries?
+	 */
+	unsigned int		cleared_ptes : 1;
+	unsigned int		cleared_pmds : 1;
+	unsigned int		cleared_puds : 1;
+	unsigned int		cleared_p4ds : 1;
+
 	struct mmu_gather_batch	*active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
@@ -151,6 +159,27 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	tlb->start = TASK_SIZE;
 	tlb->end = 0;
 	tlb->freed_tables = 0;
+	tlb->cleared_ptes = 0;
+	tlb->cleared_pmds = 0;
+	tlb->cleared_puds = 0;
+	tlb->cleared_p4ds = 0;
+}
+
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+	if (tlb->cleared_ptes)
+		return PAGE_SHIFT;
+	if (tlb->cleared_pmds)
+		return PMD_SHIFT;
+	if (tlb->cleared_puds)
+		return PUD_SHIFT;
+
+	return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+	return 1UL << tlb_get_unmap_shift(tlb);
 }
 
 /*
@@ -188,6 +217,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->cleared_ptes = 1;				\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
@@ -202,6 +232,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
  */
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
+		tlb->cleared_pmds = 1;				\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
@@ -216,13 +247,15 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->cleared_puds = 1;				\
 		__tlb_remove_pud_tlb_entry(tlb, pudp, address);	\
 	} while (0)
 
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_pmds = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
@@ -230,7 +263,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_p4ds = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -238,7 +272,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_puds = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
diff --git a/mm/memory.c b/mm/memory.c
index 0ec7b1ffd434..f4874a2b8be8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -288,8 +288,10 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end, bool force)
 {
 	struct mmu_gather_batch *batch, *next;
 
-	if (force)
+	if (force) {
+		__tlb_reset_range(tlb);
 		__tlb_adjust_range(tlb, start, end - start);
+	}
 
 	tlb_flush_mmu(tlb);
--
2.26.2