[Devel] [PATCH RHEL7 COMMIT] asm-generic/tlb: Track freeing of page-table directories in struct mmu_gather

Vasily Averin vvs at virtuozzo.com
Tue Jul 21 17:58:38 MSK 2020


The commit is pushed to "branch-rh7-3.10.0-1127.10.1.vz7.162.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.10.1.vz7.162.13
------>
commit ec73eb60720c3c92f3f6388a1b1988b895c1e53f
Author: Peter Zijlstra <peterz at infradead.org>
Date:   Tue Jul 21 17:58:38 2020 +0300

    asm-generic/tlb: Track freeing of page-table directories in struct mmu_gather
    
    Some architectures require different TLB invalidation instructions
    depending on whether it is only the last level of the page table being
    changed, or whether there are also changes to the intermediate
    (directory) entries higher up the tree.
    
    Add a new bit to the flags bitfield in struct mmu_gather so that the
    architecture code can operate accordingly if it's the intermediate
    levels being invalidated.
    
    Acked-by: Nicholas Piggin <npiggin at gmail.com>
    Signed-off-by: Peter Zijlstra <peterz at infradead.org>
    Signed-off-by: Will Deacon <will.deacon at arm.com>
    
    https://jira.sw.ru/browse/PSBM-101300
    (cherry picked from commit 22a61c3c4f1379ef8b0ce0d5cb78baf3178950e2)
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/asm-generic/tlb.h | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9b1e2af99ddfe..bb8fd22ec20b3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,12 +96,22 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	/* we are in the middle of an operation to clear
-	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
-	/* we have performed an operation which
-	 * requires a complete flush of the tlb */
-				need_flush_all : 1;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
+
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
 
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
@@ -140,6 +150,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
 	tlb->start = TASK_SIZE;
 	tlb->end = 0;
+	tlb->freed_tables = 0;
 }
 
 /*
@@ -211,6 +222,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
@@ -218,6 +230,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -225,6 +238,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 
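For context, the *_free_tlb() macros above set tlb->freed_tables whenever a
page-table directory page is handed to the gather, and __tlb_reset_range()
clears the bit again after a flush. Below is a minimal sketch of how
architecture code might consume the new bit in its tlb_flush() implementation;
flush_tlb_range_last_level() and flush_tlb_range_all_levels() are hypothetical
helper names used purely for illustration, not existing kernel APIs:

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		/* The whole address space is going away: flush everything. */
		flush_tlb_mm(tlb->mm);
	} else if (tlb->freed_tables) {
		/*
		 * Page-table directories were freed, so any cached
		 * intermediate walk entries must be invalidated as well,
		 * not only the last-level translations.
		 */
		flush_tlb_range_all_levels(tlb->mm, tlb->start, tlb->end);
	} else {
		/* Only last-level (leaf) entries were changed. */
		flush_tlb_range_last_level(tlb->mm, tlb->start, tlb->end);
	}
}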

