[Devel] [PATCH RHEL8 COMMIT] core: Add glob_kstat, percpu kstat and account mm stat

Konstantin Khorenko khorenko at virtuozzo.com
Thu May 6 00:55:02 MSK 2021


The commit is pushed to "branch-rh8-4.18.0-240.1.1.vz8.5.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-240.1.1.vz8.5.26
------>
commit 928833c25f22e74a06db121277929b6797c65fdc
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date:   Thu May 6 00:55:02 2021 +0300

    core: Add glob_kstat, percpu kstat and account mm stat
    
    Adds latency calculation for:
      kstat_glob.swap_in
      kstat_glob.page_in
      kstat_glob.alloc_lat
    And fail count in:
      kstat_glob.alloc_fails
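
    swap_in and page_in follow the same cycles-based pattern (a minimal
    sketch taken from the page_in hunk below; the identifiers are the ones
    this patch uses), while alloc_lat uses a jiffies-based variant in
    __alloc_collect_stats():

      cycles_t start;

      start = get_cycles();
      ret = vma->vm_ops->fault(vmf);      /* the operation being timed */
      ...
      local_irq_disable();
      KSTAT_LAT_PCPU_ADD(&kstat_glob.page_in, get_cycles() - start);
      local_irq_enable();

    The irq-off section around the update appears to come from the
    diff-ve-kstat-disable-interrupts-around-seqcount-write-lock fixup
    folded in below.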
    
    Also incorporates the following fixup patches:
      kstat: Make kstat_glob::swap_in percpu - core part
      ve/mm/kstat: Port diff-ve-kstat-disable-interrupts-around-seqcount-write-lock
    
    Related buglinks:
    https://jira.sw.ru/browse/PCLIN-31259
    https://jira.sw.ru/browse/PSBM-33650
    
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    Signed-off-by: Konstantin Khlebnikov <khlebnikov at openvz.org>
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
    
    Rebase to vz8:
    
    Commit [1] tries to reimplement the swap_in part of this patch, but on
    rebase it loses the "goto out" hunk added in vz7.150.1; bring the hunk
    back.
    
    Note: On rebase I would prefer merging [1] into this patch rather than
    merging this patch into [1].
    
    Add vzstat.h where needed and replace __GFP_WAIT with its successor
    __GFP_RECLAIM; skip kstat_init as it is already there.
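
    For reference, the atomic-allocation check in __alloc_collect_stats()
    keys off __GFP_RECLAIM, which upstream defines as the union of the two
    reclaim bits that replaced __GFP_WAIT:

      /* include/linux/gfp.h */
      #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM | ___GFP_KSWAPD_RECLAIM))

      /* __alloc_collect_stats(): callers that may not reclaim count as atomic */
      if (!(gfp_mask & __GFP_RECLAIM))
              ind = KSTAT_ALLOCSTAT_ATOMIC;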
    
    https://jira.sw.ru/browse/PSBM-127780
    Fixes: c30a7bebb0e73 ("kstat: Make kstat_glob::swap_in percpu") [1]
    (cherry-picked from vz7 commit 9caa91f6a857 ("core: Add glob_kstat, percpu kstat
    and account mm stat"))
    
    Signed-off-by: Pavel Tikhomirov <ptikhomirov at virtuozzo.com>
---
 mm/memory.c     |  8 +++++++-
 mm/page_alloc.c | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/mm/memory.c b/mm/memory.c
index 3a483796dea4..9ee4c80bcd0c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3244,7 +3244,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		unlock_page(swapcache);
 		put_page(swapcache);
 	}
-	return ret;
+	goto out;
 }
 
 /*
@@ -3371,6 +3371,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
+	cycles_t start;
 
 	/*
 	 * Preallocate pte before we take page_lock because this might lead to
@@ -3394,6 +3395,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 		smp_wmb(); /* See comment in __pte_alloc() */
 	}
 
+	start = get_cycles();
 	ret = vma->vm_ops->fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
@@ -3412,6 +3414,10 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
+	local_irq_disable();
+	KSTAT_LAT_PCPU_ADD(&kstat_glob.page_in, get_cycles() - start);
+	local_irq_enable();
+
 	return ret;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd04c836d7e1..28f06ac65096 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -71,6 +71,7 @@
 #include <linux/nmi.h>
 #include <linux/psi.h>
 #include <linux/padata.h>
+#include <linux/vzstat.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -4516,6 +4517,36 @@ static __always_inline void warn_high_order(int order, gfp_t gfp_mask)
 	}
 }
 
+static void __alloc_collect_stats(gfp_t gfp_mask, unsigned int order,
+		struct page *page, u64 time)
+{
+#ifdef CONFIG_VE
+	unsigned long flags;
+	int ind, cpu;
+
+	time = jiffies_to_usecs(jiffies - time) * 1000;
+	if (!(gfp_mask & __GFP_RECLAIM))
+		ind = KSTAT_ALLOCSTAT_ATOMIC;
+	else if (!(gfp_mask & __GFP_HIGHMEM))
+		if (order > 0)
+			ind = KSTAT_ALLOCSTAT_LOW_MP;
+		else
+			ind = KSTAT_ALLOCSTAT_LOW;
+	else
+		if (order > 0)
+			ind = KSTAT_ALLOCSTAT_HIGH_MP;
+		else
+			ind = KSTAT_ALLOCSTAT_HIGH;
+
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+	KSTAT_LAT_PCPU_ADD(&kstat_glob.alloc_lat[ind], time);
+	if (!page)
+		kstat_glob.alloc_fails[cpu][ind]++;
+	local_irq_restore(flags);
+#endif
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
@@ -4527,6 +4558,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
+	cycles_t start;
 
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
@@ -4538,6 +4570,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 
 	finalise_ac(gfp_mask, &ac);
 
+	start = jiffies;
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
 	if (likely(page))
@@ -4568,6 +4601,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 		page = NULL;
 	}
 
+	__alloc_collect_stats(alloc_mask, order, page, start);
 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
 	return page;

