[Devel] [PATCH RHEL7 COMMIT] mm/page_alloc: use sched_clock() instead of jiffies to measure latency

Konstantin Khorenko khorenko at virtuozzo.com
Tue Feb 26 13:15:06 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.1.3.vz7.83.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.1.3.vz7.83.14
------>
commit 99407f6d6f504d00aa3cd3ca87782ab32b0ec364
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Tue Feb 26 13:15:03 2019 +0300

    mm/page_alloc: use sched_clock() instead of jiffies to measure latency
    
    sched_clock() (essentially rdtsc() on x86) gives us a more precise
    result than jiffies.
    
    Q: Why do we need greater accuracy?
    A: Because if we target, say, 10000 IOPS per CPU, then a memory
       allocation latency of 1 ms is already too much; to bring the
       allocation latency down we first need to be able to measure it.
    
    https://pmc.acronis.com/browse/VSTOR-19040
    
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 mm/page_alloc.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
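
For illustration only, not part of the patch: a minimal userspace sketch of
the precision gap described in the commit message. clock_gettime() with
CLOCK_MONOTONIC_COARSE stands in for jiffies (it ticks at roughly HZ
granularity, i.e. milliseconds), and CLOCK_MONOTONIC stands in for
sched_clock(); the clock_ns() helper and the 64 KiB write loop are purely
illustrative assumptions, not kernel code.

/* Compile: gcc -O2 -o clockdemo clockdemo.c */
#define _GNU_SOURCE		/* for CLOCK_MONOTONIC_COARSE on older glibc */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Read a clock and fold it into nanoseconds, like a u64 sched_clock() value. */
static unsigned long long clock_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	unsigned long long coarse_start = clock_ns(CLOCK_MONOTONIC_COARSE);
	unsigned long long fine_start = clock_ns(CLOCK_MONOTONIC);
	volatile char *buf = malloc(1 << 16);
	int i;

	/* Stand-in for a fast, sub-millisecond allocation path. */
	for (i = 0; i < (1 << 16); i++)
		buf[i] = (char)i;
	free((void *)buf);

	/* The coarse (jiffies-like) delta is usually 0; the fine delta shows
	 * the real latency, typically tens of microseconds. */
	printf("coarse delta: %llu ns\n",
	       clock_ns(CLOCK_MONOTONIC_COARSE) - coarse_start);
	printf("fine   delta: %llu ns\n",
	       clock_ns(CLOCK_MONOTONIC) - fine_start);
	return 0;
}

With HZ=1000 a jiffies-based clock cannot resolve anything shorter than
roughly 1 ms, while a 10000-IOPS-per-CPU target leaves only about 100 us
per request, which is why the patch switches the measurement to sched_clock().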

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 74395116344b..362c2a2235c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3468,9 +3468,11 @@ static void __alloc_collect_stats(gfp_t gfp_mask, unsigned int order,
 {
 #ifdef CONFIG_VE
 	unsigned long flags;
+	u64 current_clock, delta;
 	int ind, cpu;
 
-	time = jiffies_to_usecs(jiffies - time) * 1000;
+	current_clock = sched_clock();
+	delta = current_clock - time;
 	if (!(gfp_mask & __GFP_WAIT)) {
 		if (in_task())
 			ind = KSTAT_ALLOCSTAT_ATOMIC;
@@ -3485,12 +3487,12 @@ static void __alloc_collect_stats(gfp_t gfp_mask, unsigned int order,
 
 	local_irq_save(flags);
 	cpu = smp_processor_id();
-	KSTAT_LAT_PCPU_ADD(&kstat_glob.alloc_lat[ind], time);
+	KSTAT_LAT_PCPU_ADD(&kstat_glob.alloc_lat[ind], delta);
 
 	if (in_task()) {
-		current->alloc_lat[ind].totlat += time;
+		current->alloc_lat[ind].totlat += delta;
 		current->alloc_lat[ind].count++;
-		update_maxlat(&current->alloc_lat[ind], time, jiffies);
+		update_maxlat(&current->alloc_lat[ind], delta, current_clock);
 	}
 
 	if (!page)
@@ -3545,7 +3547,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
-	cycles_t start;
+	u64 start;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -3583,7 +3585,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		alloc_flags |= ALLOC_CMA;
 #endif
 retry:
-	start = jiffies;
+	start = sched_clock();
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
 			zonelist, high_zoneidx, alloc_flags,
