[Devel] [PATCH RHEL7 COMMIT] kstat: Make kstat_glob::swap_in percpu
Konstantin Khorenko
khorenko at virtuozzo.com
Wed Dec 20 12:00:08 MSK 2017
The commit is pushed to "branch-rh7-3.10.0-693.11.1.vz7.39.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-693.11.1.vz7.39.8
------>
commit ed033a381e01996f7f8061d9838d1c9ec6b38d96
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date: Wed Dec 20 12:00:07 2017 +0300
kstat: Make kstat_glob::swap_in percpu
Patchset description:
Make kstat_glob::swap_in percpu and cleanup
This patchset continues the effort to avoid taking kstat_glb_lock
and makes swap_in percpu. Also, newly unused primitives
are dropped, and memory usage is reduced by using a single
global percpu seqcount (instead of a separate percpu seqcount
for every kstat percpu variable).
Kirill Tkhai (4):
kstat: Make kstat_glob::swap_in percpu
kstat: Drop global kstat_lat_struct
kstat: Drop cpu argument in KSTAT_LAT_PCPU_ADD()
kstat: Make global percpu kstat_pcpu_seq instead of percpu seq for every
variable
==========================================
This patch description:
Use of a global lock is not good for scalability.
It is better to make swap_in percpu, so it is updated
locklessly like the other statistics (e.g., page_in).
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
include/linux/vzstat.h | 2 +-
kernel/sched/core.c | 2 ++
kernel/ve/vzstat.c | 2 +-
mm/memory.c | 6 +++---
4 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/linux/vzstat.h b/include/linux/vzstat.h
index 8220f99fb657..5050bc194505 100644
--- a/include/linux/vzstat.h
+++ b/include/linux/vzstat.h
@@ -80,7 +80,7 @@ struct kernel_stat_glob {
struct kstat_lat_pcpu_struct alloc_lat[KSTAT_ALLOCSTAT_NR];
struct kstat_lat_pcpu_struct sched_lat;
struct kstat_lat_pcpu_struct page_in;
- struct kstat_lat_struct swap_in;
+ struct kstat_lat_pcpu_struct swap_in;
struct kstat_perf_pcpu_struct ttfp, cache_reap,
refill_inact, shrink_icache, shrink_dcache;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4764ba9ca7e1..8418db07251d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -143,6 +143,7 @@ EXPORT_SYMBOL(kstat_glb_lock);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_lat);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_page_in);
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_swap_in);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, alloc_kstat_lat[KSTAT_ALLOCSTAT_NR]);
static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_ttfp);
@@ -158,6 +159,7 @@ void __init kstat_init(void)
seqcount_init(&kstat_glob.nr_unint_avg_seq);
kstat_glob.sched_lat.cur = &glob_kstat_lat;
kstat_glob.page_in.cur = &glob_kstat_page_in;
+ kstat_glob.swap_in.cur = &glob_kstat_swap_in;
for ( i = 0 ; i < KSTAT_ALLOCSTAT_NR ; i++)
kstat_glob.alloc_lat[i].cur = &alloc_kstat_lat[i];
diff --git a/kernel/ve/vzstat.c b/kernel/ve/vzstat.c
index 319625fedd58..69cbb38210c0 100644
--- a/kernel/ve/vzstat.c
+++ b/kernel/ve/vzstat.c
@@ -151,7 +151,7 @@ static void update_alloc_latency(void)
spin_lock_irq(&kstat_glb_lock);
for (i = 0; i < KSTAT_ALLOCSTAT_NR; i++)
KSTAT_LAT_PCPU_UPDATE(&kstat_glob.alloc_lat[i]);
- KSTAT_LAT_UPDATE(&kstat_glob.swap_in);
+ KSTAT_LAT_PCPU_UPDATE(&kstat_glob.swap_in);
KSTAT_LAT_PCPU_UPDATE(&kstat_glob.page_in);
spin_unlock_irq(&kstat_glb_lock);
}
diff --git a/mm/memory.c b/mm/memory.c
index b1c6968f1746..13e9dc577dbd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2672,9 +2672,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unlock:
pte_unmap_unlock(page_table, ptl);
out:
- spin_lock_irq(&kstat_glb_lock);
- KSTAT_LAT_ADD(&kstat_glob.swap_in, get_cycles() - start);
- spin_unlock_irq(&kstat_glb_lock);
+ local_irq_disable();
+ KSTAT_LAT_PCPU_ADD(&kstat_glob.swap_in, smp_processor_id(), get_cycles() - start);
+ local_irq_enable();
return ret;
out_nomap:
More information about the Devel
mailing list