[Devel] [PATCH 1/4] kstat: Make kstat_glob::swap_in percpu
Kirill Tkhai
ktkhai at virtuozzo.com
Mon Dec 11 18:11:02 MSK 2017
Using a global lock is bad for scalability.
Better to make swap_in percpu, so that it is updated
locklessly like the other statistics (e.g., page_in).
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
include/linux/vzstat.h | 2 +-
kernel/sched/core.c | 2 ++
kernel/ve/vzstat.c | 2 +-
mm/memory.c | 6 +++---
4 files changed, 7 insertions(+), 5 deletions(-)
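[Note: for context, the lockless percpu update this patch converts swap_in to
is the same pattern page_in already uses. A minimal sketch of that pattern is
below; the struct layout and the helper mirror what KSTAT_LAT_PCPU_ADD does,
but the names and bodies are approximations, not the exact Virtuozzo kernel
code:

	/* One latency snapshot per CPU; only the owning CPU writes it. */
	struct kstat_lat_pcpu_snap_struct {
		u64 maxlat, totlat;
		unsigned long count;
		seqcount_t lock;	/* lets the aggregator read consistently */
	};

	/* Writer side: no global lock, just irqs off on the local CPU.
	 * p->cur points at the per-CPU snapshots, as wired up in
	 * kstat_init() below.
	 */
	static inline void kstat_lat_pcpu_add_sketch(struct kstat_lat_pcpu_struct *p,
						     int cpu, u64 dur)
	{
		struct kstat_lat_pcpu_snap_struct *cur = per_cpu_ptr(p->cur, cpu);

		write_seqcount_begin(&cur->lock);
		cur->count++;
		cur->totlat += dur;
		if (cur->maxlat < dur)
			cur->maxlat = dur;
		write_seqcount_end(&cur->lock);
	}

Each CPU touches only its own snapshot, so concurrent swap-in faults on
different CPUs no longer serialize on kstat_glb_lock.]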
diff --git a/include/linux/vzstat.h b/include/linux/vzstat.h
index 8220f99fb657..5050bc194505 100644
--- a/include/linux/vzstat.h
+++ b/include/linux/vzstat.h
@@ -80,7 +80,7 @@ struct kernel_stat_glob {
struct kstat_lat_pcpu_struct alloc_lat[KSTAT_ALLOCSTAT_NR];
struct kstat_lat_pcpu_struct sched_lat;
struct kstat_lat_pcpu_struct page_in;
- struct kstat_lat_struct swap_in;
+ struct kstat_lat_pcpu_struct swap_in;
struct kstat_perf_pcpu_struct ttfp, cache_reap,
refill_inact, shrink_icache, shrink_dcache;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4764ba9ca7e1..8418db07251d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -143,6 +143,7 @@ EXPORT_SYMBOL(kstat_glb_lock);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_lat);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_page_in);
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_swap_in);
static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, alloc_kstat_lat[KSTAT_ALLOCSTAT_NR]);
static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_ttfp);
@@ -158,6 +159,7 @@ void __init kstat_init(void)
seqcount_init(&kstat_glob.nr_unint_avg_seq);
kstat_glob.sched_lat.cur = &glob_kstat_lat;
kstat_glob.page_in.cur = &glob_kstat_page_in;
+ kstat_glob.swap_in.cur = &glob_kstat_swap_in;
for ( i = 0 ; i < KSTAT_ALLOCSTAT_NR ; i++)
kstat_glob.alloc_lat[i].cur = &alloc_kstat_lat[i];
diff --git a/kernel/ve/vzstat.c b/kernel/ve/vzstat.c
index 319625fedd58..69cbb38210c0 100644
--- a/kernel/ve/vzstat.c
+++ b/kernel/ve/vzstat.c
@@ -151,7 +151,7 @@ static void update_alloc_latency(void)
spin_lock_irq(&kstat_glb_lock);
for (i = 0; i < KSTAT_ALLOCSTAT_NR; i++)
KSTAT_LAT_PCPU_UPDATE(&kstat_glob.alloc_lat[i]);
- KSTAT_LAT_UPDATE(&kstat_glob.swap_in);
+ KSTAT_LAT_PCPU_UPDATE(&kstat_glob.swap_in);
KSTAT_LAT_PCPU_UPDATE(&kstat_glob.page_in);
spin_unlock_irq(&kstat_glb_lock);
}
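[Note: with this hunk the periodic aggregation treats swap_in exactly like
page_in and alloc_lat: under kstat_glb_lock, KSTAT_LAT_PCPU_UPDATE folds the
per-CPU snapshots into the global summary, using the seqcount to get a
consistent view against the lockless writers. Roughly (again an approximate
sketch, not the real macro body):

	u64 maxlat = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct kstat_lat_pcpu_snap_struct snap, *cur;
		unsigned int seq;

		cur = per_cpu_ptr(p->cur, cpu);
		do {	/* retry if we raced with that CPU's writer */
			seq = read_seqcount_begin(&cur->lock);
			snap = *cur;
		} while (read_seqcount_retry(&cur->lock, seq));

		if (maxlat < snap.maxlat)
			maxlat = snap.maxlat;
		/* totals and running averages are folded in similarly */
	}
]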
diff --git a/mm/memory.c b/mm/memory.c
index b1c6968f1746..13e9dc577dbd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2672,9 +2672,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unlock:
pte_unmap_unlock(page_table, ptl);
out:
- spin_lock_irq(&kstat_glb_lock);
- KSTAT_LAT_ADD(&kstat_glob.swap_in, get_cycles() - start);
- spin_unlock_irq(&kstat_glb_lock);
+ local_irq_disable();
+ KSTAT_LAT_PCPU_ADD(&kstat_glob.swap_in, smp_processor_id(), get_cycles() - start);
+ local_irq_enable();
return ret;
out_nomap:
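[Note on the memory.c change: spin_lock_irq() is replaced with plain
local_irq_disable(). The snapshot being updated is private to the current
CPU, so no cross-CPU exclusion is needed; irqs still have to be off so an
interrupt on the same CPU cannot nest inside the seqcount write section.
The global lock disappears from the swap-in fast path entirely.]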