[Devel] [PATCH RHEL7 COMMIT] ms/perf/x86/intel: make reusable LBR initialization code, part 2/2

Konstantin Khorenko khorenko at virtuozzo.com
Thu Oct 3 18:38:14 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.27.2.vz7.107.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.27.2.vz7.107.13
------>
commit e84ff4f253fe3210508002dee835c51179eb67de
Author: Jan Dakinevich <jan.dakinevich at virtuozzo.com>
Date:   Thu Oct 3 18:38:12 2019 +0300

    ms/perf/x86/intel: make reusable LBR initialization code, part 2/2
    
    This patch introduces the globally visible intel_pmu_lbr_fill()
    routine, which gathers information about which LBR MSRs are supported
    for a specific CPU family/model.
    
    The routine is intended to be used in KVM code, with guest CPU
    information as input. For this reason, it must not have any side
    effects that could affect the host system.
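
    As a minimal sketch (assuming a hypothetical KVM-side helper
    guest_fill_lbr(), which is not part of this patch), the routine
    could be driven from the guest's CPUID leaf 1 signature:

        /*
         * Hypothetical usage sketch: fill a guest-scoped x86_pmu_lbr
         * from the guest's CPUID.1:EAX without touching host x86_pmu
         * state.  kvm_find_cpuid_entry() is KVM's existing CPUID
         * lookup helper.
         */
        static void guest_fill_lbr(struct kvm_vcpu *vcpu,
                                   struct x86_pmu_lbr *lbr)
        {
                struct kvm_cpuid_entry2 *best;
                u8 family, model;

                best = kvm_find_cpuid_entry(vcpu, 1, 0);
                if (!best)
                        return;

                /* Decode family/model from CPUID.1:EAX (SDM layout) */
                family = (best->eax >> 8) & 0xf;
                if (family == 0xf)
                        family += (best->eax >> 20) & 0xff;

                model = (best->eax >> 4) & 0xf;
                if (family >= 0x6)
                        model |= ((best->eax >> 16) & 0xf) << 4;

                intel_pmu_lbr_fill(lbr, family, model);
        }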
    
    https://jira.sw.ru/browse/PSBM-75679
    Signed-off-by: Jan Dakinevich <jan.dakinevich at virtuozzo.com>
    
    +++
    perf/x86/intel: fix naming of Intel processor defines
    
    Some Intel CPU model defines were renamed, so update
    __intel_pmu_lbr_fill() to use the new names.
    
    Fixes: 746e059d3153 ("ms/perf/x86/intel: make reusable LBR initialization
    code")
    
    https://jira.sw.ru/browse/PSBM-94406
    
    Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
 arch/x86/events/intel/core.c      |  29 +-------
 arch/x86/events/intel/lbr.c       | 137 +++++++++++++++++++++++++++++++++++---
 arch/x86/events/perf_event.h      |  16 +----
 arch/x86/include/asm/perf_event.h |   2 +
 4 files changed, 134 insertions(+), 50 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c0ce9c6b30b6..094a43249f89 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3848,6 +3848,8 @@ __init int intel_pmu_init(void)
 
 	intel_ds_init();
 
+	intel_pmu_lbr_init();
+
 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
 
 	/*
@@ -3867,8 +3869,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
-		intel_pmu_lbr_init_core(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_core2_event_constraints;
 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
 		pr_cont("Core2 events, ");
@@ -3883,8 +3883,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_nhm(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
@@ -3916,8 +3914,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
-		intel_pmu_lbr_init_atom(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_gen_event_constraints;
 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
@@ -3935,8 +3931,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_slm(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_slm_event_constraints;
 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_slm_extra_regs;
@@ -3953,8 +3947,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_skl(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_slm_event_constraints;
 		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_glm_extra_regs;
@@ -3965,7 +3957,6 @@ __init int intel_pmu_init(void)
 		 */
 		x86_pmu.pebs_aliases = NULL;
 		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.lbr.pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = glm_events_attrs;
 		extra_attr = slm_format_attr;
@@ -3979,8 +3970,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_skl(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_slm_event_constraints;
 		x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_glm_extra_regs;
@@ -3990,7 +3979,6 @@ __init int intel_pmu_init(void)
 		 */
 		x86_pmu.pebs_aliases = NULL;
 		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.lbr.pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.get_event_constraints = glp_get_event_constraints;
 		x86_pmu.cpu_events = glm_events_attrs;
@@ -4009,8 +3997,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_nhm(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
@@ -4041,8 +4027,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_snb(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
@@ -4082,8 +4066,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_snb(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_ivb_event_constraints;
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
@@ -4118,8 +4100,6 @@ __init int intel_pmu_init(void)
 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_hsw(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_hsw_event_constraints;
 		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_snbep_extra_regs;
@@ -4132,7 +4112,6 @@ __init int intel_pmu_init(void)
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = get_hsw_events_attrs();
-		x86_pmu.lbr.double_abort = true;
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
@@ -4157,8 +4136,6 @@ __init int intel_pmu_init(void)
 		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
 									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
 
-		intel_pmu_lbr_init_hsw(&x86_pmu.lbr);
-
 		x86_pmu.event_constraints = intel_bdw_event_constraints;
 		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_snbep_extra_regs;
@@ -4184,7 +4161,6 @@ __init int intel_pmu_init(void)
 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs,
 		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-		intel_pmu_lbr_init_knl(&x86_pmu.lbr);
 
 		x86_pmu.event_constraints = intel_slm_event_constraints;
 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
@@ -4206,7 +4182,6 @@ __init int intel_pmu_init(void)
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-		intel_pmu_lbr_init_skl(&x86_pmu.lbr);
 
 		x86_pmu.event_constraints = intel_skl_event_constraints;
 		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ac08ac962b2f..3aed06f21fb6 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -4,6 +4,7 @@
 #include <asm/perf_event.h>
 #include <asm/msr.h>
 #include <asm/insn.h>
+#include <asm/intel-family.h>
 
 #include "../perf_event.h"
 
@@ -990,7 +991,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 };
 
 /* core */
-void __init intel_pmu_lbr_init_core(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_core(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr     = 4;
 	lbr->tos    = MSR_LBR_TOS;
@@ -1004,7 +1005,7 @@ void __init intel_pmu_lbr_init_core(struct x86_pmu_lbr *lbr)
 }
 
 /* nehalem/westmere */
-void __init intel_pmu_lbr_init_nhm(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_nhm(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr     = 16;
 	lbr->tos    = MSR_LBR_TOS;
@@ -1024,7 +1025,7 @@ void __init intel_pmu_lbr_init_nhm(struct x86_pmu_lbr *lbr)
 }
 
 /* sandy bridge */
-void __init intel_pmu_lbr_init_snb(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_snb(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr   = 16;
 	lbr->tos  = MSR_LBR_TOS;
@@ -1043,7 +1044,7 @@ void __init intel_pmu_lbr_init_snb(struct x86_pmu_lbr *lbr)
 }
 
 /* haswell */
-void intel_pmu_lbr_init_hsw(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_hsw(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr   = 16;
 	lbr->tos  = MSR_LBR_TOS;
@@ -1055,7 +1056,7 @@ void intel_pmu_lbr_init_hsw(struct x86_pmu_lbr *lbr)
 }
 
 /* skylake */
-__init void intel_pmu_lbr_init_skl(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_skl(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr   = 32;
 	lbr->tos  = MSR_LBR_TOS;
@@ -1074,7 +1075,7 @@ __init void intel_pmu_lbr_init_skl(struct x86_pmu_lbr *lbr)
 }
 
 /* atom */
-void __init intel_pmu_lbr_init_atom(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_atom(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr     = 8;
 	lbr->tos    = MSR_LBR_TOS;
@@ -1088,7 +1089,7 @@ void __init intel_pmu_lbr_init_atom(struct x86_pmu_lbr *lbr)
 }
 
 /* slm */
-void __init intel_pmu_lbr_init_slm(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_slm(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr     = 8;
 	lbr->tos    = MSR_LBR_TOS;
@@ -1105,7 +1106,7 @@ void __init intel_pmu_lbr_init_slm(struct x86_pmu_lbr *lbr)
 }
 
 /* Knights Landing */
-void intel_pmu_lbr_init_knl(struct x86_pmu_lbr *lbr)
+static void intel_pmu_lbr_init_knl(struct x86_pmu_lbr *lbr)
 {
 	lbr->nr     = 8;
 	lbr->tos    = MSR_LBR_TOS;
@@ -1115,3 +1116,123 @@ void intel_pmu_lbr_init_knl(struct x86_pmu_lbr *lbr)
 	lbr->sel_mask = LBR_SEL_MASK;
 	lbr->sel_map  = snb_lbr_sel_map;
 }
+
+static void __intel_pmu_lbr_fill(struct x86_pmu_lbr *lbr, u8 family, u8 model)
+{
+	if (family != 0x6)
+		return;
+
+	switch (model) {
+	case INTEL_FAM6_CORE_YONAH:
+		break;
+	case INTEL_FAM6_CORE2_MEROM:
+	case INTEL_FAM6_CORE2_MEROM_L:
+	case INTEL_FAM6_CORE2_PENRYN:
+	case INTEL_FAM6_CORE2_DUNNINGTON:
+		intel_pmu_lbr_init_core(lbr);
+		break;
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_NEHALEM_EP:
+	case INTEL_FAM6_NEHALEM_EX:
+		intel_pmu_lbr_init_nhm(lbr);
+		break;
+	case INTEL_FAM6_ATOM_BONNELL:
+	case INTEL_FAM6_ATOM_BONNELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL:
+	case INTEL_FAM6_ATOM_SALTWELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
+		intel_pmu_lbr_init_atom(lbr);
+		break;
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
+	case INTEL_FAM6_ATOM_SILVERMONT_MID:
+	case INTEL_FAM6_ATOM_AIRMONT:
+	case INTEL_FAM6_ATOM_AIRMONT_MID:
+		intel_pmu_lbr_init_slm(lbr);
+		break;
+	case INTEL_FAM6_ATOM_GOLDMONT:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
+		intel_pmu_lbr_init_skl(lbr);
+		lbr->pt_coexist = true;
+		break;
+	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+		intel_pmu_lbr_init_skl(lbr);
+		lbr->pt_coexist = true;
+		break;
+	case INTEL_FAM6_ATOM_TREMONT_X:
+		intel_pmu_lbr_init_skl(lbr);
+		lbr->pt_coexist = true;
+		break;
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_WESTMERE_EP:
+	case INTEL_FAM6_WESTMERE_EX:
+		intel_pmu_lbr_init_nhm(lbr);
+		break;
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_SANDYBRIDGE_X:
+		intel_pmu_lbr_init_snb(lbr);
+		break;
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE_X:
+		intel_pmu_lbr_init_snb(lbr);
+		break;
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_X:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+		intel_pmu_lbr_init_hsw(lbr);
+		lbr->double_abort = true;
+		break;
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_XEON_D:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_X:
+		intel_pmu_lbr_init_hsw(lbr);
+		break;
+	case INTEL_FAM6_XEON_PHI_KNL:
+	case INTEL_FAM6_XEON_PHI_KNM:
+		intel_pmu_lbr_init_knl(lbr);
+		break;
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_SKYLAKE_X:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		intel_pmu_lbr_init_skl(lbr);
+		break;
+	case INTEL_FAM6_ICELAKE_MOBILE:
+		intel_pmu_lbr_init_skl(lbr);
+		lbr->pt_coexist = true;
+		break;
+	}
+}
+
+void __init intel_pmu_lbr_init(void)
+{
+	memset(&x86_pmu.lbr, 0, sizeof(struct x86_pmu_lbr));
+
+	switch (boot_cpu_data.x86_model) {
+	case INTEL_FAM6_ATOM_BONNELL:
+		/*
+	 * only models starting at stepping 10 seem
+	 * to have an operational LBR which can freeze
+	 * on PMU interrupt
+		 */
+		if (boot_cpu_data.x86_mask < 10) {
+			pr_cont("LBR disabled due to erratum");
+			return;
+		}
+		break;
+	}
+
+	__intel_pmu_lbr_fill(&x86_pmu.lbr, boot_cpu_data.x86,
+			     boot_cpu_data.x86_model);
+}
+
+void intel_pmu_lbr_fill(struct x86_pmu_lbr *lbr, u8 family, u8 model)
+{
+	memset(lbr, 0, sizeof(*lbr));
+
+	__intel_pmu_lbr_fill(lbr, family, model);
+}
+EXPORT_SYMBOL_GPL(intel_pmu_lbr_fill);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e48403278fe5..d4e2a2a57eb6 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -933,21 +933,7 @@ void intel_pmu_lbr_disable_all(void);
 
 void intel_pmu_lbr_read(void);
 
-void intel_pmu_lbr_init_core(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_nhm(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_atom(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_slm(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_snb(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_hsw(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_skl(struct x86_pmu_lbr *lbr);
-
-void intel_pmu_lbr_init_knl(struct x86_pmu_lbr *lbr);
+void intel_pmu_lbr_init(void);
 
 void intel_pmu_pebs_data_source_nhm(void);
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 440d3ecb0110..fab636df30d9 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -133,6 +133,8 @@ struct x86_pmu_lbr {
 	bool		pt_coexist;	/* (LBR|BTS) may coexist with PT */
 };
 
+void intel_pmu_lbr_fill(struct x86_pmu_lbr *lbr, u8 family, u8 model);
+
 /*
  * Fixed-purpose performance events:
  */


