[Devel] [PATCH RHEL7 COMMIT] ms/KVM: x86/vPMU: ignore access to LBR-related MSRs
Konstantin Khorenko
khorenko at virtuozzo.com
Fri Dec 8 19:03:25 MSK 2017
The commit is pushed to "branch-rh7-3.10.0-693.11.1.vz7.39.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-693.11.1.vz7.39.2
------>
commit 29ae48aecec83041cc348a55b5a079626aee2698
Author: Jan Dakinevich <jan.dakinevich at virtuozzo.com>
Date: Fri Dec 8 19:03:25 2017 +0300
ms/KVM: x86/vPMU: ignore access to LBR-related MSRs
Windows Server 2016 Essentials attempts (for a yet-unknown reason) to
access MSR_LBR_TOS and other LBR-related registers at startup. These
are not currently handled by KVM, so the guest gets a #GP and crashes.

To prevent that, identify the LBR-related MSRs pertinent to the CPU
model exposed to the guest, and dummy-handle them (ignore writes and
return zero on reads).
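
For illustration (not part of the patch): the dummy handling reduces to
a range check over the LBR MSR block of the exposed CPU model. Below is
a minimal userspace sketch of that check, assuming Skylake's LBR
geometry from the Intel SDM (32 entries; TOS at 0x1c9, FROM block at
0x680, TO block at 0x6c0) and mirroring the nr/tos/from/to fields this
patch adds:

/*
 * Standalone illustration of the intel_is_lbr_msr() range check.
 * The lbr_desc struct mirrors the nr/tos/from/to fields of
 * struct x86_pmu_lbr used by the patch; the Skylake numbers come
 * from the Intel SDM.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct lbr_desc {
	unsigned int nr;	/* number of LBR stack entries */
	unsigned int tos;	/* top-of-stack index MSR */
	unsigned int from;	/* first MSR_LASTBRANCH_n_FROM_IP */
	unsigned int to;	/* first MSR_LASTBRANCH_n_TO_IP */
};

static bool is_lbr_msr(const struct lbr_desc *lbr, uint32_t msr)
{
	if (!lbr->nr)
		return false;	/* no LBR info for this model */
	if (msr == lbr->tos)
		return true;
	if (msr >= lbr->from && msr < lbr->from + lbr->nr)
		return true;
	if (msr >= lbr->to && msr < lbr->to + lbr->nr)
		return true;
	return false;
}

int main(void)
{
	struct lbr_desc skl = {
		.nr = 32, .tos = 0x1c9, .from = 0x680, .to = 0x6c0,
	};

	/* 0x1c9 (TOS) and 0x69f (last FROM) match; 0x6e0 is past TO. */
	printf("%d %d %d\n", is_lbr_msr(&skl, 0x1c9),
	       is_lbr_msr(&skl, 0x69f), is_lbr_msr(&skl, 0x6e0));
	return 0;
}

Any matching guest access is then answered with zero (reads) or
silently accepted (writes) instead of raising #GP.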
https://jira.sw.ru/browse/PSBM-75679
Signed-off-by: Jan Dakinevich <jan.dakinevich at virtuozzo.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/pmu_intel.c        | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8a353cabab63..60b328f958ff 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
 #include <asm/msr-index.h>
 #include <asm/asm.h>
 #include <asm/kvm_page_track.h>
+#include <asm/perf_event.h>
 
 #define KVM_MAX_VCPUS 384
 #define KVM_SOFT_MAX_VCPUS 384
@@ -363,6 +364,7 @@ struct kvm_pmu {
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
+	struct x86_pmu_lbr lbr;
 };
 
 struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4f4947..4462bc3629dc 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
+#include <asm/cpu.h>
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
@@ -142,6 +143,24 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[idx];
 }
 
+static bool intel_is_lbr_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct x86_pmu_lbr *lbr = &pmu->lbr;
+
+	if (!lbr->nr)
+		return false;
+
+	if (msr == lbr->tos)
+		return true;
+	if (msr >= lbr->from && msr < lbr->from + lbr->nr)
+		return true;
+	if (msr >= lbr->to && msr < lbr->to + lbr->nr)
+		return true;
+
+	return false;
+}
+
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -155,6 +174,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 		ret = pmu->version > 1;
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			ret = true;
+			break;
+		}
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr);
@@ -183,6 +206,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		*data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			*data = 0;
+			return 0;
+		}
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			*data = pmc_read_counter(pmc);
@@ -235,6 +262,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr))
+			return 0;
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			if (!msr_info->host_initiated)
@@ -303,6 +332,11 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
 	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+
+	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (entry)
+		intel_pmu_lbr_fill(&pmu->lbr,
+			x86_family(entry->eax), x86_model(entry->eax));
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)
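
Note: intel_pmu_lbr_fill() is provided by an earlier patch in this
series (it derives the per-model table from the host perf LBR code) and
is not shown above. Purely as a hypothetical sketch of the shape such a
helper could take, using the msr-index.h names MSR_LBR_TOS (0x1c9),
MSR_LBR_NHM_FROM (0x680) and MSR_LBR_NHM_TO (0x6c0); the model list is
illustrative only:

/* Hypothetical sketch; the real helper lives in the perf code. */
#include <linux/string.h>	/* memset() */
#include <asm/msr-index.h>	/* MSR_LBR_TOS, MSR_LBR_NHM_* */
#include <asm/perf_event.h>	/* struct x86_pmu_lbr */

void intel_pmu_lbr_fill(struct x86_pmu_lbr *lbr,
			unsigned int family, unsigned int model)
{
	memset(lbr, 0, sizeof(*lbr));

	if (family != 6)
		return;		/* nr stays 0: no LBR MSRs are claimed */

	switch (model) {
	case 94:		/* e.g. Skylake client: 32-entry stack */
		lbr->nr = 32;
		lbr->tos = MSR_LBR_TOS;
		lbr->from = MSR_LBR_NHM_FROM;
		lbr->to = MSR_LBR_NHM_TO;
		break;
	case 26:		/* e.g. Nehalem: 16-entry stack */
		lbr->nr = 16;
		lbr->tos = MSR_LBR_TOS;
		lbr->from = MSR_LBR_NHM_FROM;
		lbr->to = MSR_LBR_NHM_TO;
		break;
	/* ... remaining models elided ... */
	}
}

With lbr.nr left at zero (unknown model), intel_is_lbr_msr() returns
false and guest-visible behavior is unchanged.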