[Devel] [PATCH v1] KVM: x86: avoid redundant REQ_EVENT
Denis Plotnikov
dplotnikov at virtuozzo.com
Tue Jan 10 08:14:01 PST 2017
When processing KVM_REQ_EVENT, apic_update_ppr is called which may set
KVM_REQ_EVENT again if the recalculated value of PPR becomes smaller
than the previous one. This results in cancelling the guest entry and
reiterating in vcpu_enter_guest.
However this is unnecessary because at this point KVM_REQ_EVENT is
already being processed and there are no other changes in the lapic
that may require full-fledged state recalculation.
This situation is often hit on systems with TPR shadow, where the
TPR can be updated by the guest without a vmexit, so that the first
apic_update_ppr to notice it is exactly the one called while
processing KVM_REQ_EVENT.
To avoid this, introduce a parameter in apic_update_ppr that allows
suppressing the setting of KVM_REQ_EVENT, and use it on the paths called
from KVM_REQ_EVENT processing.
Also add the parameter to kvm_cpu_get_interrupt, to be passed through to
apic_update_ppr, to make sure the suppression of KVM_REQ_EVENT happens
only in the KVM_REQ_EVENT processing path.
This micro-optimization gives a 10% performance increase on a synthetic
test doing a lot of IPC in Windows using window messages.
Reviewed-by: Roman Kagan <rkagan at virtuozzo.com>
Signed-off-by: Denis Plotnikov <dplotnikov at virtuozzo.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/irq.c | 8 ++++----
arch/x86/kvm/lapic.c | 26 +++++++++++++-------------
arch/x86/kvm/lapic.h | 4 ++--
arch/x86/kvm/vmx.c | 2 +-
arch/x86/kvm/x86.c | 2 +-
6 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ee50557..fb13c87 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1239,7 +1239,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, bool make_req);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 95fcc7b..d434ad9 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -82,7 +82,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (kvm_vcpu_apicv_active(v))
return 0;
- return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
+ return kvm_apic_has_interrupt(v, false) != -1; /* LAPIC */
}
/*
@@ -97,7 +97,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
if (kvm_cpu_has_extint(v))
return 1;
- return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
+ return kvm_apic_has_interrupt(v, true) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
@@ -122,7 +122,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
/*
* Read pending interrupt vector and intack.
*/
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, bool make_req)
{
int vector;
@@ -134,7 +134,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
if (vector != -1)
return vector; /* PIC */
- return kvm_get_apic_interrupt(v); /* APIC */
+ return kvm_get_apic_interrupt(v, make_req); /* APIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ff8d0fd..77ad90e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -561,7 +561,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
-static void apic_update_ppr(struct kvm_lapic *apic)
+static void apic_update_ppr(struct kvm_lapic *apic, bool make_req)
{
u32 tpr, isrv, ppr, old_ppr;
int isr;
@@ -581,7 +581,7 @@ static void apic_update_ppr(struct kvm_lapic *apic)
if (old_ppr != ppr) {
apic_set_reg(apic, APIC_PROCPRI, ppr);
- if (ppr < old_ppr)
+ if (make_req && ppr < old_ppr)
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
}
@@ -589,7 +589,7 @@ static void apic_update_ppr(struct kvm_lapic *apic)
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
apic_set_reg(apic, APIC_TASKPRI, tpr);
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
}
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
@@ -1055,7 +1055,7 @@ static int apic_set_eoi(struct kvm_lapic *apic)
return vector;
apic_clear_isr(vector, apic);
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
kvm_hv_synic_send_eoi(apic->vcpu, vector);
@@ -1170,7 +1170,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
val = apic_get_tmcct(apic);
break;
case APIC_PROCPRI:
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
val = kvm_apic_get_reg(apic, offset);
break;
case APIC_TASKPRI:
@@ -1776,7 +1776,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_lapic_set_base(vcpu,
vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu->arch.pv_eoi.msr_val = 0;
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
@@ -1897,7 +1897,7 @@ nomem:
return -ENOMEM;
}
-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu, bool make_req)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
@@ -1905,7 +1905,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
if (!apic_enabled(apic))
return -1;
- apic_update_ppr(apic);
+ apic_update_ppr(apic, make_req);
highest_irr = apic_find_highest_irr(apic);
if ((highest_irr == -1) ||
((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
@@ -1938,9 +1938,9 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
}
}
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, bool make_req)
{
- int vector = kvm_apic_has_interrupt(vcpu);
+ int vector = kvm_apic_has_interrupt(vcpu, make_req);
struct kvm_lapic *apic = vcpu->arch.apic;
if (vector == -1)
@@ -1954,12 +1954,12 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
*/
apic_set_isr(vector, apic);
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
apic_clear_irr(vector, apic);
if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
apic_clear_isr(vector, apic);
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
}
return vector;
@@ -2009,7 +2009,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
- apic_update_ppr(apic);
+ apic_update_ppr(apic, true);
hrtimer_cancel(&apic->lapic_timer.timer);
apic_update_lvtt(apic);
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 952d02c..053d3e6 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -48,9 +48,9 @@ struct dest_map;
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu, bool make_req);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, bool make_req);
void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3d39923..7da0987 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10326,7 +10326,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
&& nested_exit_intr_ack_set(vcpu)) {
- int irq = kvm_cpu_get_interrupt(vcpu);
+ int irq = kvm_cpu_get_interrupt(vcpu, true);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e9c6b0c..6b547f6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6356,7 +6356,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
return r;
}
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
- kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
+ kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu, false),
false);
kvm_x86_ops->set_irq(vcpu);
}
--
2.10.1.352.g0cf3611
More information about the Devel
mailing list