[Devel] [PATCH RHEL7 COMMIT] ms/KVM: x86: introduce get_kvmclock_ns

Konstantin Khorenko khorenko at virtuozzo.com
Mon Oct 24 05:07:10 PDT 2016


The commit is pushed to "branch-rh7-3.10.0-327.36.1.vz7.19.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.36.1.vz7.19.3
------>
commit 5bac74955a0646b4ffd676c6e09b23e7d3c1d318
Author: Paolo Bonzini <pbonzini at redhat.com>
Date:   Mon Oct 24 16:07:10 2016 +0400

    ms/KVM: x86: introduce get_kvmclock_ns
    
    Introduce a function that reads the exact nanoseconds value that is
    provided to the guest in kvmclock.  This crystallizes the notion of
    kvmclock as a thin veneer over a stable TSC, one that the guest will
    (hopefully) discipline with NTP.  In other words, kvmclock is *not* a
    paravirtualized host-to-guest NTP.
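    
    For illustration only (a sketch, not part of this patch): ignoring the
    version/seqlock retry loop in __pvclock_read_cycles(), the guest-side
    conversion amounts to the following, where pvclock_ns() is a
    hypothetical helper and the 'src' fields are the real
    struct pvclock_vcpu_time_info ones:
    
        u64 pvclock_ns(const struct pvclock_vcpu_time_info *src, u64 tsc)
        {
                u64 delta = tsc - src->tsc_timestamp;
    
                /* Pre-shift, then 32.32 fixed-point multiply; the kernel
                 * computes the 128-bit product in pvclock_scale_delta(). */
                if (src->tsc_shift >= 0)
                        delta <<= src->tsc_shift;
                else
                        delta >>= -src->tsc_shift;
    
                return src->system_time +
                       (u64)(((unsigned __int128)delta *
                              src->tsc_to_system_mul) >> 32);
        }
    
    With a stable TSC this is a pure function of the TSC value, which is
    exactly what the new __get_kvmclock_ns() below relies on when
    PVCLOCK_TSC_STABLE_BIT is set.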
    
    Drop the get_kernel_ns() function, which was used both to get the base
    value of the master clock and to get the current value of kvmclock.
    The former use is replaced by ktime_get_boot_ns(), the latter is
    the purpose of the new get_kvmclock_ns().
    
    This also allows KVM to provide a Hyper-V time reference counter that
    is synchronized with the time that is computed from the TSC page.
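    
    Concretely, the reference counter ticks in 100 ns units.  A sketch of
    the guest-side TSC-page read, assuming the Hyper-V TLFS formula
    ('scale' and 'offset' stand in for the page's tsc_scale/tsc_offset
    fields):
    
        static u64 tsc_page_read(u64 tsc, u64 scale, s64 offset)
        {
                /* 64.64 fixed-point multiply; result is in 100 ns units */
                return (u64)(((unsigned __int128)tsc * scale) >> 64) + offset;
        }
    
    The host-side counterpart is the hyperv.c hunk below, which now derives
    the same 100 ns counter as div_u64(get_kvmclock_ns(kvm), 100).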
    
    Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
    (cherry picked from commit 108b249c453dd7132599ab6dc7e435a7036c193f)
    Some code adapted to Virtuozzo's branch.
    Signed-off-by: Denis Plotnikov <dplotnikov at virtuozzo.com>
    Reviewed-by: Roman Kagan <rkagan at virtuozzo.com>
    Signed-off-by: "Denis V. Lunev" <den at openvz.org>
---
 arch/x86/include/asm/pvclock.h |  8 ++++----
 arch/x86/kernel/pvclock.c      |  6 ++++--
 arch/x86/kvm/hyperv.c          |  2 +-
 arch/x86/kvm/x86.c             | 45 ++++++++++++++++++++++++++++++++----------
 arch/x86/kvm/x86.h             |  1 +
 arch/x86/vdso/vclock_gettime.c |  3 ++-
 6 files changed, 47 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 628954c..7ec049a 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -60,16 +60,16 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }
 
 static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
+u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src, u64 tsc)
 {
-	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	u64 delta = tsc - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
 
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-			       cycle_t *cycles, u8 *flags)
+			       cycle_t *cycles, u8 *flags, u64 tsc)
 {
 	unsigned version;
 	cycle_t ret, offset;
@@ -83,7 +83,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 	 * below, resulting in violation of monotonicity.
 	 */
 	rdtsc_barrier();
-	offset = pvclock_get_nsec_offset(src);
+	offset = pvclock_get_nsec_offset(src, tsc);
 	ret = src->system_time + offset;
 	ret_flags = src->flags;
 
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d2..07391d4 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -65,7 +65,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = __pvclock_read_cycles(src, &ret, &flags,
+						native_read_tsc());
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -79,7 +80,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = __pvclock_read_cycles(src, &ret, &flags,
+						native_read_tsc());
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 9d60d33..0f07375 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -386,7 +386,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic)
 
 static u64 get_time_ref_counter(struct kvm *kvm)
 {
-	return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+	return div_u64(get_kvmclock_ns(kvm), 100);
 }
 
 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 31b59bf..d39e4b5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1595,6 +1595,35 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
+static u64 __get_kvmclock_ns(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
+	struct kvm_arch *ka = &kvm->arch;
+	u64 ns;
+	u8 flags;
+
+	if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
+		u64 tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		__pvclock_read_cycles(&vcpu->arch.hv_clock, &ns, &flags, tsc);
+	} else {
+		ns = ktime_to_ns(ktime_get_boottime()) + ka->kvmclock_offset;
+	}
+
+	return ns;
+}
+
+u64 get_kvmclock_ns(struct kvm *kvm)
+{
+	unsigned long flags;
+	s64 ns;
+
+	local_irq_save(flags);
+	ns = __get_kvmclock_ns(kvm);
+	local_irq_restore(flags);
+
+	return ns;
+}
+
 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 {
 	struct kvm_vcpu_arch *vcpu = &v->arch;
@@ -3943,7 +3972,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	case KVM_SET_CLOCK: {
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
-		s64 delta;
 
 		r = -EFAULT;
 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
@@ -3955,10 +3983,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
 		r = 0;
 		local_irq_disable();
-		now_ns = get_kernel_ns();
-		delta = user_ns.clock - now_ns;
+		now_ns = __get_kvmclock_ns(kvm);
+		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
 		local_irq_enable();
-		kvm->arch.kvmclock_offset = delta;
 		kvm_gen_update_masterclock(kvm);
 		break;
 	}
@@ -3966,10 +3993,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 
-		local_irq_disable();
-		now_ns = get_kernel_ns();
-		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
-		local_irq_enable();
+		now_ns = get_kvmclock_ns(kvm);
+		user_ns.clock = now_ns;
 		user_ns.flags = 0;
 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
@@ -7375,7 +7400,7 @@ int kvm_arch_hardware_enable(void)
 	 * before any KVM threads can be running.  Unfortunately, we can't
 	 * bring the TSCs fully up to date with real time, as we aren't yet far
 	 * enough into CPU bringup that we know how much real time has actually
-	 * elapsed; our helper function, get_kernel_ns() will be using boot
+	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
 	 * variables that haven't been updated yet.
 	 *
 	 * So we simply find the maximum observed TSC above, then record the
@@ -7579,7 +7604,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.apic_map_lock);
 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
-	kvm->arch.kvmclock_offset = -get_kernel_ns();
+	kvm->arch.kvmclock_offset = -ktime_to_ns(ktime_get_boottime());
 	pvclock_update_vm_gtod_copy(kvm);
 
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 61eda944..ab4c57a 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -159,6 +159,7 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 	gva_t addr, void *val, unsigned int bytes,
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 3d16e23..9468944 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -107,7 +107,8 @@ static notrace cycle_t vread_pvclock(int *mode)
 
 		pvti = get_pvti(cpu);
 
-		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
+		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags,
+						__native_read_tsc());
 
 		/*
 		 * Test we're still on the cpu as well as the version.

