647
650
/* Keep irq disabled to prevent changes to the clock */
648
651
local_irq_save(flags);
649
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
652
kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
650
653
ktime_get_ts(&ts);
651
654
monotonic_to_bootbased(&ts);
655
kernel_ns = timespec_to_ns(&ts);
652
656
local_irq_restore(flags);
659
* Time as measured by the TSC may go backwards when resetting the base
660
* tsc_timestamp. The reason for this is that the TSC resolution is
661
* higher than the resolution of the other clock scales. Thus, many
662
* possible measurements of the TSC correspond to one measurement of any
663
* other clock, and so a spread of values is possible. This is not a
664
* problem for the computation of the nanosecond clock; with TSC rates
665
* around 1GHz, there can only be a few cycles which correspond to one
666
* nanosecond value, and any path through this code will inevitably
667
* take longer than that. However, with the kernel_ns value itself,
668
* the precision may be much lower, down to HZ granularity. If the
669
* first sampling of TSC against kernel_ns ends in the low part of the
670
* range, and the second in the high end of the range, we can get:
672
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
674
* As the sampling errors potentially range in the thousands of cycles,
675
* it is possible such a time value has already been observed by the
676
* guest. To protect against this, we must compute the system time as
677
* observed by the guest and ensure the new system time is greater.
680
if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
681
max_kernel_ns = vcpu->last_guest_tsc -
682
vcpu->hv_clock.tsc_timestamp;
683
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
684
vcpu->hv_clock.tsc_to_system_mul,
685
vcpu->hv_clock.tsc_shift);
686
max_kernel_ns += vcpu->last_kernel_ns;
689
if (max_kernel_ns > kernel_ns)
690
kernel_ns = max_kernel_ns;
654
692
/* With all the info we got, fill in the values */
656
vcpu->hv_clock.system_time = ts.tv_nsec +
657
(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
694
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
695
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
696
vcpu->last_kernel_ns = kernel_ns;
697
vcpu->last_guest_tsc = tsc_timestamp;
660
700
* The interface expects us to write an even number signaling that the