/* $Id: TMAllCpu.cpp $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
22
#define LOG_GROUP LOG_GROUP_TM
23
#include <VBox/vmm/tm.h>
24
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
25
#include "TMInternal.h"
26
#include <VBox/vmm/vm.h>
29
#include <VBox/param.h>
31
#include <iprt/asm-math.h>
32
#include <iprt/assert.h>
37
* Gets the raw cpu tick from current virtual time.
39
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
43
u64 = TMVirtualSyncGet(pVM);
45
u64 = TMVirtualSyncGetNoCheck(pVM);
46
if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
47
u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
53
* Resumes the CPU timestamp counter ticking.
55
* @returns VBox status code.
56
* @param pVM Pointer to the VM.
57
* @param pVCpu Pointer to the VMCPU.
60
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
62
if (!pVCpu->tm.s.fTSCTicking)
64
pVCpu->tm.s.fTSCTicking = true;
65
if (pVM->tm.s.fTSCVirtualized)
67
/** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
68
* unpaused before the virtual time and stopped after it. */
69
if (pVM->tm.s.fTSCUseRealTSC)
70
pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
72
pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
78
return VERR_TM_TSC_ALREADY_TICKING;
83
* Resumes the CPU timestamp counter ticking.
85
* @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
86
* @param pVM Pointer to the VM.
87
* @param pVCpu Pointer to the VCPU.
89
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
91
if (!pVCpu->tm.s.fTSCTicking)
93
/* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
94
pVCpu->tm.s.fTSCTicking = true;
95
if (pVM->tm.s.fTSCVirtualized)
97
uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
98
AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
101
/* The first VCPU to resume. */
102
uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;
104
STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);
106
/* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
107
if (pVM->tm.s.fTSCUseRealTSC)
108
pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC;
110
pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
111
- pVM->tm.s.u64LastPausedTSC;
113
/* Calculate the offset for other VCPUs to use. */
114
pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
118
/* All other VCPUs (if any). */
119
pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
128
* Pauses the CPU timestamp counter ticking.
130
* @returns VBox status code.
131
* @param pVCpu Pointer to the VMCPU.
134
int tmCpuTickPause(PVMCPU pVCpu)
136
if (pVCpu->tm.s.fTSCTicking)
138
pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
139
pVCpu->tm.s.fTSCTicking = false;
143
return VERR_TM_TSC_ALREADY_PAUSED;
148
* Pauses the CPU timestamp counter ticking.
150
* @returns VBox status code.
151
* @param pVM Pointer to the VM.
152
* @param pVCpu Pointer to the VMCPU.
155
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
157
if (pVCpu->tm.s.fTSCTicking)
159
pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
160
pVCpu->tm.s.fTSCTicking = false;
162
uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
163
AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
166
/* When the last TSC stops, remember the value. */
167
STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
168
pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
173
return VERR_TM_TSC_ALREADY_PAUSED;
178
* Record why we refused to use offsetted TSC.
180
* Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
182
* @param pVM Pointer to the VM.
183
* @param pVCpu The current CPU.
185
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
188
/* Sample the reason for refusing. */
189
if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
190
STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
191
else if (!pVCpu->tm.s.fTSCTicking)
192
STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
193
else if (!pVM->tm.s.fTSCUseRealTSC)
195
if (pVM->tm.s.fVirtualSyncCatchUp)
197
if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
198
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
199
else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
200
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
201
else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
202
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
204
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
206
else if (!pVM->tm.s.fVirtualSyncTicking)
207
STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
208
else if (pVM->tm.s.fVirtualWarpDrive)
209
STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
215
* Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
217
* @returns true/false accordingly.
218
* @param pVCpu Pointer to the VMCPU.
219
* @param poffRealTSC The offset against the TSC of the current CPU.
223
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
225
PVM pVM = pVCpu->CTX_SUFF(pVM);
229
* 1. A fixed TSC, this is checked at init time.
230
* 2. That the TSC is ticking (we shouldn't be here if it isn't)
231
* 3. Either that we're using the real TSC as time source or
232
* a) we don't have any lag to catch up, and
233
* b) the virtual sync clock hasn't been halted by an expired timer, and
234
* c) we're not using warp drive (accelerated virtual guest time).
236
if ( pVM->tm.s.fMaybeUseOffsettedHostTSC
237
&& RT_LIKELY(pVCpu->tm.s.fTSCTicking)
238
&& ( pVM->tm.s.fTSCUseRealTSC
239
|| ( !pVM->tm.s.fVirtualSyncCatchUp
240
&& RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
241
&& !pVM->tm.s.fVirtualWarpDrive))
244
if (!pVM->tm.s.fTSCUseRealTSC)
246
/* The source is the timer synchronous virtual clock. */
247
Assert(pVM->tm.s.fTSCVirtualized);
251
uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
252
- pVCpu->tm.s.offTSCRawSrc;
253
/** @todo When we start collecting statistics on how much time we spend executing
254
* guest code before exiting, we should check this against the next virtual sync
255
* timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
256
* the chance that we'll get interrupted right after the timer expired. */
257
*poffRealTSC = u64Now - ASMReadTSC();
260
else if (poffRealTSC)
262
/* The source is the real TSC. */
263
if (pVM->tm.s.fTSCVirtualized)
264
*poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
268
/** @todo count this? */
272
#ifdef VBOX_WITH_STATISTICS
273
tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
280
* Calculates the number of host CPU ticks till the next virtual sync deadline.
282
* @note To save work, this function will not bother calculating the accurate
283
* tick count for deadlines that are more than a second ahead.
285
* @returns The number of host cpu ticks to the next deadline. Max one second.
286
* @param cNsToDeadline The number of nano seconds to the next virtual
289
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
291
AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
292
if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
293
return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
294
uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
296
TMCLOCK_FREQ_VIRTUAL);
298
cTicks -= 4000; /* fudge to account for overhead */
306
* Gets the next deadline in host CPU clock ticks and the TSC offset if we can
309
* @returns The number of host CPU clock ticks to the next timer deadline.
310
* @param pVCpu The current CPU.
311
* @param poffRealTSC The offset against the TSC of the current CPU.
312
* @thread EMT(pVCpu).
313
* @remarks Superset of TMCpuTickCanUseRealTSC.
315
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
317
PVM pVM = pVCpu->CTX_SUFF(pVM);
318
uint64_t cTicksToDeadline;
322
* 1. A fixed TSC, this is checked at init time.
323
* 2. That the TSC is ticking (we shouldn't be here if it isn't)
324
* 3. Either that we're using the real TSC as time source or
325
* a) we don't have any lag to catch up, and
326
* b) the virtual sync clock hasn't been halted by an expired timer, and
327
* c) we're not using warp drive (accelerated virtual guest time).
329
if ( pVM->tm.s.fMaybeUseOffsettedHostTSC
330
&& RT_LIKELY(pVCpu->tm.s.fTSCTicking)
331
&& ( pVM->tm.s.fTSCUseRealTSC
332
|| ( !pVM->tm.s.fVirtualSyncCatchUp
333
&& RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
334
&& !pVM->tm.s.fVirtualWarpDrive))
337
*pfOffsettedTsc = true;
338
if (!pVM->tm.s.fTSCUseRealTSC)
340
/* The source is the timer synchronous virtual clock. */
341
Assert(pVM->tm.s.fTSCVirtualized);
343
uint64_t cNsToDeadline;
344
uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
345
uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
346
? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
348
u64Now -= pVCpu->tm.s.offTSCRawSrc;
349
*poffRealTSC = u64Now - ASMReadTSC();
350
cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
354
/* The source is the real TSC. */
355
if (pVM->tm.s.fTSCVirtualized)
356
*poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
359
cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
364
#ifdef VBOX_WITH_STATISTICS
365
tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
367
*pfOffsettedTsc = false;
369
cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
371
return cTicksToDeadline;
376
* Read the current CPU timestamp counter.
378
* @returns Gets the CPU tsc.
379
* @param pVCpu Pointer to the VMCPU.
381
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
385
if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
387
PVM pVM = pVCpu->CTX_SUFF(pVM);
388
if (pVM->tm.s.fTSCVirtualized)
390
if (pVM->tm.s.fTSCUseRealTSC)
393
u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
394
u64 -= pVCpu->tm.s.offTSCRawSrc;
399
/* Always return a value higher than what the guest has already seen. */
400
if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
401
pVCpu->tm.s.u64TSCLastSeen = u64;
404
STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
405
pVCpu->tm.s.u64TSCLastSeen += 64; /* @todo choose a good increment here */
406
u64 = pVCpu->tm.s.u64TSCLastSeen;
410
u64 = pVCpu->tm.s.u64TSC;
416
* Read the current CPU timestamp counter.
418
* @returns Gets the CPU tsc.
419
* @param pVCpu Pointer to the VMCPU.
421
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
423
return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
428
* Read the current CPU timestamp counter, don't check for expired timers.
430
* @returns Gets the CPU tsc.
431
* @param pVCpu Pointer to the VMCPU.
433
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
435
return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
440
* Sets the current CPU timestamp counter.
442
* @returns VBox status code.
443
* @param pVM Pointer to the VM.
444
* @param pVCpu Pointer to the VMCPU.
445
* @param u64Tick The new timestamp value.
447
* @thread EMT which TSC is to be set.
449
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
451
VMCPU_ASSERT_EMT(pVCpu);
452
STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);
455
* This is easier to do when the TSC is paused since resume will
456
* do all the calculations for us. Actually, we don't need to
457
* call tmCpuTickPause here since we overwrite u64TSC anyway.
459
bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
460
pVCpu->tm.s.fTSCTicking = false;
461
pVCpu->tm.s.u64TSC = u64Tick;
462
pVCpu->tm.s.u64TSCLastSeen = u64Tick;
464
tmCpuTickResume(pVM, pVCpu);
465
/** @todo Try help synchronizing it better among the virtual CPUs? */
471
* Sets the last seen CPU timestamp counter.
473
* @returns VBox status code.
474
* @param pVCpu Pointer to the VMCPU.
475
* @param u64LastSeenTick The last seen timestamp value.
477
* @thread EMT which TSC is to be set.
479
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
481
VMCPU_ASSERT_EMT(pVCpu);
483
LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
484
if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
485
pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
490
* Gets the last seen CPU timestamp counter.
492
* @returns last seen TSC
493
* @param pVCpu Pointer to the VMCPU.
495
* @thread EMT which TSC is to be set.
497
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
499
VMCPU_ASSERT_EMT(pVCpu);
501
return pVCpu->tm.s.u64TSCLastSeen;
506
* Get the timestamp frequency.
508
* @returns Number of ticks per second.
511
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
513
if (pVM->tm.s.fTSCUseRealTSC)
515
uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
516
if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
517
return cTSCTicksPerSecond;
519
return pVM->tm.s.cTSCTicksPerSecond;