VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@92386

Last change on this file since 92386 was 92229, checked in by vboxsync, 3 years ago

VMM/GVMM: Fixes to the worker thread registration. Added missing GVMMR3.cpp file. bugref:10093 bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 116.3 KB
1/* $Id: GVMMR0.cpp 92229 2021-11-05 00:52:57Z vboxsync $ */
2/** @file
3 * GVMM - Global VM Manager.
4 */
5
6/*
7 * Copyright (C) 2007-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_gvmm GVMM - The Global VM Manager
20 *
21 * The Global VM Manager lives in ring-0. Its main function at the moment is
22 * to manage a list of all running VMs, keep a ring-0 only structure (GVM) for
23 * each of them, and assign them unique identifiers (so GMM can track page
24 * owners). The GVMM also manages some of the host CPU resources, like the
25 * periodic preemption timer.
26 *
27 * The GVMM will create a ring-0 object for each VM when it is registered; this
28 * is both for session cleanup purposes and for having a point where it is
29 * possible to implement usage policies later (in SUPR0ObjRegister).
30 *
31 *
32 * @section sec_gvmm_ppt Periodic Preemption Timer (PPT)
33 *
34 * On systems that sport a high resolution kernel timer API, we use per-cpu
35 * timers to generate interrupts that preempt VT-x, AMD-V and raw-mode guest
36 * execution. The timer frequency is calculated by taking the max
37 * TMCalcHostTimerFrequency for all VMs running on a CPU for the last ~160 ms
38 * (RT_ELEMENTS((PGVMMHOSTCPU)0, Ppt.aHzHistory) *
39 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS).
40 *
41 * The TMCalcHostTimerFrequency() part of things takes the max
42 * TMTimerSetFrequencyHint() value and adjusts it by the current catch-up percent,
43 * warp drive percent and some fudge factors. VMMR0.cpp reports the result via
44 * GVMMR0SchedUpdatePeriodicPreemptionTimer() before switching to the VT-x,
45 * AMD-V and raw-mode execution environments.
46 */
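/*
 * Illustrative sketch: roughly how an EMT would feed the PPT as described above --
 * compute the desired frequency via TM and report it before entering guest
 * execution.  The exact signatures of TMCalcHostTimerFrequency() and
 * GVMMR0SchedUpdatePeriodicPreemptionTimer() are assumptions here; see the real
 * call sites in VMMR0.cpp.
 */
#if 0 /* example only */
static void vmmR0ExamplePptUpdate(PGVM pGVM, PGVMCPU pGVCpu)
{
    /* Max of the timer frequency hints, adjusted for catch-up/warp and fudge factors. */
    uint32_t const uDesiredHz = TMCalcHostTimerFrequency(pGVM, pGVCpu);
    /* Hand it to the GVMM so the per-CPU preemption timer can be reprogrammed. */
    GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, uDesiredHz);
}
#endif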
47
48
49/*********************************************************************************************************************************
50* Header Files *
51*********************************************************************************************************************************/
52#define LOG_GROUP LOG_GROUP_GVMM
53#include <VBox/vmm/gvmm.h>
54#include <VBox/vmm/gmm.h>
55#include "GVMMR0Internal.h"
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/iom.h>
58#include <VBox/vmm/pdm.h>
59#include <VBox/vmm/pgm.h>
60#include <VBox/vmm/vmm.h>
61#ifdef VBOX_WITH_NEM_R0
62# include <VBox/vmm/nem.h>
63#endif
64#include <VBox/vmm/vmcpuset.h>
65#include <VBox/vmm/vmcc.h>
66#include <VBox/param.h>
67#include <VBox/err.h>
68
69#include <iprt/asm.h>
70#include <iprt/asm-amd64-x86.h>
71#include <iprt/critsect.h>
72#include <iprt/mem.h>
73#include <iprt/semaphore.h>
74#include <iprt/time.h>
75#include <VBox/log.h>
76#include <iprt/thread.h>
77#include <iprt/process.h>
78#include <iprt/param.h>
79#include <iprt/string.h>
80#include <iprt/assert.h>
81#include <iprt/mem.h>
82#include <iprt/memobj.h>
83#include <iprt/mp.h>
84#include <iprt/cpuset.h>
85#include <iprt/spinlock.h>
86#include <iprt/timer.h>
87
88#include "dtrace/VBoxVMM.h"
89
90
91/*********************************************************************************************************************************
92* Defined Constants And Macros *
93*********************************************************************************************************************************/
94#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(DOXYGEN_RUNNING)
95/** Define this to enable the periodic preemption timer. */
96# define GVMM_SCHED_WITH_PPT
97#endif
98
99
100/** Special value that GVMMR0DeregisterVCpu sets. */
101#define GVMM_RTNATIVETHREAD_DESTROYED (~(RTNATIVETHREAD)1)
102AssertCompile(GVMM_RTNATIVETHREAD_DESTROYED != NIL_RTNATIVETHREAD);
103
104
105/*********************************************************************************************************************************
106* Structures and Typedefs *
107*********************************************************************************************************************************/
108
109/**
110 * Global VM handle.
111 */
112typedef struct GVMHANDLE
113{
114 /** The index of the next handle in the list (free or used). (0 is nil.) */
115 uint16_t volatile iNext;
116 /** Our own index / handle value. */
117 uint16_t iSelf;
118 /** The process ID of the handle owner.
119 * This is used for access checks. */
120 RTPROCESS ProcId;
121 /** The pointer to the ring-0 only (aka global) VM structure. */
122 PGVM pGVM;
123 /** The virtual machine object. */
124 void *pvObj;
125 /** The session this VM is associated with. */
126 PSUPDRVSESSION pSession;
127 /** The ring-0 handle of the EMT0 thread.
128 * This is used for ownership checks as well as looking up a VM handle by thread
129 * at times like assertions. */
130 RTNATIVETHREAD hEMT0;
131} GVMHANDLE;
132/** Pointer to a global VM handle. */
133typedef GVMHANDLE *PGVMHANDLE;
134
135/** Number of GVM handles (including the NIL handle). */
136#if HC_ARCH_BITS == 64
137# define GVMM_MAX_HANDLES 8192
138#else
139# define GVMM_MAX_HANDLES 128
140#endif
141
142/**
143 * Per host CPU GVMM data.
144 */
145typedef struct GVMMHOSTCPU
146{
147 /** Magic number (GVMMHOSTCPU_MAGIC). */
148 uint32_t volatile u32Magic;
149 /** The CPU ID. */
150 RTCPUID idCpu;
151 /** The CPU set index. */
152 uint32_t idxCpuSet;
153
154#ifdef GVMM_SCHED_WITH_PPT
155 /** Periodic preemption timer data. */
156 struct
157 {
158 /** The handle to the periodic preemption timer. */
159 PRTTIMER pTimer;
160 /** Spinlock protecting the data below. */
161 RTSPINLOCK hSpinlock;
162 /** The smallest Hz that we need to care about. (static) */
163 uint32_t uMinHz;
164 /** The number of ticks between each historization. */
165 uint32_t cTicksHistoriziationInterval;
166 /** The current historization tick (counting up to
167 * cTicksHistoriziationInterval and then resetting). */
168 uint32_t iTickHistorization;
169 /** The current timer interval. This is set to 0 when inactive. */
170 uint32_t cNsInterval;
171 /** The current timer frequency. This is set to 0 when inactive. */
172 uint32_t uTimerHz;
173 /** The current max frequency reported by the EMTs.
174 * This gets historicized and reset by the timer callback. This is
175 * read without holding the spinlock, so needs atomic updating. */
176 uint32_t volatile uDesiredHz;
177 /** Whether the timer was started or not. */
178 bool volatile fStarted;
179 /** Set if we're starting the timer. */
180 bool volatile fStarting;
181 /** The index of the next history entry (mod it). */
182 uint32_t iHzHistory;
183 /** Historicized uDesiredHz values. The array wraps around; new entries
184 * are added at iHzHistory. This is updated approximately every
185 * GVMMHOSTCPU_PPT_HIST_INTERVAL_NS by the timer callback. */
186 uint32_t aHzHistory[8];
187 /** Statistics counter for recording the number of interval changes. */
188 uint32_t cChanges;
189 /** Statistics counter for recording the number of timer starts. */
190 uint32_t cStarts;
191 } Ppt;
192#endif /* GVMM_SCHED_WITH_PPT */
193
194} GVMMHOSTCPU;
195/** Pointer to the per host CPU GVMM data. */
196typedef GVMMHOSTCPU *PGVMMHOSTCPU;
197/** The GVMMHOSTCPU::u32Magic value (Petra, Tanya & Rachel Haden). */
198#define GVMMHOSTCPU_MAGIC UINT32_C(0x19711011)
199/** The interval one history entry should cover (approximately), given in
200 * nanoseconds. */
201#define GVMMHOSTCPU_PPT_HIST_INTERVAL_NS UINT32_C(20000000)
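/* Note: with the 8 aHzHistory entries above at ~20 ms (20 000 000 ns) each, the
 * frequency history covers roughly 8 * 20 ms = 160 ms, which is the "~160 ms"
 * window mentioned in the @page text at the top of this file. */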
202
203
204/**
205 * The GVMM instance data.
206 */
207typedef struct GVMM
208{
209 /** Eyecatcher / magic. */
210 uint32_t u32Magic;
211 /** The index of the head of the free handle chain. (0 is nil.) */
212 uint16_t volatile iFreeHead;
213 /** The index of the head of the active handle chain. (0 is nil.) */
214 uint16_t volatile iUsedHead;
215 /** The number of VMs. */
216 uint16_t volatile cVMs;
217 /** Alignment padding. */
218 uint16_t u16Reserved;
219 /** The number of EMTs. */
220 uint32_t volatile cEMTs;
221 /** The number of EMTs that have halted in GVMMR0SchedHalt. */
222 uint32_t volatile cHaltedEMTs;
223 /** Mini lock for restricting early wake-ups to one thread. */
224 bool volatile fDoingEarlyWakeUps;
225 bool afPadding[3]; /**< explicit alignment padding. */
226 /** When the next halted or sleeping EMT will wake up.
227 * This is set to 0 when it needs recalculating and to UINT64_MAX when
228 * there are no halted or sleeping EMTs in the GVMM. */
229 uint64_t uNsNextEmtWakeup;
230 /** The lock used to serialize VM creation, destruction and associated events that
231 * aren't performance critical. Owners may acquire the list lock. */
232 RTCRITSECT CreateDestroyLock;
233 /** The lock used to serialize used list updates and accesses.
234 * This indirectly includes scheduling since the scheduler will have to walk the
235 * used list to examine running VMs. Owners may not acquire any other locks. */
236 RTCRITSECTRW UsedLock;
237 /** The handle array.
238 * The size of this array defines the maximum number of currently running VMs.
239 * The first entry is unused as it represents the NIL handle. */
240 GVMHANDLE aHandles[GVMM_MAX_HANDLES];
241
242 /** @gcfgm{/GVMM/cEMTsMeansCompany, 32-bit, 0, UINT32_MAX, 1}
243 * The number of EMTs that means we no longer consider ourselves alone on a
244 * CPU/Core.
245 */
246 uint32_t cEMTsMeansCompany;
247 /** @gcfgm{/GVMM/MinSleepAlone,32-bit, 0, 100000000, 750000, ns}
248 * The minimum sleep time for when we're alone, in nanoseconds.
249 */
250 uint32_t nsMinSleepAlone;
251 /** @gcfgm{/GVMM/MinSleepCompany,32-bit,0, 100000000, 15000, ns}
252 * The minimum sleep time for when we've got company, in nanoseconds.
253 */
254 uint32_t nsMinSleepCompany;
255 /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
256 * The limit for the first round of early wake-ups, given in nanoseconds.
257 */
258 uint32_t nsEarlyWakeUp1;
259 /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
260 * The limit for the second round of early wake-ups, given in nanoseconds.
261 */
262 uint32_t nsEarlyWakeUp2;
263
264 /** Set if we're doing early wake-ups.
265 * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2. */
266 bool volatile fDoEarlyWakeUps;
267
268 /** The number of entries in the host CPU array (aHostCpus). */
269 uint32_t cHostCpus;
270 /** Per host CPU data (variable length). */
271 GVMMHOSTCPU aHostCpus[1];
272} GVMM;
273AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8);
274AssertCompileMemberAlignment(GVMM, UsedLock, 8);
275AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8);
276/** Pointer to the GVMM instance data. */
277typedef GVMM *PGVMM;
278
279/** The GVMM::u32Magic value (Charlie Haden). */
280#define GVMM_MAGIC UINT32_C(0x19370806)
281
282
283
284/*********************************************************************************************************************************
285* Global Variables *
286*********************************************************************************************************************************/
287/** Pointer to the GVMM instance data.
288 * (Just my general dislike for global variables.) */
289static PGVMM g_pGVMM = NULL;
290
291/** Macro for obtaining and validating the g_pGVMM pointer.
292 * On failure it will return from the invoking function with the specified return value.
293 *
294 * @param pGVMM The name of the pGVMM variable.
295 * @param rc The return value on failure. Use VERR_GVMM_INSTANCE for VBox
296 * status codes.
297 */
298#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
299 do { \
300 (pGVMM) = g_pGVMM;\
301 AssertPtrReturn((pGVMM), (rc)); \
302 AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
303 } while (0)
304
305/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
306 * On failure it will return from the invoking function.
307 *
308 * @param pGVMM The name of the pGVMM variable.
309 */
310#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
311 do { \
312 (pGVMM) = g_pGVMM;\
313 AssertPtrReturnVoid((pGVMM)); \
314 AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
315 } while (0)
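/*
 * Usage sketch for the two macros above (mirrors what GVMMR0SetConfig and
 * friends further down do); kept under #if 0 as it is illustrative only.
 */
#if 0 /* example only */
GVMMR0DECL(int) gvmmR0ExampleOperation(void)
{
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); /* returns VERR_GVMM_INSTANCE on a bad/missing instance */
    /* ... pGVMM is now safe to dereference ... */
    return VINF_SUCCESS;
}
#endif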
316
317
318/*********************************************************************************************************************************
319* Internal Functions *
320*********************************************************************************************************************************/
321static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession);
322static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
323static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
324static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM);
325
326#ifdef GVMM_SCHED_WITH_PPT
327static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
328#endif
329
330
331/**
332 * Initializes the GVMM.
333 *
334 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
335 *
336 * @returns VBox status code.
337 */
338GVMMR0DECL(int) GVMMR0Init(void)
339{
340 LogFlow(("GVMMR0Init:\n"));
341
342 /*
343 * Allocate and initialize the instance data.
344 */
345 uint32_t cHostCpus = RTMpGetArraySize();
346 AssertMsgReturn(cHostCpus > 0 && cHostCpus < _64K, ("%d", (int)cHostCpus), VERR_GVMM_HOST_CPU_RANGE);
347
348 PGVMM pGVMM = (PGVMM)RTMemAllocZ(RT_UOFFSETOF_DYN(GVMM, aHostCpus[cHostCpus]));
349 if (!pGVMM)
350 return VERR_NO_MEMORY;
351 int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
352 "GVMM-CreateDestroyLock");
353 if (RT_SUCCESS(rc))
354 {
355 rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock");
356 if (RT_SUCCESS(rc))
357 {
358 pGVMM->u32Magic = GVMM_MAGIC;
359 pGVMM->iUsedHead = 0;
360 pGVMM->iFreeHead = 1;
361
362 /* the nil handle */
363 pGVMM->aHandles[0].iSelf = 0;
364 pGVMM->aHandles[0].iNext = 0;
365
366 /* the tail */
367 unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
368 pGVMM->aHandles[i].iSelf = i;
369 pGVMM->aHandles[i].iNext = 0; /* nil */
370
371 /* the rest */
372 while (i-- > 1)
373 {
374 pGVMM->aHandles[i].iSelf = i;
375 pGVMM->aHandles[i].iNext = i + 1;
376 }
377
378 /* The default configuration values. */
379 uint32_t cNsResolution = RTSemEventMultiGetResolution();
380 pGVMM->cEMTsMeansCompany = 1; /** @todo should be adjusted to relative to the cpu count or something... */
381 if (cNsResolution >= 5*RT_NS_100US)
382 {
383 pGVMM->nsMinSleepAlone = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
384 pGVMM->nsMinSleepCompany = 15000 /* ns (0.015 ms) */;
385 pGVMM->nsEarlyWakeUp1 = 25000 /* ns (0.025 ms) */;
386 pGVMM->nsEarlyWakeUp2 = 50000 /* ns (0.050 ms) */;
387 }
388 else if (cNsResolution > RT_NS_100US)
389 {
390 pGVMM->nsMinSleepAlone = cNsResolution / 2;
391 pGVMM->nsMinSleepCompany = cNsResolution / 4;
392 pGVMM->nsEarlyWakeUp1 = 0;
393 pGVMM->nsEarlyWakeUp2 = 0;
394 }
395 else
396 {
397 pGVMM->nsMinSleepAlone = 2000;
398 pGVMM->nsMinSleepCompany = 2000;
399 pGVMM->nsEarlyWakeUp1 = 0;
400 pGVMM->nsEarlyWakeUp2 = 0;
401 }
402 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
403
404 /* The host CPU data. */
405 pGVMM->cHostCpus = cHostCpus;
406 uint32_t iCpu = cHostCpus;
407 RTCPUSET PossibleSet;
408 RTMpGetSet(&PossibleSet);
409 while (iCpu-- > 0)
410 {
411 pGVMM->aHostCpus[iCpu].idxCpuSet = iCpu;
412#ifdef GVMM_SCHED_WITH_PPT
413 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
414 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
415 pGVMM->aHostCpus[iCpu].Ppt.uMinHz = 5; /** @todo Add some API which figures this one out. (not *that* important) */
416 pGVMM->aHostCpus[iCpu].Ppt.cTicksHistoriziationInterval = 1;
417 //pGVMM->aHostCpus[iCpu].Ppt.iTickHistorization = 0;
418 //pGVMM->aHostCpus[iCpu].Ppt.cNsInterval = 0;
419 //pGVMM->aHostCpus[iCpu].Ppt.uTimerHz = 0;
420 //pGVMM->aHostCpus[iCpu].Ppt.uDesiredHz = 0;
421 //pGVMM->aHostCpus[iCpu].Ppt.fStarted = false;
422 //pGVMM->aHostCpus[iCpu].Ppt.fStarting = false;
423 //pGVMM->aHostCpus[iCpu].Ppt.iHzHistory = 0;
424 //pGVMM->aHostCpus[iCpu].Ppt.aHzHistory = {0};
425#endif
426
427 if (RTCpuSetIsMember(&PossibleSet, iCpu))
428 {
429 pGVMM->aHostCpus[iCpu].idCpu = RTMpCpuIdFromSetIndex(iCpu);
430 pGVMM->aHostCpus[iCpu].u32Magic = GVMMHOSTCPU_MAGIC;
431
432#ifdef GVMM_SCHED_WITH_PPT
433 rc = RTTimerCreateEx(&pGVMM->aHostCpus[iCpu].Ppt.pTimer,
434 50*1000*1000 /* whatever */,
435 RTTIMER_FLAGS_CPU(iCpu) | RTTIMER_FLAGS_HIGH_RES,
436 gvmmR0SchedPeriodicPreemptionTimerCallback,
437 &pGVMM->aHostCpus[iCpu]);
438 if (RT_SUCCESS(rc))
439 rc = RTSpinlockCreate(&pGVMM->aHostCpus[iCpu].Ppt.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "GVMM/CPU");
440 if (RT_FAILURE(rc))
441 {
442 while (iCpu < cHostCpus)
443 {
444 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
445 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
446 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
447 iCpu++;
448 }
449 break;
450 }
451#endif
452 }
453 else
454 {
455 pGVMM->aHostCpus[iCpu].idCpu = NIL_RTCPUID;
456 pGVMM->aHostCpus[iCpu].u32Magic = 0;
457 }
458 }
459 if (RT_SUCCESS(rc))
460 {
461 g_pGVMM = pGVMM;
462 LogFlow(("GVMMR0Init: pGVMM=%p cHostCpus=%u\n", pGVMM, cHostCpus));
463 return VINF_SUCCESS;
464 }
465
466 /* bail out. */
467 RTCritSectRwDelete(&pGVMM->UsedLock);
468 }
469 RTCritSectDelete(&pGVMM->CreateDestroyLock);
470 }
471
472 RTMemFree(pGVMM);
473 return rc;
474}
475
476
477/**
478 * Terminates the GVMM.
479 *
480 * This is called while owning the loader semaphore (see supdrvLdrFree()).
481 * And unless something is wrong, there should be absolutely no VMs
482 * registered at this point.
483 */
484GVMMR0DECL(void) GVMMR0Term(void)
485{
486 LogFlow(("GVMMR0Term:\n"));
487
488 PGVMM pGVMM = g_pGVMM;
489 g_pGVMM = NULL;
490 if (RT_UNLIKELY(!RT_VALID_PTR(pGVMM)))
491 {
492 SUPR0Printf("GVMMR0Term: pGVMM=%RKv\n", pGVMM);
493 return;
494 }
495
496 /*
497 * First of all, stop all active timers.
498 */
499 uint32_t cActiveTimers = 0;
500 uint32_t iCpu = pGVMM->cHostCpus;
501 while (iCpu-- > 0)
502 {
503 ASMAtomicWriteU32(&pGVMM->aHostCpus[iCpu].u32Magic, ~GVMMHOSTCPU_MAGIC);
504#ifdef GVMM_SCHED_WITH_PPT
505 if ( pGVMM->aHostCpus[iCpu].Ppt.pTimer != NULL
506 && RT_SUCCESS(RTTimerStop(pGVMM->aHostCpus[iCpu].Ppt.pTimer)))
507 cActiveTimers++;
508#endif
509 }
510 if (cActiveTimers)
511 RTThreadSleep(1); /* fudge */
512
513 /*
514 * Invalidate the instance data and free resources.
515 */
516 pGVMM->u32Magic = ~GVMM_MAGIC;
517 RTCritSectRwDelete(&pGVMM->UsedLock);
518 RTCritSectDelete(&pGVMM->CreateDestroyLock);
519
520 pGVMM->iFreeHead = 0;
521 if (pGVMM->iUsedHead)
522 {
523 SUPR0Printf("GVMMR0Term: iUsedHead=%#x! (cVMs=%#x cEMTs=%#x)\n", pGVMM->iUsedHead, pGVMM->cVMs, pGVMM->cEMTs);
524 pGVMM->iUsedHead = 0;
525 }
526
527#ifdef GVMM_SCHED_WITH_PPT
528 iCpu = pGVMM->cHostCpus;
529 while (iCpu-- > 0)
530 {
531 RTTimerDestroy(pGVMM->aHostCpus[iCpu].Ppt.pTimer);
532 pGVMM->aHostCpus[iCpu].Ppt.pTimer = NULL;
533 RTSpinlockDestroy(pGVMM->aHostCpus[iCpu].Ppt.hSpinlock);
534 pGVMM->aHostCpus[iCpu].Ppt.hSpinlock = NIL_RTSPINLOCK;
535 }
536#endif
537
538 RTMemFree(pGVMM);
539}
540
541
542/**
543 * A quick hack for setting global config values.
544 *
545 * @returns VBox status code.
546 *
547 * @param pSession The session handle. Used for authentication.
548 * @param pszName The variable name.
549 * @param u64Value The new value.
550 */
551GVMMR0DECL(int) GVMMR0SetConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t u64Value)
552{
553 /*
554 * Validate input.
555 */
556 PGVMM pGVMM;
557 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
558 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
559 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
560
561 /*
562 * String switch time!
563 */
564 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
565 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
566 int rc = VINF_SUCCESS;
567 pszName += sizeof("/GVMM/") - 1;
568 if (!strcmp(pszName, "cEMTsMeansCompany"))
569 {
570 if (u64Value <= UINT32_MAX)
571 pGVMM->cEMTsMeansCompany = u64Value;
572 else
573 rc = VERR_OUT_OF_RANGE;
574 }
575 else if (!strcmp(pszName, "MinSleepAlone"))
576 {
577 if (u64Value <= RT_NS_100MS)
578 pGVMM->nsMinSleepAlone = u64Value;
579 else
580 rc = VERR_OUT_OF_RANGE;
581 }
582 else if (!strcmp(pszName, "MinSleepCompany"))
583 {
584 if (u64Value <= RT_NS_100MS)
585 pGVMM->nsMinSleepCompany = u64Value;
586 else
587 rc = VERR_OUT_OF_RANGE;
588 }
589 else if (!strcmp(pszName, "EarlyWakeUp1"))
590 {
591 if (u64Value <= RT_NS_100MS)
592 {
593 pGVMM->nsEarlyWakeUp1 = u64Value;
594 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
595 }
596 else
597 rc = VERR_OUT_OF_RANGE;
598 }
599 else if (!strcmp(pszName, "EarlyWakeUp2"))
600 {
601 if (u64Value <= RT_NS_100MS)
602 {
603 pGVMM->nsEarlyWakeUp2 = u64Value;
604 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
605 }
606 else
607 rc = VERR_OUT_OF_RANGE;
608 }
609 else
610 rc = VERR_CFGM_VALUE_NOT_FOUND;
611 return rc;
612}
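/*
 * Usage sketch: how a ring-0 caller could tweak one of the @gcfgm knobs via the
 * API above.  The knob and value are illustrative; names must carry the "/GVMM/"
 * prefix and values are range checked as shown.
 */
#if 0 /* example only */
static int gvmmR0ExampleTuneScheduler(PSUPDRVSESSION pSession)
{
    return GVMMR0SetConfig(pSession, "/GVMM/MinSleepAlone", 500000 /* ns = 0.5 ms */);
}
#endif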
613
614
615/**
616 * A quick hack for getting global config values.
617 *
618 * @returns VBox status code.
619 *
620 * @param pSession The session handle. Used for authentication.
621 * @param pszName The variable name.
622 * @param pu64Value Where to return the value.
623 */
624GVMMR0DECL(int) GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
625{
626 /*
627 * Validate input.
628 */
629 PGVMM pGVMM;
630 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
631 AssertPtrReturn(pSession, VERR_INVALID_HANDLE);
632 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
633 AssertPtrReturn(pu64Value, VERR_INVALID_POINTER);
634
635 /*
636 * String switch time!
637 */
638 if (strncmp(pszName, RT_STR_TUPLE("/GVMM/")))
639 return VERR_CFGM_VALUE_NOT_FOUND; /* borrow status codes from CFGM... */
640 int rc = VINF_SUCCESS;
641 pszName += sizeof("/GVMM/") - 1;
642 if (!strcmp(pszName, "cEMTsMeansCompany"))
643 *pu64Value = pGVMM->cEMTsMeansCompany;
644 else if (!strcmp(pszName, "MinSleepAlone"))
645 *pu64Value = pGVMM->nsMinSleepAlone;
646 else if (!strcmp(pszName, "MinSleepCompany"))
647 *pu64Value = pGVMM->nsMinSleepCompany;
648 else if (!strcmp(pszName, "EarlyWakeUp1"))
649 *pu64Value = pGVMM->nsEarlyWakeUp1;
650 else if (!strcmp(pszName, "EarlyWakeUp2"))
651 *pu64Value = pGVMM->nsEarlyWakeUp2;
652 else
653 rc = VERR_CFGM_VALUE_NOT_FOUND;
654 return rc;
655}
656
657
658/**
659 * Acquire the 'used' lock in shared mode.
660 *
661 * This prevents destruction of the VM while we're in ring-0.
662 *
663 * @returns IPRT status code, see RTSemFastMutexRequest.
664 * @param a_pGVMM The GVMM instance data.
665 * @sa GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK
666 */
667#define GVMMR0_USED_SHARED_LOCK(a_pGVMM) RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock)
668
669/**
670 * Release the 'used' lock when owning it in shared mode.
671 *
672 * @returns IPRT status code, see RTSemFastMutexRelease.
673 * @param a_pGVMM The GVMM instance data.
674 * @sa GVMMR0_USED_SHARED_LOCK
675 */
676#define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM) RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock)
677
678/**
679 * Acquire the 'used' lock in exclusive mode.
680 *
681 * Only use this function when making changes to the used list.
682 *
683 * @returns IPRT status code, see RTSemFastMutexRequest.
684 * @param a_pGVMM The GVMM instance data.
685 * @sa GVMMR0_USED_EXCLUSIVE_UNLOCK
686 */
687#define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM) RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock)
688
689/**
690 * Release the 'used' lock when owning it in exclusive mode.
691 *
692 * @returns IPRT status code, see RTSemFastMutexRelease.
693 * @param a_pGVMM The GVMM instance data.
694 * @sa GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK
695 */
696#define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM) RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock)
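/*
 * Usage sketch: the shared-lock pattern used when walking the used list (see
 * gvmmR0ByGVM and the scheduler code further down); illustrative only.
 */
#if 0 /* example only */
static void gvmmR0ExampleWalkUsedList(PGVMM pGVMM)
{
    int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    AssertRC(rc);
    for (uint16_t i = pGVMM->iUsedHead; i != 0; i = pGVMM->aHandles[i].iNext)
    {
        /* ... examine pGVMM->aHandles[i].pGVM, but do not take other locks here ... */
    }
    GVMMR0_USED_SHARED_UNLOCK(pGVMM);
}
#endif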
697
698
699/**
700 * Try acquire the 'create & destroy' lock.
701 *
702 * @returns IPRT status code, see RTSemFastMutexRequest.
703 * @param pGVMM The GVMM instance data.
704 */
705DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
706{
707 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
708 int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock);
709 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
710 return rc;
711}
712
713
714/**
715 * Release the 'create & destroy' lock.
716 *
717 * @returns IPRT status code, see RTSemFastMutexRequest.
718 * @param pGVMM The GVMM instance data.
719 */
720DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
721{
722 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
723 int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock);
724 AssertRC(rc);
725 return rc;
726}
727
728
729/**
730 * Request wrapper for the GVMMR0CreateVM API.
731 *
732 * @returns VBox status code.
733 * @param pReq The request buffer.
734 * @param pSession The session handle. The VM will be associated with this.
735 */
736GVMMR0DECL(int) GVMMR0CreateVMReq(PGVMMCREATEVMREQ pReq, PSUPDRVSESSION pSession)
737{
738 /*
739 * Validate the request.
740 */
741 if (!RT_VALID_PTR(pReq))
742 return VERR_INVALID_POINTER;
743 if (pReq->Hdr.cbReq != sizeof(*pReq))
744 return VERR_INVALID_PARAMETER;
745 if (pReq->pSession != pSession)
746 return VERR_INVALID_POINTER;
747
748 /*
749 * Execute it.
750 */
751 PGVM pGVM;
752 pReq->pVMR0 = NULL;
753 pReq->pVMR3 = NIL_RTR3PTR;
754 int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pGVM);
755 if (RT_SUCCESS(rc))
756 {
757 pReq->pVMR0 = pGVM; /** @todo don't expose this to ring-3, use a unique random number instead. */
758 pReq->pVMR3 = pGVM->pVMR3;
759 }
760 return rc;
761}
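/*
 * Illustrative sketch of the ring-3 side of the request validated above: how a
 * GVMMCREATEVMREQ would be filled in before being passed down.  The header magic
 * constant and the actual call path into ring-0 are assumptions, not taken from
 * this file.
 */
#if 0 /* example only */
static void gvmmExampleInitCreateVMReq(PSUPDRVSESSION pSession, uint32_t cCpus, GVMMCREATEVMREQ *pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC; /* assumed request header magic */
    pReq->Hdr.cbReq    = sizeof(*pReq);        /* must match, see the validation above */
    pReq->pSession     = pSession;             /* must be the calling session */
    pReq->cCpus        = cCpus;                /* 1..VMM_MAX_CPU_COUNT */
    pReq->pVMR0        = NULL;                 /* output */
    pReq->pVMR3        = NIL_RTR3PTR;          /* output */
}
#endif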
762
763
764/**
765 * Allocates the VM structure and registers it with GVM.
766 *
767 * The caller will become the VM owner and thereby the EMT.
768 *
769 * @returns VBox status code.
770 * @param pSession The support driver session.
771 * @param cCpus Number of virtual CPUs for the new VM.
772 * @param ppGVM Where to store the pointer to the VM structure.
773 *
774 * @thread EMT.
775 */
776GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PGVM *ppGVM)
777{
778 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
779 PGVMM pGVMM;
780 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
781
782 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
783 *ppGVM = NULL;
784
785 if ( cCpus == 0
786 || cCpus > VMM_MAX_CPU_COUNT)
787 return VERR_INVALID_PARAMETER;
788
789 RTNATIVETHREAD hEMT0 = RTThreadNativeSelf();
790 AssertReturn(hEMT0 != NIL_RTNATIVETHREAD, VERR_GVMM_BROKEN_IPRT);
791 RTPROCESS ProcId = RTProcSelf();
792 AssertReturn(ProcId != NIL_RTPROCESS, VERR_GVMM_BROKEN_IPRT);
793
794 /*
795 * The whole allocation process is protected by the lock.
796 */
797 int rc = gvmmR0CreateDestroyLock(pGVMM);
798 AssertRCReturn(rc, rc);
799
800 /*
801 * Only one VM per session.
802 */
803 if (SUPR0GetSessionVM(pSession) != NULL)
804 {
805 gvmmR0CreateDestroyUnlock(pGVMM);
806 SUPR0Printf("GVMMR0CreateVM: The session %p already got a VM: %p\n", pSession, SUPR0GetSessionVM(pSession));
807 return VERR_ALREADY_EXISTS;
808 }
809
810 /*
811 * Allocate a handle first so we don't waste resources unnecessarily.
812 */
813 uint16_t iHandle = pGVMM->iFreeHead;
814 if (iHandle)
815 {
816 PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];
817
818 /* consistency checks, a bit paranoid as always. */
819 if ( !pHandle->pGVM
820 && !pHandle->pvObj
821 && pHandle->iSelf == iHandle)
822 {
823 pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
824 if (pHandle->pvObj)
825 {
826 /*
827 * Move the handle from the free to used list and perform permission checks.
828 */
829 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
830 AssertRC(rc);
831
832 pGVMM->iFreeHead = pHandle->iNext;
833 pHandle->iNext = pGVMM->iUsedHead;
834 pGVMM->iUsedHead = iHandle;
835 pGVMM->cVMs++;
836
837 pHandle->pGVM = NULL;
838 pHandle->pSession = pSession;
839 pHandle->hEMT0 = NIL_RTNATIVETHREAD;
840 pHandle->ProcId = NIL_RTPROCESS;
841
842 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
843
844 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
845 if (RT_SUCCESS(rc))
846 {
847 /*
848 * Allocate memory for the VM structure (combined VM + GVM).
849 */
850 const uint32_t cbVM = RT_UOFFSETOF_DYN(GVM, aCpus[cCpus]);
851 const uint32_t cPages = RT_ALIGN_32(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
852 RTR0MEMOBJ hVMMemObj = NIL_RTR0MEMOBJ;
853 rc = RTR0MemObjAllocPage(&hVMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
854 if (RT_SUCCESS(rc))
855 {
856 PGVM pGVM = (PGVM)RTR0MemObjAddress(hVMMemObj);
857 AssertPtr(pGVM);
858
859 /*
860 * Initialise the structure.
861 */
862 RT_BZERO(pGVM, cPages << PAGE_SHIFT);
863 gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
864 pGVM->gvmm.s.VMMemObj = hVMMemObj;
865 rc = GMMR0InitPerVMData(pGVM);
866 int rc2 = PGMR0InitPerVMData(pGVM);
867 int rc3 = VMMR0InitPerVMData(pGVM);
868 DBGFR0InitPerVMData(pGVM);
869 PDMR0InitPerVMData(pGVM);
870 IOMR0InitPerVMData(pGVM);
871 TMR0InitPerVMData(pGVM);
872 if (RT_SUCCESS(rc) && RT_SUCCESS(rc2) && RT_SUCCESS(rc3))
873 {
874 /*
875 * Allocate page array.
876 * This currently has to be made available to ring-3, but this should change eventually.
877 */
878 rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
879 if (RT_SUCCESS(rc))
880 {
881 PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
882 for (uint32_t iPage = 0; iPage < cPages; iPage++)
883 {
884 paPages[iPage].uReserved = 0;
885 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
886 Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
887 }
888
889 /*
890 * Map the page array, VM and VMCPU structures into ring-3.
891 */
892 AssertCompileSizeAlignment(VM, PAGE_SIZE);
893 rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
894 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
895 0 /*offSub*/, sizeof(VM));
896 for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
897 {
898 AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
899 rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
900 (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
901 RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
902 }
903 if (RT_SUCCESS(rc))
904 rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
905 0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
906 NIL_RTR0PROCESS);
907 if (RT_SUCCESS(rc))
908 {
909 /*
910 * Initialize all the VM pointers.
911 */
912 PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
913 AssertMsg(RTR0MemUserIsValidAddr(pVMR3) && pVMR3 != NIL_RTR3PTR, ("%p\n", pVMR3));
914
915 for (VMCPUID i = 0; i < cCpus; i++)
916 {
917 pGVM->aCpus[i].pVMR0 = pGVM;
918 pGVM->aCpus[i].pVMR3 = pVMR3;
919 pGVM->apCpusR3[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
920 pGVM->aCpus[i].pVCpuR3 = pGVM->apCpusR3[i];
921 pGVM->apCpusR0[i] = &pGVM->aCpus[i];
922 AssertMsg(RTR0MemUserIsValidAddr(pGVM->apCpusR3[i]) && pGVM->apCpusR3[i] != NIL_RTR3PTR,
923 ("apCpusR3[%u]=%p\n", i, pGVM->apCpusR3[i]));
924 }
925
926 pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
927 AssertMsg(RTR0MemUserIsValidAddr(pGVM->paVMPagesR3) && pGVM->paVMPagesR3 != NIL_RTR3PTR,
928 ("%p\n", pGVM->paVMPagesR3));
929
930 /*
931 * Complete the handle - take the UsedLock sem just to be careful.
932 */
933 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
934 AssertRC(rc);
935
936 pHandle->pGVM = pGVM;
937 pHandle->hEMT0 = hEMT0;
938 pHandle->ProcId = ProcId;
939 pGVM->pVMR3 = pVMR3;
940 pGVM->pVMR3Unsafe = pVMR3;
941 pGVM->aCpus[0].hEMT = hEMT0;
942 pGVM->aCpus[0].hNativeThreadR0 = hEMT0;
943 pGVM->aCpus[0].cEmtHashCollisions = 0;
944 uint32_t const idxHash = GVMM_EMT_HASH_1(hEMT0);
945 pGVM->aCpus[0].gvmm.s.idxEmtHash = (uint16_t)idxHash;
946 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hEMT0;
947 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = 0;
948 pGVMM->cEMTs += cCpus;
949
950 /* Associate it with the session and create the context hook for EMT0. */
951 rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
952 if (RT_SUCCESS(rc))
953 {
954 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
955 if (RT_SUCCESS(rc))
956 {
957 /*
958 * Done!
959 */
960 VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
961
962 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
963 gvmmR0CreateDestroyUnlock(pGVMM);
964
965 CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
966
967 *ppGVM = pGVM;
968 Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
969 return VINF_SUCCESS;
970 }
971
972 SUPR0SetSessionVM(pSession, NULL, NULL);
973 }
974 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
975 }
976
977 /* Cleanup mappings. */
978 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
979 {
980 RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
981 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
982 }
983 for (VMCPUID i = 0; i < cCpus; i++)
984 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
985 {
986 RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
987 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
988 }
989 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
990 {
991 RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
992 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
993 }
994 }
995 }
996 else
997 {
998 if (RT_SUCCESS_NP(rc))
999 rc = rc2;
1000 if (RT_SUCCESS_NP(rc))
1001 rc = rc3;
1002 }
1003 }
1004 }
1005 /* else: The user wasn't permitted to create this VM. */
1006
1007 /*
1008 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
1009 * object reference here. A little extra mess because of non-recursive lock.
1010 */
1011 void *pvObj = pHandle->pvObj;
1012 pHandle->pvObj = NULL;
1013 gvmmR0CreateDestroyUnlock(pGVMM);
1014
1015 SUPR0ObjRelease(pvObj, pSession);
1016
1017 SUPR0Printf("GVMMR0CreateVM: failed, rc=%Rrc\n", rc);
1018 return rc;
1019 }
1020
1021 rc = VERR_NO_MEMORY;
1022 }
1023 else
1024 rc = VERR_GVMM_IPE_1;
1025 }
1026 else
1027 rc = VERR_GVM_TOO_MANY_VMS;
1028
1029 gvmmR0CreateDestroyUnlock(pGVMM);
1030 return rc;
1031}
1032
1033
1034/**
1035 * Initializes the per VM data belonging to GVMM.
1036 *
1037 * @param pGVM Pointer to the global VM structure.
1038 * @param hSelf The handle.
1039 * @param cCpus The CPU count.
1040 * @param pSession The session this VM is associated with.
1041 */
1042static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession)
1043{
1044 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
1045 AssertCompile(RT_SIZEOFMEMB(GVMCPU,gvmm.s) <= RT_SIZEOFMEMB(GVMCPU,gvmm.padding));
1046 AssertCompileMemberAlignment(VM, cpum, 64);
1047 AssertCompileMemberAlignment(VM, tm, 64);
1048
1049 /* GVM: */
1050 pGVM->u32Magic = GVM_MAGIC;
1051 pGVM->hSelf = hSelf;
1052 pGVM->cCpus = cCpus;
1053 pGVM->pSession = pSession;
1054 pGVM->pSelf = pGVM;
1055
1056 /* VM: */
1057 pGVM->enmVMState = VMSTATE_CREATING;
1058 pGVM->hSelfUnsafe = hSelf;
1059 pGVM->pSessionUnsafe = pSession;
1060 pGVM->pVMR0ForCall = pGVM;
1061 pGVM->cCpusUnsafe = cCpus;
1062 pGVM->uCpuExecutionCap = 100; /* default is no cap. */
1063 pGVM->uStructVersion = 1;
1064 pGVM->cbSelf = sizeof(VM);
1065 pGVM->cbVCpu = sizeof(VMCPU);
1066
1067 /* GVMM: */
1068 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
1069 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1070 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1071 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1072 pGVM->gvmm.s.fDoneVMMR0Init = false;
1073 pGVM->gvmm.s.fDoneVMMR0Term = false;
1074
1075 for (size_t i = 0; i < RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads); i++)
1076 {
1077 pGVM->gvmm.s.aWorkerThreads[i].hNativeThread = NIL_RTNATIVETHREAD;
1078 pGVM->gvmm.s.aWorkerThreads[i].hNativeThreadR3 = NIL_RTNATIVETHREAD;
1079 }
1080 pGVM->gvmm.s.aWorkerThreads[0].hNativeThread = GVMM_RTNATIVETHREAD_DESTROYED; /* invalid entry */
1081
1082 for (size_t i = 0; i < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash); i++)
1083 {
1084 pGVM->gvmm.s.aEmtHash[i].hNativeEmt = NIL_RTNATIVETHREAD;
1085 pGVM->gvmm.s.aEmtHash[i].idVCpu = NIL_VMCPUID;
1086 }
1087
1088 /*
1089 * Per virtual CPU.
1090 */
1091 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1092 {
1093 pGVM->aCpus[i].idCpu = i;
1094 pGVM->aCpus[i].idCpuUnsafe = i;
1095 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1096 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1097 pGVM->aCpus[i].gvmm.s.idxEmtHash = UINT16_MAX;
1098 pGVM->aCpus[i].hEMT = NIL_RTNATIVETHREAD;
1099 pGVM->aCpus[i].pGVM = pGVM;
1100 pGVM->aCpus[i].idHostCpu = NIL_RTCPUID;
1101 pGVM->aCpus[i].iHostCpuSet = UINT32_MAX;
1102 pGVM->aCpus[i].hNativeThread = NIL_RTNATIVETHREAD;
1103 pGVM->aCpus[i].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1104 pGVM->aCpus[i].enmState = VMCPUSTATE_STOPPED;
1105 pGVM->aCpus[i].pVCpuR0ForVtg = &pGVM->aCpus[i];
1106 }
1107}
1108
1109
1110/**
1111 * Does the VM initialization.
1112 *
1113 * @returns VBox status code.
1114 * @param pGVM The global (ring-0) VM structure.
1115 */
1116GVMMR0DECL(int) GVMMR0InitVM(PGVM pGVM)
1117{
1118 LogFlow(("GVMMR0InitVM: pGVM=%p\n", pGVM));
1119
1120 int rc = VERR_INTERNAL_ERROR_3;
1121 if ( !pGVM->gvmm.s.fDoneVMMR0Init
1122 && pGVM->aCpus[0].gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
1123 {
1124 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1125 {
1126 rc = RTSemEventMultiCreate(&pGVM->aCpus[i].gvmm.s.HaltEventMulti);
1127 if (RT_FAILURE(rc))
1128 {
1129 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1130 break;
1131 }
1132 }
1133 }
1134 else
1135 rc = VERR_WRONG_ORDER;
1136
1137 LogFlow(("GVMMR0InitVM: returns %Rrc\n", rc));
1138 return rc;
1139}
1140
1141
1142/**
1143 * Indicates that we're done with the ring-0 initialization
1144 * of the VM.
1145 *
1146 * @param pGVM The global (ring-0) VM structure.
1147 * @thread EMT(0)
1148 */
1149GVMMR0DECL(void) GVMMR0DoneInitVM(PGVM pGVM)
1150{
1151 /* Set the indicator. */
1152 pGVM->gvmm.s.fDoneVMMR0Init = true;
1153}
1154
1155
1156/**
1157 * Indicates that we're doing the ring-0 termination of the VM.
1158 *
1159 * @returns true if termination hasn't been done already, false if it has.
1160 * @param pGVM Pointer to the global VM structure. Optional.
1161 * @thread EMT(0) or session cleanup thread.
1162 */
1163GVMMR0DECL(bool) GVMMR0DoingTermVM(PGVM pGVM)
1164{
1165 /* Validate the VM structure, state and handle. */
1166 AssertPtrReturn(pGVM, false);
1167
1168 /* Set the indicator. */
1169 if (pGVM->gvmm.s.fDoneVMMR0Term)
1170 return false;
1171 pGVM->gvmm.s.fDoneVMMR0Term = true;
1172 return true;
1173}
1174
1175
1176/**
1177 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
1178 *
1179 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
1180 * and the caller is not the EMT thread, unfortunately. For security reasons, it
1181 * would've been nice if the caller was actually the EMT thread or that we somehow
1182 * could've associated the calling thread with the VM up front.
1183 *
1184 * @returns VBox status code.
1185 * @param pGVM The global (ring-0) VM structure.
1186 *
1187 * @thread EMT(0) if it's associated with the VM, otherwise any thread.
1188 */
1189GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM)
1190{
1191 LogFlow(("GVMMR0DestroyVM: pGVM=%p\n", pGVM));
1192 PGVMM pGVMM;
1193 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1194
1195 /*
1196 * Validate the VM structure, state and caller.
1197 */
1198 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1199 AssertReturn(!((uintptr_t)pGVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1200 AssertMsgReturn(pGVM->enmVMState >= VMSTATE_CREATING && pGVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pGVM->enmVMState),
1201 VERR_WRONG_ORDER);
1202
1203 uint32_t hGVM = pGVM->hSelf;
1204 ASMCompilerBarrier();
1205 AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_VM_HANDLE);
1206 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1207
1208 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1209 AssertReturn(pHandle->pGVM == pGVM, VERR_NOT_OWNER);
1210
1211 RTPROCESS ProcId = RTProcSelf();
1212 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1213 AssertReturn( ( pHandle->hEMT0 == hSelf
1214 && pHandle->ProcId == ProcId)
1215 || pHandle->hEMT0 == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);
1216
1217 /*
1218 * Lookup the handle and destroy the object.
1219 * Since the lock isn't recursive and we'll have to leave it before dereferencing the
1220 * object, we take some precautions against racing callers just in case...
1221 */
1222 int rc = gvmmR0CreateDestroyLock(pGVMM);
1223 AssertRC(rc);
1224
1225 /* Be careful here because we might theoretically be racing someone else cleaning up. */
1226 if ( pHandle->pGVM == pGVM
1227 && ( ( pHandle->hEMT0 == hSelf
1228 && pHandle->ProcId == ProcId)
1229 || pHandle->hEMT0 == NIL_RTNATIVETHREAD)
1230 && RT_VALID_PTR(pHandle->pvObj)
1231 && RT_VALID_PTR(pHandle->pSession)
1232 && RT_VALID_PTR(pHandle->pGVM)
1233 && pHandle->pGVM->u32Magic == GVM_MAGIC)
1234 {
1235 /* Check that other EMTs have deregistered. */
1236 uint32_t cNotDeregistered = 0;
1237 for (VMCPUID idCpu = 1; idCpu < pGVM->cCpus; idCpu++)
1238 cNotDeregistered += pGVM->aCpus[idCpu].hEMT != GVMM_RTNATIVETHREAD_DESTROYED;
1239 if (cNotDeregistered == 0)
1240 {
1241 /* Grab the object pointer. */
1242 void *pvObj = pHandle->pvObj;
1243 pHandle->pvObj = NULL;
1244 gvmmR0CreateDestroyUnlock(pGVMM);
1245
1246 SUPR0ObjRelease(pvObj, pHandle->pSession);
1247 }
1248 else
1249 {
1250 gvmmR0CreateDestroyUnlock(pGVMM);
1251 rc = VERR_GVMM_NOT_ALL_EMTS_DEREGISTERED;
1252 }
1253 }
1254 else
1255 {
1256 SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pGVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pGVM=%p hSelf=%p\n",
1257 pHandle, pHandle->pGVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pGVM, hSelf);
1258 gvmmR0CreateDestroyUnlock(pGVMM);
1259 rc = VERR_GVMM_IPE_2;
1260 }
1261
1262 return rc;
1263}
1264
1265
1266/**
1267 * Performs VM cleanup task as part of object destruction.
1268 *
1269 * @param pGVM The GVM pointer.
1270 */
1271static void gvmmR0CleanupVM(PGVM pGVM)
1272{
1273 if ( pGVM->gvmm.s.fDoneVMMR0Init
1274 && !pGVM->gvmm.s.fDoneVMMR0Term)
1275 {
1276 if ( pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
1277 && RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM)
1278 {
1279 LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
1280 VMMR0TermVM(pGVM, NIL_VMCPUID);
1281 }
1282 else
1283 AssertMsgFailed(("gvmmR0CleanupVM: VMMemObj=%p pGVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM));
1284 }
1285
1286 GMMR0CleanupVM(pGVM);
1287#ifdef VBOX_WITH_NEM_R0
1288 NEMR0CleanupVM(pGVM);
1289#endif
1290 PDMR0CleanupVM(pGVM);
1291 IOMR0CleanupVM(pGVM);
1292 DBGFR0CleanupVM(pGVM);
1293 PGMR0CleanupVM(pGVM);
1294 TMR0CleanupVM(pGVM);
1295 VMMR0CleanupVM(pGVM);
1296}
1297
1298
1299/**
1300 * @callback_method_impl{FNSUPDRVDESTRUCTOR,VM handle destructor}
1301 *
1302 * pvUser1 is the GVM instance pointer.
1303 * pvUser2 is the handle pointer.
1304 */
1305static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
1306{
1307 LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvUser1, pvUser2));
1308
1309 NOREF(pvObj);
1310
1311 /*
1312 * Some quick, paranoid, input validation.
1313 */
1314 PGVMHANDLE pHandle = (PGVMHANDLE)pvUser2;
1315 AssertPtr(pHandle);
1316 PGVMM pGVMM = (PGVMM)pvUser1;
1317 Assert(pGVMM == g_pGVMM);
1318 const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
1319 if ( !iHandle
1320 || iHandle >= RT_ELEMENTS(pGVMM->aHandles)
1321 || iHandle != pHandle->iSelf)
1322 {
1323 SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
1324 return;
1325 }
1326
1327 int rc = gvmmR0CreateDestroyLock(pGVMM);
1328 AssertRC(rc);
1329 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1330 AssertRC(rc);
1331
1332 /*
1333 * This is a tad slow but a doubly linked list is too much hassle.
1334 */
1335 if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
1336 {
1337 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
1338 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1339 gvmmR0CreateDestroyUnlock(pGVMM);
1340 return;
1341 }
1342
1343 if (pGVMM->iUsedHead == iHandle)
1344 pGVMM->iUsedHead = pHandle->iNext;
1345 else
1346 {
1347 uint16_t iPrev = pGVMM->iUsedHead;
1348 int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
1349 while (iPrev)
1350 {
1351 if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
1352 {
1353 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
1354 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1355 gvmmR0CreateDestroyUnlock(pGVMM);
1356 return;
1357 }
1358 if (RT_UNLIKELY(c-- <= 0))
1359 {
1360 iPrev = 0;
1361 break;
1362 }
1363
1364 if (pGVMM->aHandles[iPrev].iNext == iHandle)
1365 break;
1366 iPrev = pGVMM->aHandles[iPrev].iNext;
1367 }
1368 if (!iPrev)
1369 {
1370 SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf);
1371 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1372 gvmmR0CreateDestroyUnlock(pGVMM);
1373 return;
1374 }
1375
1376 Assert(pGVMM->aHandles[iPrev].iNext == iHandle);
1377 pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
1378 }
1379 pHandle->iNext = 0;
1380 pGVMM->cVMs--;
1381
1382 /*
1383 * Do the global cleanup round.
1384 */
1385 PGVM pGVM = pHandle->pGVM;
1386 if ( RT_VALID_PTR(pGVM)
1387 && pGVM->u32Magic == GVM_MAGIC)
1388 {
1389 pGVMM->cEMTs -= pGVM->cCpus;
1390
1391 if (pGVM->pSession)
1392 SUPR0SetSessionVM(pGVM->pSession, NULL, NULL);
1393
1394 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1395
1396 gvmmR0CleanupVM(pGVM);
1397
1398 /*
1399 * Do the GVMM cleanup - must be done last.
1400 */
1401 /* The VM and VM pages mappings/allocations. */
1402 if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
1403 {
1404 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
1405 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
1406 }
1407
1408 if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
1409 {
1410 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
1411 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
1412 }
1413
1414 if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
1415 {
1416 rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
1417 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
1418 }
1419
1420 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
1421 {
1422 if (pGVM->aCpus[i].gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
1423 {
1424 rc = RTSemEventMultiDestroy(pGVM->aCpus[i].gvmm.s.HaltEventMulti); AssertRC(rc);
1425 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
1426 }
1427 if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
1428 {
1429 rc = RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */); AssertRC(rc);
1430 pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
1431 }
1432 }
1433
1434 /* the GVM structure itself. */
1435 pGVM->u32Magic |= UINT32_C(0x80000000);
1436 Assert(pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ);
1437 rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, true /*fFreeMappings*/); AssertRC(rc);
1438 pGVM = NULL;
1439
1440 /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
1441 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
1442 AssertRC(rc);
1443 }
1444 /* else: GVMMR0CreateVM cleanup. */
1445
1446 /*
1447 * Free the handle.
1448 */
1449 pHandle->iNext = pGVMM->iFreeHead;
1450 pGVMM->iFreeHead = iHandle;
1451 ASMAtomicWriteNullPtr(&pHandle->pGVM);
1452 ASMAtomicWriteNullPtr(&pHandle->pvObj);
1453 ASMAtomicWriteNullPtr(&pHandle->pSession);
1454 ASMAtomicWriteHandle(&pHandle->hEMT0, NIL_RTNATIVETHREAD);
1455 ASMAtomicWriteU32(&pHandle->ProcId, NIL_RTPROCESS);
1456
1457 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
1458 gvmmR0CreateDestroyUnlock(pGVMM);
1459 LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
1460}
1461
1462
1463/**
1464 * Registers the calling thread as the EMT of a Virtual CPU.
1465 *
1466 * Note that VCPU 0 is automatically registered during VM creation.
1467 *
1468 * @returns VBox status code
1469 * @param pGVM The global (ring-0) VM structure.
1470 * @param idCpu VCPU id to register the current thread as.
1471 */
1472GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu)
1473{
1474 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1475
1476 /*
1477 * Validate the VM structure, state and handle.
1478 */
1479 PGVMM pGVMM;
1480 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */);
1481 if (RT_SUCCESS(rc))
1482 {
1483 if (idCpu < pGVM->cCpus)
1484 {
1485 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
1486
1487 gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
1488
1489 /* Check that the EMT isn't already assigned to a thread. */
1490 if (pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD)
1491 {
1492 Assert(pGVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
1493
1494 /* A thread may only be one EMT (this makes sure hNativeSelf isn't NIL). */
1495 for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
1496 AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
1497 if (RT_SUCCESS(rc))
1498 {
1499 /*
1500 * Do the assignment, then try setup the hook. Undo if that fails.
1501 */
1502 unsigned cCollisions = 0;
1503 uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
1504 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD)
1505 {
1506 uint32_t const idxHash2 = GVMM_EMT_HASH_2(hNativeSelf);
1507 do
1508 {
1509 cCollisions++;
1510 Assert(cCollisions < GVMM_EMT_HASH_SIZE);
1511 idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
1512 } while (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD);
1513 }
1514 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hNativeSelf;
1515 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = idCpu;
1516 pGVM->aCpus[idCpu].hNativeThreadR0 = hNativeSelf;
1517 pGVM->aCpus[idCpu].hEMT = hNativeSelf;
1518 pGVM->aCpus[idCpu].cEmtHashCollisions = (uint8_t)cCollisions;
1519 pGVM->aCpus[idCpu].gvmm.s.idxEmtHash = (uint16_t)idxHash;
1520
1521 rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[idCpu]);
1522 if (RT_SUCCESS(rc))
1523 CPUMR0RegisterVCpuThread(&pGVM->aCpus[idCpu]);
1524 else
1525 {
1526 pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1527 pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
1528 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = NIL_RTNATIVETHREAD;
1529 pGVM->gvmm.s.aEmtHash[idxHash].idVCpu = NIL_VMCPUID;
1530 pGVM->aCpus[idCpu].gvmm.s.idxEmtHash = UINT16_MAX;
1531 }
1532 }
1533 }
1534 else
1535 rc = VERR_ACCESS_DENIED;
1536
1537 gvmmR0CreateDestroyUnlock(pGVMM);
1538 }
1539 else
1540 rc = VERR_INVALID_CPU_ID;
1541 }
1542 return rc;
1543}
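/*
 * Illustrative sketch of the reverse lookup matching the double-hashing insert
 * in GVMMR0RegisterVCpu above: probe with GVMM_EMT_HASH_1/GVMM_EMT_HASH_2 until
 * the native thread, an empty slot, or the probe budget is hit.  The real file
 * has its own lookup helper; this is only meant to show the probing scheme.
 */
#if 0 /* example only */
static VMCPUID gvmmR0ExampleEmtHashLookup(PGVM pGVM, RTNATIVETHREAD hNativeSelf)
{
    uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
    if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != hNativeSelf)
    {
        uint32_t const idxHash2 = GVMM_EMT_HASH_2(hNativeSelf);
        uint32_t       cTries   = GVMM_EMT_HASH_SIZE;
        for (;;)
        {
            idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
            if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hNativeSelf)
                break;
            if (   pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD
                || --cTries == 0)
                return NIL_VMCPUID; /* not registered */
        }
    }
    return pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
}
#endif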
1544
1545
1546/**
1547 * Deregisters the calling thread as the EMT of a Virtual CPU.
1548 *
1549 * Note that VCPU 0 shall call GVMMR0DestroyVM instead of this API.
1550 *
1551 * @returns VBox status code
1552 * @param pGVM The global (ring-0) VM structure.
1553 * @param idCpu VCPU id to deregister the current thread as.
1554 */
1555GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu)
1556{
1557 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
1558
1559 /*
1560 * Validate the VM structure, state and handle.
1561 */
1562 PGVMM pGVMM;
1563 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1564 if (RT_SUCCESS(rc))
1565 {
1566 /*
1567 * Take the destruction lock and recheck the handle state to
1568 * prevent racing GVMMR0DestroyVM.
1569 */
1570 gvmmR0CreateDestroyLock(pGVMM);
1571
1572 uint32_t hSelf = pGVM->hSelf;
1573 ASMCompilerBarrier();
1574 if ( hSelf < RT_ELEMENTS(pGVMM->aHandles)
1575 && pGVMM->aHandles[hSelf].pvObj != NULL
1576 && pGVMM->aHandles[hSelf].pGVM == pGVM)
1577 {
1578 /*
1579 * Do per-EMT cleanups.
1580 */
1581 VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
1582
1583 /*
1584 * Invalidate hEMT. We don't use NIL here as that would allow
1585 * GVMMR0RegisterVCpu to be called again, and we don't want that.
1586 */
1587 pGVM->aCpus[idCpu].hEMT = GVMM_RTNATIVETHREAD_DESTROYED;
1588 pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
1589
1590 uint32_t const idxHash = pGVM->aCpus[idCpu].gvmm.s.idxEmtHash;
1591 if (idxHash < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash))
1592 pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = GVMM_RTNATIVETHREAD_DESTROYED;
1593 }
1594
1595 gvmmR0CreateDestroyUnlock(pGVMM);
1596 }
1597 return rc;
1598}
1599
1600
1601/**
1602 * Registers the caller as a given worker thread.
1603 *
1604 * This enables the thread to operate critical sections in ring-0.
1605 *
1606 * @returns VBox status code.
1607 * @param pGVM The global (ring-0) VM structure.
1608 * @param enmWorker The worker thread this is supposed to be.
1609 * @param hNativeSelfR3 The ring-3 native self of the caller.
1610 */
1611GVMMR0DECL(int) GVMMR0RegisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker, RTNATIVETHREAD hNativeSelfR3)
1612{
1613 /*
1614 * Validate input.
1615 */
1616 AssertReturn(enmWorker > GVMMWORKERTHREAD_INVALID && enmWorker < GVMMWORKERTHREAD_END, VERR_INVALID_PARAMETER);
1617 AssertReturn(hNativeSelfR3 != NIL_RTNATIVETHREAD, VERR_INVALID_HANDLE);
1618 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
1619 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
1620 PGVMM pGVMM;
1621 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
1622 AssertRCReturn(rc, rc);
1623 AssertReturn(pGVM->enmVMState < VMSTATE_DESTROYING, VERR_VM_INVALID_VM_STATE);
1624
1625 /*
1626 * Grab the big lock and check the VM state again.
1627 */
1628 uint32_t const hSelf = pGVM->hSelf;
1629 gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
1630 if ( hSelf < RT_ELEMENTS(pGVMM->aHandles)
1631 && pGVMM->aHandles[hSelf].pvObj != NULL
1632 && pGVMM->aHandles[hSelf].pGVM == pGVM
1633 && pGVMM->aHandles[hSelf].ProcId == RTProcSelf())
1634 {
1635 if (pGVM->enmVMState < VMSTATE_DESTROYING)
1636 {
1637 /*
1638 * Check that the thread isn't an EMT or serving in some other worker capacity.
1639 */
1640 for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
1641 AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
1642 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads); idx++)
1643 AssertBreakStmt(idx == (size_t)enmWorker || pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread != hNativeSelf,
1644 rc = VERR_INVALID_PARAMETER);
1645 if (RT_SUCCESS(rc))
1646 {
1647 /*
1648 * Do the registration.
1649 */
1650 if ( pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread == NIL_RTNATIVETHREAD
1651 && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == NIL_RTNATIVETHREAD)
1652 {
1653 pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread = hNativeSelf;
1654 pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 = hNativeSelfR3;
1655 rc = VINF_SUCCESS;
1656 }
1657 else if ( pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread == hNativeSelf
1658 && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == hNativeSelfR3)
1659 rc = VERR_ALREADY_EXISTS;
1660 else
1661 rc = VERR_RESOURCE_BUSY;
1662 }
1663 }
1664 else
1665 rc = VERR_VM_INVALID_VM_STATE;
1666 }
1667 else
1668 rc = VERR_INVALID_VM_HANDLE;
1669 gvmmR0CreateDestroyUnlock(pGVMM);
1670 return rc;
1671}
1672
1673
1674/**
1675 * Deregisters a worker thread (caller).
1676 *
1677 * The worker thread cannot be re-created and re-registered; instead the given
1678 * @a enmWorker slot becomes invalid.
1679 *
1680 * @returns VBox status code.
1681 * @param pGVM The global (ring-0) VM structure.
1682 * @param enmWorker The worker thread this is supposed to be.
1683 */
1684GVMMR0DECL(int) GVMMR0DeregisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker)
1685{
1686 /*
1687 * Validate input.
1688 */
1689 AssertReturn(enmWorker > GVMMWORKERTHREAD_INVALID && enmWorker < GVMMWORKERTHREAD_END, VERR_INVALID_PARAMETER);
1690 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1691 AssertReturn(hNativeThread != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
1692 PGVMM pGVMM;
1693 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
1694 AssertRCReturn(rc, rc);
1695
1696 /*
1697 * Grab the big lock and check the VM state again.
1698 */
1699 uint32_t const hSelf = pGVM->hSelf;
1700 gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
1701 if ( hSelf < RT_ELEMENTS(pGVMM->aHandles)
1702 && pGVMM->aHandles[hSelf].pvObj != NULL
1703 && pGVMM->aHandles[hSelf].pGVM == pGVM
1704 && pGVMM->aHandles[hSelf].ProcId == RTProcSelf())
1705 {
1706 /*
1707 * Do the deregistration.
1708         * This will prevent any other thread from registering as the worker later.
1709 */
1710 if (pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread == hNativeThread)
1711 {
1712 pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread = GVMM_RTNATIVETHREAD_DESTROYED;
1713 pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 = GVMM_RTNATIVETHREAD_DESTROYED;
1714 rc = VINF_SUCCESS;
1715 }
1716 else if ( pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread == GVMM_RTNATIVETHREAD_DESTROYED
1717 && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == GVMM_RTNATIVETHREAD_DESTROYED)
1718 rc = VINF_SUCCESS;
1719 else
1720 rc = VERR_NOT_OWNER;
1721 }
1722 else
1723 rc = VERR_INVALID_VM_HANDLE;
1724 gvmmR0CreateDestroyUnlock(pGVMM);
1725 return rc;
1726}
1727
1728
1729/**
1730 * Lookup a GVM structure by its handle.
1731 *
1732 * @returns The GVM pointer on success, NULL on failure.
1733 * @param hGVM The global VM handle. Asserts on bad handle.
1734 */
1735GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
1736{
1737 PGVMM pGVMM;
1738 GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
1739
1740 /*
1741 * Validate.
1742 */
1743 AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
1744 AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
1745
1746 /*
1747 * Look it up.
1748 */
1749 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1750 AssertPtrReturn(pHandle->pvObj, NULL);
1751 PGVM pGVM = pHandle->pGVM;
1752 AssertPtrReturn(pGVM, NULL);
1753
1754 return pGVM;
1755}
1756
1757
1758/**
1759 * Check that the given GVM and VM structures match up.
1760 *
1761 * The calling thread must be in the same process as the VM. All current lookups
1762 * are by threads inside the same process, so this will not be an issue.
1763 *
1764 * @returns VBox status code.
1765 * @param pGVM The global (ring-0) VM structure.
1766 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1767 * @param fTakeUsedLock Whether to take the used lock or not. We take it in
1768 * shared mode when requested.
1769 *
1770 * Be very careful if not taking the lock as it's
1771 * possible that the VM will disappear then!
1772 *
1773 * @remark This will not assert on an invalid pGVM but will try to return silently.
1774 */
1775static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
1776{
1777 /*
1778 * Check the pointers.
1779 */
1780 int rc;
1781 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1782 && ((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0 ))
1783 {
1784 /*
1785 * Get the pGVMM instance and check the VM handle.
1786 */
1787 PGVMM pGVMM;
1788 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1789
1790 uint16_t hGVM = pGVM->hSelf;
1791 if (RT_LIKELY( hGVM != NIL_GVM_HANDLE
1792 && hGVM < RT_ELEMENTS(pGVMM->aHandles)))
1793 {
1794 RTPROCESS const pidSelf = RTProcSelf();
1795 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1796 if (fTakeUsedLock)
1797 {
1798 rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
1799 AssertRCReturn(rc, rc);
1800 }
1801
1802 if (RT_LIKELY( pHandle->pGVM == pGVM
1803 && pHandle->ProcId == pidSelf
1804 && RT_VALID_PTR(pHandle->pvObj)))
1805 {
1806 /*
1807 * Some more VM data consistency checks.
1808 */
1809 if (RT_LIKELY( pGVM->cCpusUnsafe == pGVM->cCpus
1810 && pGVM->hSelfUnsafe == hGVM
1811 && pGVM->pSelf == pGVM))
1812 {
1813 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1814 && pGVM->enmVMState <= VMSTATE_TERMINATED))
1815 {
1816 *ppGVMM = pGVMM;
1817 return VINF_SUCCESS;
1818 }
1819 rc = VERR_INCONSISTENT_VM_HANDLE;
1820 }
1821 else
1822 rc = VERR_INCONSISTENT_VM_HANDLE;
1823 }
1824 else
1825 rc = VERR_INVALID_VM_HANDLE;
1826
1827 if (fTakeUsedLock)
1828 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
1829 }
1830 else
1831 rc = VERR_INVALID_VM_HANDLE;
1832 }
1833 else
1834 rc = VERR_INVALID_POINTER;
1835 return rc;
1836}
1837
1838
1839/**
1840 * Validates a GVM/VM pair.
1841 *
1842 * @returns VBox status code.
1843 * @param pGVM The global (ring-0) VM structure.
1844 */
1845GVMMR0DECL(int) GVMMR0ValidateGVM(PGVM pGVM)
1846{
1847 PGVMM pGVMM;
1848 return gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
1849}
1850
1851
1852/**
1853 * Check that the given GVM and VM structures match up.
1854 *
1855 * The calling thread must be in the same process as the VM. All current lookups
1856 * are by threads inside the same process, so this will not be an issue.
1857 *
1858 * @returns VBox status code.
1859 * @param pGVM The global (ring-0) VM structure.
1860 * @param idCpu The (alleged) Virtual CPU ID of the calling EMT.
1861 * @param ppGVMM Where to store the pointer to the GVMM instance data.
1862 * @thread EMT
1863 *
1864 * @remarks This will assert in all failure paths.
1865 */
1866static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM)
1867{
1868 /*
1869 * Check the pointers.
1870 */
1871 AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
1872 AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
1873
1874 /*
1875 * Get the pGVMM instance and check the VM handle.
1876 */
1877 PGVMM pGVMM;
1878 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
1879
1880 uint16_t hGVM = pGVM->hSelf;
1881 ASMCompilerBarrier();
1882 AssertReturn( hGVM != NIL_GVM_HANDLE
1883 && hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_VM_HANDLE);
1884
1885 RTPROCESS const pidSelf = RTProcSelf();
1886 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
1887 AssertReturn( pHandle->pGVM == pGVM
1888 && pHandle->ProcId == pidSelf
1889 && RT_VALID_PTR(pHandle->pvObj),
1890 VERR_INVALID_HANDLE);
1891
1892 /*
1893 * Check the EMT claim.
1894 */
1895 RTNATIVETHREAD const hAllegedEMT = RTThreadNativeSelf();
1896 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
1897 AssertReturn(pGVM->aCpus[idCpu].hEMT == hAllegedEMT, VERR_NOT_OWNER);
1898
1899 /*
1900 * Some more VM data consistency checks.
1901 */
1902 AssertReturn(pGVM->cCpusUnsafe == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);
1903 AssertReturn(pGVM->hSelfUnsafe == hGVM, VERR_INCONSISTENT_VM_HANDLE);
1904 AssertReturn( pGVM->enmVMState >= VMSTATE_CREATING
1905 && pGVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);
1906
1907 *ppGVMM = pGVMM;
1908 return VINF_SUCCESS;
1909}
1910
1911
1912/**
1913 * Validates a GVM/EMT pair.
1914 *
1915 * @returns VBox status code.
1916 * @param pGVM The global (ring-0) VM structure.
1917 * @param idCpu The Virtual CPU ID of the calling EMT.
1918 * @thread EMT(idCpu)
1919 */
1920GVMMR0DECL(int) GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu)
1921{
1922 PGVMM pGVMM;
1923 return gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
1924}
1925
1926
1927/**
1928 * Looks up the VM belonging to the specified EMT thread.
1929 *
1930 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1931 * unnecessary kernel panics when the EMT thread hits an assertion. The
1932 * caller may or may not be an EMT thread.
1933 *
1934 * @returns Pointer to the VM on success, NULL on failure.
1935 * @param hEMT The native thread handle of the EMT.
1936 * NIL_RTNATIVETHREAD means the current thread
1937 */
1938GVMMR0DECL(PVMCC) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
1939{
1940 /*
1941     * No assertions here as we're usually called in an AssertMsgN or
1942 * RTAssert* context.
1943 */
1944 PGVMM pGVMM = g_pGVMM;
1945 if ( !RT_VALID_PTR(pGVMM)
1946 || pGVMM->u32Magic != GVMM_MAGIC)
1947 return NULL;
1948
1949 if (hEMT == NIL_RTNATIVETHREAD)
1950 hEMT = RTThreadNativeSelf();
1951 RTPROCESS ProcId = RTProcSelf();
1952
1953 /*
1954 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
1955 */
1956/** @todo introduce some pid hash table here, please. */
1957 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
1958 {
1959 if ( pGVMM->aHandles[i].iSelf == i
1960 && pGVMM->aHandles[i].ProcId == ProcId
1961 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
1962 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
1963 {
1964 if (pGVMM->aHandles[i].hEMT0 == hEMT)
1965 return pGVMM->aHandles[i].pGVM;
1966
1967            /* This is fairly safe with the current process-per-VM approach. */
1968 PGVM pGVM = pGVMM->aHandles[i].pGVM;
1969 VMCPUID const cCpus = pGVM->cCpus;
1970 ASMCompilerBarrier();
1971 if ( cCpus < 1
1972 || cCpus > VMM_MAX_CPU_COUNT)
1973 continue;
1974 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
1975 if (pGVM->aCpus[idCpu].hEMT == hEMT)
1976 return pGVMM->aHandles[i].pGVM;
1977 }
1978 }
1979 return NULL;
1980}
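
/*
 * Example (illustrative sketch, not built): the intended assertion-context
 * use, resolving the current thread's VM without taking any locks.  The
 * helper is hypothetical.
 */
#if 0
static void hypotheticalAssertionHelper(void)
{
    PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD /* current thread */);
    if (pVM)
        SUPR0Printf("Assertion hit on an EMT of VM %p\n", pVM);
    else
        SUPR0Printf("Assertion hit on a non-EMT thread\n");
}
#endif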
1981
1982
1983/**
1984 * Looks up the GVMCPU belonging to the specified EMT thread.
1985 *
1986 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
1987 * unnecessary kernel panics when the EMT thread hits an assertion. The
1988 * caller may or may not be an EMT thread.
1989 *
1990 * @returns Pointer to the VCpu structure on success, NULL on failure.
1991 * @param hEMT The native thread handle of the EMT.
1992 * NIL_RTNATIVETHREAD means the current thread
1993 */
1994GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByEMT(RTNATIVETHREAD hEMT)
1995{
1996 /*
1997     * No assertions here as we're usually called in an AssertMsgN,
1998 * RTAssert*, Log and LogRel contexts.
1999 */
2000 PGVMM pGVMM = g_pGVMM;
2001 if ( !RT_VALID_PTR(pGVMM)
2002 || pGVMM->u32Magic != GVMM_MAGIC)
2003 return NULL;
2004
2005 if (hEMT == NIL_RTNATIVETHREAD)
2006 hEMT = RTThreadNativeSelf();
2007 RTPROCESS ProcId = RTProcSelf();
2008
2009 /*
2010 * Search the handles in a linear fashion as we don't dare to take the lock (assert).
2011 */
2012/** @todo introduce some pid hash table here, please. */
2013 for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
2014 {
2015 if ( pGVMM->aHandles[i].iSelf == i
2016 && pGVMM->aHandles[i].ProcId == ProcId
2017 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
2018 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
2019 {
2020 PGVM pGVM = pGVMM->aHandles[i].pGVM;
2021 if (pGVMM->aHandles[i].hEMT0 == hEMT)
2022 return &pGVM->aCpus[0];
2023
2024            /* This is fairly safe with the current process-per-VM approach. */
2025 VMCPUID const cCpus = pGVM->cCpus;
2026 ASMCompilerBarrier();
2027 ASMCompilerBarrier();
2028 if ( cCpus < 1
2029 || cCpus > VMM_MAX_CPU_COUNT)
2030 continue;
2031 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
2032 if (pGVM->aCpus[idCpu].hEMT == hEMT)
2033 return &pGVM->aCpus[idCpu];
2034 }
2035 }
2036 return NULL;
2037}
2038
2039
2040/**
2041 * Get the GVMCPU structure for the given EMT.
2042 *
2043 * @returns The VCpu structure for @a hEMT, NULL if not an EMT.
2044 * @param pGVM The global (ring-0) VM structure.
2045 * @param hEMT The native thread handle of the EMT.
2046 * NIL_RTNATIVETHREAD means the current thread
2047 */
2048GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByGVMandEMT(PGVM pGVM, RTNATIVETHREAD hEMT)
2049{
2050 /*
2051 * Validate & adjust input.
2052 */
2053 AssertPtr(pGVM);
2054 Assert(pGVM->u32Magic == GVM_MAGIC);
2055 if (hEMT == NIL_RTNATIVETHREAD /* likely */)
2056 {
2057 hEMT = RTThreadNativeSelf();
2058 AssertReturn(hEMT != NIL_RTNATIVETHREAD, NULL);
2059 }
2060
2061 /*
2062 * Find the matching hash table entry.
2063 * See similar code in GVMMR0GetRing3ThreadForSelf.
2064 */
2065 uint32_t idxHash = GVMM_EMT_HASH_1(hEMT);
2066 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
2067 { /* likely */ }
2068 else
2069 {
2070#ifdef VBOX_STRICT
2071 unsigned cCollisions = 0;
2072#endif
2073 uint32_t const idxHash2 = GVMM_EMT_HASH_2(hEMT);
2074 for (;;)
2075 {
2076 Assert(cCollisions++ < GVMM_EMT_HASH_SIZE);
2077 idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
2078 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
2079 break;
2080 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD)
2081 {
2082#ifdef VBOX_STRICT
2083 uint32_t idxCpu = pGVM->cCpus;
2084 AssertStmt(idxCpu < VMM_MAX_CPU_COUNT, idxCpu = VMM_MAX_CPU_COUNT);
2085 while (idxCpu-- > 0)
2086 Assert(pGVM->aCpus[idxCpu].hNativeThreadR0 != hEMT);
2087#endif
2088 return NULL;
2089 }
2090 }
2091 }
2092
2093 /*
2094 * Validate the VCpu number and translate it into a pointer.
2095 */
2096 VMCPUID const idCpu = pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
2097 AssertReturn(idCpu < pGVM->cCpus, NULL);
2098 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2099 Assert(pGVCpu->hNativeThreadR0 == hEMT);
2100 Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
2101 return pGVCpu;
2102}
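
/*
 * Example (illustrative sketch, not built): fetching the calling EMT's ring-0
 * VCpu structure via the EMT hash.  A NULL return means the caller is not an
 * EMT of the given VM.  The helper is hypothetical.
 */
#if 0
static PGVMCPU hypotheticalGetCallingVCpu(PGVM pGVM)
{
    PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD /* current thread */);
    AssertReturn(pGVCpu, NULL);
    return pGVCpu;
}
#endif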
2103
2104
2105/**
2106 * Get the native ring-3 thread handle for the caller.
2107 *
2108 * This works for EMTs and registered workers.
2109 *
2110 * @returns ring-3 native thread handle or NIL_RTNATIVETHREAD.
2111 * @param pGVM The global (ring-0) VM structure.
2112 */
2113GVMMR0DECL(RTNATIVETHREAD) GVMMR0GetRing3ThreadForSelf(PGVM pGVM)
2114{
2115 /*
2116 * Validate input.
2117 */
2118 AssertPtr(pGVM);
2119 AssertReturn(pGVM->u32Magic == GVM_MAGIC, NIL_RTNATIVETHREAD);
2120 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
2121 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, NIL_RTNATIVETHREAD);
2122
2123 /*
2124 * Find the matching hash table entry.
2125 * See similar code in GVMMR0GetGVCpuByGVMandEMT.
2126 */
2127 uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
2128 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hNativeSelf)
2129 { /* likely */ }
2130 else
2131 {
2132#ifdef VBOX_STRICT
2133 unsigned cCollisions = 0;
2134#endif
2135 uint32_t const idxHash2 = GVMM_EMT_HASH_2(hNativeSelf);
2136 for (;;)
2137 {
2138 Assert(cCollisions++ < GVMM_EMT_HASH_SIZE);
2139 idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
2140 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hNativeSelf)
2141 break;
2142 if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD)
2143 {
2144#ifdef VBOX_STRICT
2145 uint32_t idxCpu = pGVM->cCpus;
2146 AssertStmt(idxCpu < VMM_MAX_CPU_COUNT, idxCpu = VMM_MAX_CPU_COUNT);
2147 while (idxCpu-- > 0)
2148 Assert(pGVM->aCpus[idxCpu].hNativeThreadR0 != hNativeSelf);
2149#endif
2150
2151 /*
2152 * Not an EMT, so see if it's a worker thread.
2153 */
2154 size_t idx = RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads);
2155 while (--idx > GVMMWORKERTHREAD_INVALID)
2156 if (pGVM->gvmm.s.aWorkerThreads[idx].hNativeThread == hNativeSelf)
2157 return pGVM->gvmm.s.aWorkerThreads[idx].hNativeThreadR3;
2158
2159 return NIL_RTNATIVETHREAD;
2160 }
2161 }
2162 }
2163
2164 /*
2165 * Validate the VCpu number and translate it into a pointer.
2166 */
2167 VMCPUID const idCpu = pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
2168 AssertReturn(idCpu < pGVM->cCpus, NIL_RTNATIVETHREAD);
2169 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2170 Assert(pGVCpu->hNativeThreadR0 == hNativeSelf);
2171 Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
2172 return pGVCpu->hNativeThread;
2173}
2174
2175
2176/**
2177 * Converts a pointer within the GVM structure to a host physical address.
2178 *
2179 * @returns Host physical address.
2180 * @param pGVM The global (ring-0) VM structure.
2181 * @param pv The address to convert.
2182 * @thread EMT
2183 */
2184GVMMR0DECL(RTHCPHYS) GVMMR0ConvertGVMPtr2HCPhys(PGVM pGVM, void *pv)
2185{
2186 AssertPtr(pGVM);
2187 Assert(pGVM->u32Magic == GVM_MAGIC);
2188 uintptr_t const off = (uintptr_t)pv - (uintptr_t)pGVM;
2189 Assert(off < RT_UOFFSETOF_DYN(GVM, aCpus[pGVM->cCpus]));
2190 return RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, off >> PAGE_SHIFT) | ((uintptr_t)pv & PAGE_OFFSET_MASK);
2191}
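
/*
 * Example (illustrative sketch, not built): obtaining the host physical
 * address of a per-VCpu structure inside the GVM allocation, for instance for
 * handing to hardware.  The choice of field is purely illustrative.
 */
#if 0
static RTHCPHYS hypotheticalVCpuHCPhys(PGVM pGVM, VMCPUID idCpu)
{
    AssertReturn(idCpu < pGVM->cCpus, NIL_RTHCPHYS);
    return GVMMR0ConvertGVMPtr2HCPhys(pGVM, &pGVM->aCpus[idCpu]);
}
#endif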
2192
2193
2194/**
2195 * This will wake up expired and soon-to-be-expired VMs.
2196 *
2197 * @returns Number of VMs that have been woken up.
2198 * @param pGVMM Pointer to the GVMM instance data.
2199 * @param u64Now The current time.
2200 */
2201static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
2202{
2203 /*
2204     * Skip this if we've been disabled, either because of high resolution
2205     * wakeups or by the user.
2206 */
2207 if (!pGVMM->fDoEarlyWakeUps)
2208 return 0;
2209
2210/** @todo Rewrite this algorithm. See performance defect XYZ. */
2211
2212 /*
2213 * A cheap optimization to stop wasting so much time here on big setups.
2214 */
2215 const uint64_t uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
2216 if ( pGVMM->cHaltedEMTs == 0
2217 || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
2218 return 0;
2219
2220 /*
2221 * Only one thread doing this at a time.
2222 */
2223 if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false))
2224 return 0;
2225
2226 /*
2227 * The first pass will wake up VMs which have actually expired
2228 * and look for VMs that should be woken up in the 2nd and 3rd passes.
2229 */
2230 const uint64_t uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
2231 uint64_t u64Min = UINT64_MAX;
2232 unsigned cWoken = 0;
2233 unsigned cHalted = 0;
2234 unsigned cTodo2nd = 0;
2235 unsigned cTodo3rd = 0;
2236 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2237 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2238 i = pGVMM->aHandles[i].iNext)
2239 {
2240 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2241 if ( RT_VALID_PTR(pCurGVM)
2242 && pCurGVM->u32Magic == GVM_MAGIC)
2243 {
2244 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2245 {
2246 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2247 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2248 if (u64)
2249 {
2250 if (u64 <= u64Now)
2251 {
2252 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2253 {
2254 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2255 AssertRC(rc);
2256 cWoken++;
2257 }
2258 }
2259 else
2260 {
2261 cHalted++;
2262 if (u64 <= uNsEarlyWakeUp1)
2263 cTodo2nd++;
2264 else if (u64 <= uNsEarlyWakeUp2)
2265 cTodo3rd++;
2266 else if (u64 < u64Min)
2267                            u64Min = u64;
2268 }
2269 }
2270 }
2271 }
2272 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2273 }
2274
2275 if (cTodo2nd)
2276 {
2277 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2278 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2279 i = pGVMM->aHandles[i].iNext)
2280 {
2281 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2282 if ( RT_VALID_PTR(pCurGVM)
2283 && pCurGVM->u32Magic == GVM_MAGIC)
2284 {
2285 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2286 {
2287 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2288 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2289 if ( u64
2290 && u64 <= uNsEarlyWakeUp1)
2291 {
2292 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2293 {
2294 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2295 AssertRC(rc);
2296 cWoken++;
2297 }
2298 }
2299 }
2300 }
2301 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2302 }
2303 }
2304
2305 if (cTodo3rd)
2306 {
2307 for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
2308 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
2309 i = pGVMM->aHandles[i].iNext)
2310 {
2311 PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
2312 if ( RT_VALID_PTR(pCurGVM)
2313 && pCurGVM->u32Magic == GVM_MAGIC)
2314 {
2315 for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
2316 {
2317 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
2318 uint64_t u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
2319 if ( u64
2320 && u64 <= uNsEarlyWakeUp2)
2321 {
2322 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
2323 {
2324 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
2325 AssertRC(rc);
2326 cWoken++;
2327 }
2328 }
2329 }
2330 }
2331 AssertLogRelBreak(cGuard++ < RT_ELEMENTS(pGVMM->aHandles));
2332 }
2333 }
2334
2335 /*
2336 * Set the minimum value.
2337 */
2338 pGVMM->uNsNextEmtWakeup = u64Min;
2339
2340 ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false);
2341 return cWoken;
2342}
2343
2344
2345/**
2346 * Halt the EMT thread.
2347 *
2348 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2349 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2350 * @param pGVM The global (ring-0) VM structure.
2351 * @param pGVCpu The global (ring-0) CPU structure of the calling
2352 * EMT.
2353 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2354 * @thread EMT(pGVCpu).
2355 */
2356GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)
2357{
2358 LogFlow(("GVMMR0SchedHalt: pGVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",
2359 pGVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));
2360 PGVMM pGVMM;
2361 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
2362
2363 pGVM->gvmm.s.StatsSched.cHaltCalls++;
2364 Assert(!pGVCpu->gvmm.s.u64HaltExpire);
2365
2366 /*
2367 * If we're doing early wake-ups, we must take the UsedList lock before we
2368 * start querying the current time.
2369 * Note! Interrupts must NOT be disabled at this point because we ask for GIP time!
2370 */
2371 bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps;
2372 if (fDoEarlyWakeUps)
2373 {
2374 int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2);
2375 }
2376
2377 pGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
2378
2379    /* GIP hack: We might frequently be sleeping for short intervals where the
2380       difference between GIP and system time matters on systems with high resolution
2381       system time. So, convert the input from GIP to system time in that case. */
2382 Assert(ASMGetFlags() & X86_EFL_IF);
2383 const uint64_t u64NowSys = RTTimeSystemNanoTS();
2384 const uint64_t u64NowGip = RTTimeNanoTS();
2385
2386 if (fDoEarlyWakeUps)
2387 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
2388
2389 /*
2390 * Go to sleep if we must...
2391 * Cap the sleep time to 1 second to be on the safe side.
2392 */
2393 int rc;
2394 uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
2395 if ( u64NowGip < u64ExpireGipTime
2396 && cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
2397 ? pGVMM->nsMinSleepCompany
2398 : pGVMM->nsMinSleepAlone))
2399 {
2400 pGVM->gvmm.s.StatsSched.cHaltBlocking++;
2401 if (cNsInterval > RT_NS_1SEC)
2402 u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
2403 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
2404 ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
2405 if (fDoEarlyWakeUps)
2406 {
2407 if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
2408 pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
2409 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2410 }
2411
2412 rc = RTSemEventMultiWaitEx(pGVCpu->gvmm.s.HaltEventMulti,
2413 RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
2414 u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
2415
2416 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2417 ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
2418
2419        /* Reset the semaphore to try to prevent a few false wake-ups. */
2420 if (rc == VINF_SUCCESS)
2421 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2422 else if (rc == VERR_TIMEOUT)
2423 {
2424 pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
2425 rc = VINF_SUCCESS;
2426 }
2427 }
2428 else
2429 {
2430 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
2431 if (fDoEarlyWakeUps)
2432 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2433 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
2434 rc = VINF_SUCCESS;
2435 }
2436
2437 return rc;
2438}
2439
2440
2441/**
2442 * Halt the EMT thread.
2443 *
2444 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread).
2445 * VERR_INTERRUPTED if a signal was scheduled for the thread.
2446 * @param pGVM The global (ring-0) VM structure.
2447 * @param idCpu The Virtual CPU ID of the calling EMT.
2448 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time.
2449 * @thread EMT(idCpu).
2450 */
2451GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
2452{
2453 PGVMM pGVMM;
2454 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2455 if (RT_SUCCESS(rc))
2456 rc = GVMMR0SchedHalt(pGVM, &pGVM->aCpus[idCpu], u64ExpireGipTime);
2457 return rc;
2458}
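
/*
 * Example (illustrative sketch, not built): an EMT halting itself for roughly
 * one millisecond, expressed as the absolute GIP deadline GVMMR0SchedHalt
 * expects.  The helper is hypothetical.
 */
#if 0
static int hypotheticalHaltOneMillisecond(PGVM pGVM, PGVMCPU pGVCpu)
{
    uint64_t const u64ExpireGipTime = RTTimeNanoTS() + RT_NS_1MS;
    return GVMMR0SchedHalt(pGVM, pGVCpu, u64ExpireGipTime); /* VINF_SUCCESS or VERR_INTERRUPTED */
}
#endif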
2459
2460
2461
2462/**
2463 * Worker for GVMMR0SchedWakeUp and GVMMR0SchedWakeUpAndPokeCpus that wakes up
2464 * a sleeping EMT.
2465 *
2466 * @retval VINF_SUCCESS if successfully woken up.
2467 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2468 *
2469 * @param pGVM The global (ring-0) VM structure.
2470 * @param pGVCpu The global (ring-0) VCPU structure.
2471 */
2472DECLINLINE(int) gvmmR0SchedWakeUpOne(PGVM pGVM, PGVMCPU pGVCpu)
2473{
2474 pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
2475
2476 /*
2477     * Signal the semaphore regardless of whether it's currently blocked on it.
2478     *
2479     * The reason for this is that there is absolutely no way we can be 100%
2480     * certain that it isn't *about* to go to sleep on it and just got
2481     * delayed a bit en route. So, we will always signal the semaphore when
2482     * it is flagged as halted in the VMM.
2483 */
2484/** @todo we can optimize some of that by means of the pVCpu->enmState now. */
2485 int rc;
2486 if (pGVCpu->gvmm.s.u64HaltExpire)
2487 {
2488 rc = VINF_SUCCESS;
2489 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
2490 }
2491 else
2492 {
2493 rc = VINF_GVM_NOT_BLOCKED;
2494 pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
2495 }
2496
2497 int rc2 = RTSemEventMultiSignal(pGVCpu->gvmm.s.HaltEventMulti);
2498 AssertRC(rc2);
2499
2500 return rc;
2501}
2502
2503
2504/**
2505 * Wakes up the halted EMT thread so it can service a pending request.
2506 *
2507 * @returns VBox status code.
2508 * @retval VINF_SUCCESS if successfully woken up.
2509 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2510 *
2511 * @param pGVM The global (ring-0) VM structure.
2512 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2513 * @param fTakeUsedLock    Whether to take the used lock or not.
2514 * @thread Any but EMT(idCpu).
2515 */
2516GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2517{
2518 /*
2519 * Validate input and take the UsedLock.
2520 */
2521 PGVMM pGVMM;
2522 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2523 if (RT_SUCCESS(rc))
2524 {
2525 if (idCpu < pGVM->cCpus)
2526 {
2527 /*
2528 * Do the actual job.
2529 */
2530 rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2531
2532 if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
2533 {
2534 /*
2535 * While we're here, do a round of scheduling.
2536 */
2537 Assert(ASMGetFlags() & X86_EFL_IF);
2538 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2539 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2540 }
2541 }
2542 else
2543 rc = VERR_INVALID_CPU_ID;
2544
2545 if (fTakeUsedLock)
2546 {
2547 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2548 AssertRC(rc2);
2549 }
2550 }
2551
2552 LogFlow(("GVMMR0SchedWakeUpEx: returns %Rrc\n", rc));
2553 return rc;
2554}
2555
2556
2557/**
2558 * Wakes up the halted EMT thread so it can service a pending request.
2559 *
2560 * @returns VBox status code.
2561 * @retval VINF_SUCCESS if successfully woken up.
2562 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2563 *
2564 * @param pGVM The global (ring-0) VM structure.
2565 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2566 * @thread Any but EMT(idCpu).
2567 */
2568GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, VMCPUID idCpu)
2569{
2570 return GVMMR0SchedWakeUpEx(pGVM, idCpu, true /* fTakeUsedLock */);
2571}
2572
2573
2574/**
2575 * Wakes up the halted EMT thread so it can service a pending request, no GVM
2576 * parameter and no used locking.
2577 *
2578 * @returns VBox status code.
2579 * @retval VINF_SUCCESS if successfully woken up.
2580 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
2581 *
2582 * @param pGVM The global (ring-0) VM structure.
2583 * @param idCpu The Virtual CPU ID of the EMT to wake up.
2584 * @thread Any but EMT(idCpu).
2585 * @deprecated Don't use in new code if possible! Use the GVM variant.
2586 */
2587GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2588{
2589 PGVMM pGVMM;
2590 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2591 if (RT_SUCCESS(rc))
2592 rc = GVMMR0SchedWakeUpEx(pGVM, idCpu, false /*fTakeUsedLock*/);
2593 return rc;
2594}
2595
2596
2597/**
2598 * Worker common to GVMMR0SchedPoke and GVMMR0SchedWakeUpAndPokeCpus that pokes
2599 * the Virtual CPU if it's still busy executing guest code.
2600 *
2601 * @returns VBox status code.
2602 * @retval VINF_SUCCESS if poked successfully.
2603 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2604 *
2605 * @param pGVM The global (ring-0) VM structure.
2606 * @param pVCpu The cross context virtual CPU structure.
2607 */
2608DECLINLINE(int) gvmmR0SchedPokeOne(PGVM pGVM, PVMCPUCC pVCpu)
2609{
2610 pGVM->gvmm.s.StatsSched.cPokeCalls++;
2611
2612 RTCPUID idHostCpu = pVCpu->idHostCpu;
2613 if ( idHostCpu == NIL_RTCPUID
2614 || VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_EXEC)
2615 {
2616 pGVM->gvmm.s.StatsSched.cPokeNotBusy++;
2617 return VINF_GVM_NOT_BUSY_IN_GC;
2618 }
2619
2620 /* Note: this function is not implemented on Darwin and Linux (kernel < 2.6.19) */
2621 RTMpPokeCpu(idHostCpu);
2622 return VINF_SUCCESS;
2623}
2624
2625
2626/**
2627 * Pokes an EMT if it's still busy running guest code.
2628 *
2629 * @returns VBox status code.
2630 * @retval VINF_SUCCESS if poked successfully.
2631 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2632 *
2633 * @param pGVM The global (ring-0) VM structure.
2634 * @param idCpu The ID of the virtual CPU to poke.
2635 * @param fTakeUsedLock    Whether to take the used lock or not.
2636 */
2637GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
2638{
2639 /*
2640 * Validate input and take the UsedLock.
2641 */
2642 PGVMM pGVMM;
2643 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
2644 if (RT_SUCCESS(rc))
2645 {
2646 if (idCpu < pGVM->cCpus)
2647 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2648 else
2649 rc = VERR_INVALID_CPU_ID;
2650
2651 if (fTakeUsedLock)
2652 {
2653 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2654 AssertRC(rc2);
2655 }
2656 }
2657
2658    LogFlow(("GVMMR0SchedPokeEx: returns %Rrc\n", rc));
2659 return rc;
2660}
2661
2662
2663/**
2664 * Pokes an EMT if it's still busy running guest code.
2665 *
2666 * @returns VBox status code.
2667 * @retval VINF_SUCCESS if poked successfully.
2668 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2669 *
2670 * @param pGVM The global (ring-0) VM structure.
2671 * @param idCpu The ID of the virtual CPU to poke.
2672 */
2673GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, VMCPUID idCpu)
2674{
2675 return GVMMR0SchedPokeEx(pGVM, idCpu, true /* fTakeUsedLock */);
2676}
2677
2678
2679/**
2680 * Pokes an EMT if it's still busy running guest code, no GVM parameter and no
2681 * used locking.
2682 *
2683 * @returns VBox status code.
2684 * @retval VINF_SUCCESS if poked successfully.
2685 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
2686 *
2687 * @param pGVM The global (ring-0) VM structure.
2688 * @param idCpu The ID of the virtual CPU to poke.
2689 *
2690 * @deprecated Don't use in new code if possible! Use the GVM variant.
2691 */
2692GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
2693{
2694 PGVMM pGVMM;
2695 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
2696 if (RT_SUCCESS(rc))
2697 {
2698 if (idCpu < pGVM->cCpus)
2699 rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2700 else
2701 rc = VERR_INVALID_CPU_ID;
2702 }
2703 return rc;
2704}
2705
2706
2707/**
2708 * Wakes up a set of halted EMT threads so they can service pending requests.
2709 *
2710 * @returns VBox status code, no informational stuff.
2711 *
2712 * @param pGVM The global (ring-0) VM structure.
2713 * @param pSleepSet The set of sleepers to wake up.
2714 * @param pPokeSet The set of CPUs to poke.
2715 */
2716GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)
2717{
2718 AssertPtrReturn(pSleepSet, VERR_INVALID_POINTER);
2719 AssertPtrReturn(pPokeSet, VERR_INVALID_POINTER);
2720 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
2721
2722 /*
2723 * Validate input and take the UsedLock.
2724 */
2725 PGVMM pGVMM;
2726 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /* fTakeUsedLock */);
2727 if (RT_SUCCESS(rc))
2728 {
2729 rc = VINF_SUCCESS;
2730 VMCPUID idCpu = pGVM->cCpus;
2731 while (idCpu-- > 0)
2732 {
2733            /* Don't try to poke or wake up ourselves. */
2734 if (pGVM->aCpus[idCpu].hEMT == hSelf)
2735 continue;
2736
2737 /* just ignore errors for now. */
2738 if (VMCPUSET_IS_PRESENT(pSleepSet, idCpu))
2739 gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
2740 else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
2741 gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
2742 }
2743
2744 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2745 AssertRC(rc2);
2746 }
2747
2748 LogFlow(("GVMMR0SchedWakeUpAndPokeCpus: returns %Rrc\n", rc));
2749 return rc;
2750}
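
/*
 * Example (illustrative sketch, not built): waking every halted EMT and poking
 * every EMT still executing guest code.  The all-CPUs policy is an assumption
 * for illustration; the function itself skips the caller's own EMT.
 */
#if 0
static int hypotheticalKickAllCpus(PGVM pGVM)
{
    VMCPUSET SleepSet, PokeSet;
    VMCPUSET_EMPTY(&SleepSet);
    VMCPUSET_EMPTY(&PokeSet);
    for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
    {
        VMCPUSET_ADD(&SleepSet, idCpu);   /* wake it if halted ... */
        VMCPUSET_ADD(&PokeSet, idCpu);    /* ... poke it if executing guest code */
    }
    return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &SleepSet, &PokeSet);
}
#endif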
2751
2752
2753/**
2754 * VMMR0 request wrapper for GVMMR0SchedWakeUpAndPokeCpus.
2755 *
2756 * @returns see GVMMR0SchedWakeUpAndPokeCpus.
2757 * @param pGVM The global (ring-0) VM structure.
2758 * @param pReq Pointer to the request packet.
2759 */
2760GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)
2761{
2762 /*
2763 * Validate input and pass it on.
2764 */
2765 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2766 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
2767
2768 return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &pReq->SleepSet, &pReq->PokeSet);
2769}
2770
2771
2772
2773/**
2774 * Poll the scheduler to see if someone else should get a chance to run.
2775 *
2776 * This is a bit hackish and will not work too well if the machine is
2777 * under heavy load from non-VM processes.
2778 *
2779 * @returns VINF_SUCCESS if not yielded.
2780 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
2781 * @param pGVM The global (ring-0) VM structure.
2782 * @param idCpu The Virtual CPU ID of the calling EMT.
2783 * @param fYield Whether to yield or not.
2784 * This is for when we're spinning in the halt loop.
2785 * @thread EMT(idCpu).
2786 */
2787GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, VMCPUID idCpu, bool fYield)
2788{
2789 /*
2790 * Validate input.
2791 */
2792 PGVMM pGVMM;
2793 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
2794 if (RT_SUCCESS(rc))
2795 {
2796 /*
2797         * We currently only implement helping with wakeups (fYield = false), so don't
2798 * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything.
2799 */
2800 if (!fYield && pGVMM->fDoEarlyWakeUps)
2801 {
2802 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
2803 pGVM->gvmm.s.StatsSched.cPollCalls++;
2804
2805 Assert(ASMGetFlags() & X86_EFL_IF);
2806 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
2807
2808 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
2809
2810 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
2811 }
2812 /*
2813 * Not quite sure what we could do here...
2814 */
2815 else if (fYield)
2816 rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */
2817 else
2818 rc = VINF_SUCCESS;
2819 }
2820
2821    LogFlow(("GVMMR0SchedPoll: returns %Rrc\n", rc));
2822 return rc;
2823}
2824
2825
2826#ifdef GVMM_SCHED_WITH_PPT
2827/**
2828 * Timer callback for the periodic preemption timer.
2829 *
2830 * @param pTimer The timer handle.
2831 * @param pvUser Pointer to the per cpu structure.
2832 * @param iTick The current tick.
2833 */
2834static DECLCALLBACK(void) gvmmR0SchedPeriodicPreemptionTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
2835{
2836 PGVMMHOSTCPU pCpu = (PGVMMHOSTCPU)pvUser;
2837 NOREF(pTimer); NOREF(iTick);
2838
2839 /*
2840 * Termination check
2841 */
2842 if (pCpu->u32Magic != GVMMHOSTCPU_MAGIC)
2843 return;
2844
2845 /*
2846     * Do the housekeeping.
2847 */
2848 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2849
2850 if (++pCpu->Ppt.iTickHistorization >= pCpu->Ppt.cTicksHistoriziationInterval)
2851 {
2852 /*
2853 * Historicize the max frequency.
2854 */
2855 uint32_t iHzHistory = ++pCpu->Ppt.iHzHistory % RT_ELEMENTS(pCpu->Ppt.aHzHistory);
2856 pCpu->Ppt.aHzHistory[iHzHistory] = pCpu->Ppt.uDesiredHz;
2857 pCpu->Ppt.iTickHistorization = 0;
2858 pCpu->Ppt.uDesiredHz = 0;
2859
2860 /*
2861         * Check whether the current timer frequency needs adjusting.
2862 */
2863 uint32_t uHistMaxHz = 0;
2864 for (uint32_t i = 0; i < RT_ELEMENTS(pCpu->Ppt.aHzHistory); i++)
2865 if (pCpu->Ppt.aHzHistory[i] > uHistMaxHz)
2866 uHistMaxHz = pCpu->Ppt.aHzHistory[i];
2867 if (uHistMaxHz == pCpu->Ppt.uTimerHz)
2868 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2869 else if (uHistMaxHz)
2870 {
2871 /*
2872 * Reprogram it.
2873 */
2874 pCpu->Ppt.cChanges++;
2875 pCpu->Ppt.iTickHistorization = 0;
2876 pCpu->Ppt.uTimerHz = uHistMaxHz;
2877 uint32_t const cNsInterval = RT_NS_1SEC / uHistMaxHz;
2878 pCpu->Ppt.cNsInterval = cNsInterval;
2879 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2880 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2881 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2882 / cNsInterval;
2883 else
2884 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2885 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2886
2887 /*SUPR0Printf("Cpu%u: change to %u Hz / %u ns\n", pCpu->idxCpuSet, uHistMaxHz, cNsInterval);*/
2888 RTTimerChangeInterval(pTimer, cNsInterval);
2889 }
2890 else
2891 {
2892 /*
2893 * Stop it.
2894 */
2895 pCpu->Ppt.fStarted = false;
2896 pCpu->Ppt.uTimerHz = 0;
2897 pCpu->Ppt.cNsInterval = 0;
2898 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2899
2900 /*SUPR0Printf("Cpu%u: stopping (%u Hz)\n", pCpu->idxCpuSet, uHistMaxHz);*/
2901 RTTimerStop(pTimer);
2902 }
2903 }
2904 else
2905 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2906}
2907#endif /* GVMM_SCHED_WITH_PPT */
2908
2909
2910/**
2911 * Updates the periodic preemption timer for the calling CPU.
2912 *
2913 * The caller must have disabled preemption!
2914 * The caller must check that the host can do high resolution timers.
2915 *
2916 * @param pGVM The global (ring-0) VM structure.
2917 * @param idHostCpu The current host CPU id.
2918 * @param uHz The desired frequency.
2919 */
2920GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PGVM pGVM, RTCPUID idHostCpu, uint32_t uHz)
2921{
2922 NOREF(pGVM);
2923#ifdef GVMM_SCHED_WITH_PPT
2924 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2925 Assert(RTTimerCanDoHighResolution());
2926
2927 /*
2928 * Resolve the per CPU data.
2929 */
2930 uint32_t iCpu = RTMpCpuIdToSetIndex(idHostCpu);
2931 PGVMM pGVMM = g_pGVMM;
2932 if ( !RT_VALID_PTR(pGVMM)
2933 || pGVMM->u32Magic != GVMM_MAGIC)
2934 return;
2935 AssertMsgReturnVoid(iCpu < pGVMM->cHostCpus, ("iCpu=%d cHostCpus=%d\n", iCpu, pGVMM->cHostCpus));
2936 PGVMMHOSTCPU pCpu = &pGVMM->aHostCpus[iCpu];
2937 AssertMsgReturnVoid( pCpu->u32Magic == GVMMHOSTCPU_MAGIC
2938 && pCpu->idCpu == idHostCpu,
2939                         ("u32Magic=%#x idCpu=%d idHostCpu=%d\n", pCpu->u32Magic, pCpu->idCpu, idHostCpu));
2940
2941 /*
2942 * Check whether we need to do anything about the timer.
2943 * We have to be a little bit careful since we might be race the timer
2944     * We have to be a little bit careful since we might be racing the timer
2945 */
2946 if (uHz > 16384)
2947 uHz = 16384; /** @todo add a query method for this! */
2948 if (RT_UNLIKELY( uHz > ASMAtomicReadU32(&pCpu->Ppt.uDesiredHz)
2949 && uHz >= pCpu->Ppt.uMinHz
2950 && !pCpu->Ppt.fStarting /* solaris paranoia */))
2951 {
2952 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2953
2954 pCpu->Ppt.uDesiredHz = uHz;
2955 uint32_t cNsInterval = 0;
2956 if (!pCpu->Ppt.fStarted)
2957 {
2958 pCpu->Ppt.cStarts++;
2959 pCpu->Ppt.fStarted = true;
2960 pCpu->Ppt.fStarting = true;
2961 pCpu->Ppt.iTickHistorization = 0;
2962 pCpu->Ppt.uTimerHz = uHz;
2963 pCpu->Ppt.cNsInterval = cNsInterval = RT_NS_1SEC / uHz;
2964 if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2965 pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
2966 + GVMMHOSTCPU_PPT_HIST_INTERVAL_NS / 2 - 1)
2967 / cNsInterval;
2968 else
2969 pCpu->Ppt.cTicksHistoriziationInterval = 1;
2970 }
2971
2972 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2973
2974 if (cNsInterval)
2975 {
2976 RTTimerChangeInterval(pCpu->Ppt.pTimer, cNsInterval);
2977 int rc = RTTimerStart(pCpu->Ppt.pTimer, cNsInterval);
2978 AssertRC(rc);
2979
2980 RTSpinlockAcquire(pCpu->Ppt.hSpinlock);
2981 if (RT_FAILURE(rc))
2982 pCpu->Ppt.fStarted = false;
2983 pCpu->Ppt.fStarting = false;
2984 RTSpinlockRelease(pCpu->Ppt.hSpinlock);
2985 }
2986 }
2987#else /* !GVMM_SCHED_WITH_PPT */
2988 NOREF(idHostCpu); NOREF(uHz);
2989#endif /* !GVMM_SCHED_WITH_PPT */
2990}
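
/*
 * Example (illustrative sketch, not built): the calling convention in a
 * nutshell: preemption disabled and high resolution timer support verified
 * before reporting the desired frequency.  The helper is hypothetical.
 */
#if 0
static void hypotheticalReportTimerFrequency(PGVM pGVM, uint32_t uHz)
{
    if (RTTimerCanDoHighResolution())
    {
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
        RTThreadPreemptDisable(&PreemptState);
        GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, RTMpCpuId(), uHz);
        RTThreadPreemptRestore(&PreemptState);
    }
}
#endif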
2991
2992
2993/**
2994 * Calls @a pfnCallback for each VM in the system.
2995 *
2996 * This will enumerate the VMs while holding the global VM used list lock in
2997 * shared mode. So, only suitable for simple work. If more expensive work
2998 * needs doing, a different approach must be taken as using this API would
2999 * otherwise block VM creation and destruction.
3000 *
3001 * @returns VBox status code.
3002 * @param pfnCallback The callback function.
3003 * @param pvUser User argument to the callback.
3004 */
3005GVMMR0DECL(int) GVMMR0EnumVMs(PFNGVMMR0ENUMCALLBACK pfnCallback, void *pvUser)
3006{
3007 PGVMM pGVMM;
3008 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
3009
3010 int rc = VINF_SUCCESS;
3011 GVMMR0_USED_SHARED_LOCK(pGVMM);
3012 for (unsigned i = pGVMM->iUsedHead, cLoops = 0;
3013 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
3014 i = pGVMM->aHandles[i].iNext, cLoops++)
3015 {
3016 PGVM pGVM = pGVMM->aHandles[i].pGVM;
3017 if ( RT_VALID_PTR(pGVM)
3018 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
3019 && pGVM->u32Magic == GVM_MAGIC)
3020 {
3021 rc = pfnCallback(pGVM, pvUser);
3022 if (rc != VINF_SUCCESS)
3023 break;
3024 }
3025
3026 AssertBreak(cLoops < RT_ELEMENTS(pGVMM->aHandles) * 4); /* paranoia */
3027 }
3028 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
3029 return rc;
3030}
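
/*
 * Example (illustrative sketch, not built): a trivial enumeration callback
 * that counts the VMs; returning anything other than VINF_SUCCESS stops the
 * enumeration early.  Both the callback and its caller are hypothetical.
 */
#if 0
static DECLCALLBACK(int) hypotheticalCountVMsCallback(PGVM pGVM, void *pvUser)
{
    RT_NOREF(pGVM);
    *(uint32_t *)pvUser += 1;
    return VINF_SUCCESS;
}

static uint32_t hypotheticalCountVMs(void)
{
    uint32_t cVMs = 0;
    GVMMR0EnumVMs(hypotheticalCountVMsCallback, &cVMs);
    return cVMs;
}
#endif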
3031
3032
3033/**
3034 * Retrieves the GVMM statistics visible to the caller.
3035 *
3036 * @returns VBox status code.
3037 *
3038 * @param pStats Where to put the statistics.
3039 * @param pSession The current session.
3040 * @param pGVM The GVM to obtain statistics for. Optional.
3041 */
3042GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
3043{
3044 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
3045
3046 /*
3047 * Validate input.
3048 */
3049 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
3050 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
3051 pStats->cVMs = 0; /* (crash before taking the sem...) */
3052
3053 /*
3054 * Take the lock and get the VM statistics.
3055 */
3056 PGVMM pGVMM;
3057 if (pGVM)
3058 {
3059 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
3060 if (RT_FAILURE(rc))
3061 return rc;
3062 pStats->SchedVM = pGVM->gvmm.s.StatsSched;
3063 }
3064 else
3065 {
3066 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
3067 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
3068
3069 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
3070 AssertRCReturn(rc, rc);
3071 }
3072
3073 /*
3074     * Enumerate the VMs and add the ones visible to the caller to the statistics.
3075 */
3076 pStats->cVMs = 0;
3077 pStats->cEMTs = 0;
3078 memset(&pStats->SchedSum, 0, sizeof(pStats->SchedSum));
3079
3080 for (unsigned i = pGVMM->iUsedHead;
3081 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
3082 i = pGVMM->aHandles[i].iNext)
3083 {
3084 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
3085 void *pvObj = pGVMM->aHandles[i].pvObj;
3086 if ( RT_VALID_PTR(pvObj)
3087 && RT_VALID_PTR(pOtherGVM)
3088 && pOtherGVM->u32Magic == GVM_MAGIC
3089 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
3090 {
3091 pStats->cVMs++;
3092 pStats->cEMTs += pOtherGVM->cCpus;
3093
3094 pStats->SchedSum.cHaltCalls += pOtherGVM->gvmm.s.StatsSched.cHaltCalls;
3095 pStats->SchedSum.cHaltBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltBlocking;
3096 pStats->SchedSum.cHaltTimeouts += pOtherGVM->gvmm.s.StatsSched.cHaltTimeouts;
3097 pStats->SchedSum.cHaltNotBlocking += pOtherGVM->gvmm.s.StatsSched.cHaltNotBlocking;
3098 pStats->SchedSum.cHaltWakeUps += pOtherGVM->gvmm.s.StatsSched.cHaltWakeUps;
3099
3100 pStats->SchedSum.cWakeUpCalls += pOtherGVM->gvmm.s.StatsSched.cWakeUpCalls;
3101 pStats->SchedSum.cWakeUpNotHalted += pOtherGVM->gvmm.s.StatsSched.cWakeUpNotHalted;
3102 pStats->SchedSum.cWakeUpWakeUps += pOtherGVM->gvmm.s.StatsSched.cWakeUpWakeUps;
3103
3104 pStats->SchedSum.cPokeCalls += pOtherGVM->gvmm.s.StatsSched.cPokeCalls;
3105 pStats->SchedSum.cPokeNotBusy += pOtherGVM->gvmm.s.StatsSched.cPokeNotBusy;
3106
3107 pStats->SchedSum.cPollCalls += pOtherGVM->gvmm.s.StatsSched.cPollCalls;
3108 pStats->SchedSum.cPollHalts += pOtherGVM->gvmm.s.StatsSched.cPollHalts;
3109 pStats->SchedSum.cPollWakeUps += pOtherGVM->gvmm.s.StatsSched.cPollWakeUps;
3110 }
3111 }
3112
3113 /*
3114 * Copy out the per host CPU statistics.
3115 */
3116 uint32_t iDstCpu = 0;
3117 uint32_t cSrcCpus = pGVMM->cHostCpus;
3118 for (uint32_t iSrcCpu = 0; iSrcCpu < cSrcCpus; iSrcCpu++)
3119 {
3120 if (pGVMM->aHostCpus[iSrcCpu].idCpu != NIL_RTCPUID)
3121 {
3122 pStats->aHostCpus[iDstCpu].idCpu = pGVMM->aHostCpus[iSrcCpu].idCpu;
3123 pStats->aHostCpus[iDstCpu].idxCpuSet = pGVMM->aHostCpus[iSrcCpu].idxCpuSet;
3124#ifdef GVMM_SCHED_WITH_PPT
3125 pStats->aHostCpus[iDstCpu].uDesiredHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uDesiredHz;
3126 pStats->aHostCpus[iDstCpu].uTimerHz = pGVMM->aHostCpus[iSrcCpu].Ppt.uTimerHz;
3127 pStats->aHostCpus[iDstCpu].cChanges = pGVMM->aHostCpus[iSrcCpu].Ppt.cChanges;
3128 pStats->aHostCpus[iDstCpu].cStarts = pGVMM->aHostCpus[iSrcCpu].Ppt.cStarts;
3129#else
3130 pStats->aHostCpus[iDstCpu].uDesiredHz = 0;
3131 pStats->aHostCpus[iDstCpu].uTimerHz = 0;
3132 pStats->aHostCpus[iDstCpu].cChanges = 0;
3133 pStats->aHostCpus[iDstCpu].cStarts = 0;
3134#endif
3135 iDstCpu++;
3136 if (iDstCpu >= RT_ELEMENTS(pStats->aHostCpus))
3137 break;
3138 }
3139 }
3140 pStats->cHostCpus = iDstCpu;
3141
3142 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
3143
3144 return VINF_SUCCESS;
3145}
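
/*
 * Example (illustrative sketch, not built): querying the global statistics
 * for every VM visible to a session.  The static buffer is an illustrative
 * simplification, GVMMSTATS being too large for a typical ring-0 stack.
 */
#if 0
static int hypotheticalLogGlobalStats(PSUPDRVSESSION pSession)
{
    static GVMMSTATS s_Stats;
    int rc = GVMMR0QueryStatistics(&s_Stats, pSession, NULL /*pGVM*/);
    if (RT_SUCCESS(rc))
        SUPR0Printf("GVMM: %u VMs, %u EMTs, %u host CPUs\n", s_Stats.cVMs, s_Stats.cEMTs, s_Stats.cHostCpus);
    return rc;
}
#endif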
3146
3147
3148/**
3149 * VMMR0 request wrapper for GVMMR0QueryStatistics.
3150 *
3151 * @returns see GVMMR0QueryStatistics.
3152 * @param pGVM The global (ring-0) VM structure. Optional.
3153 * @param pReq Pointer to the request packet.
3154 * @param pSession The current session.
3155 */
3156GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
3157{
3158 /*
3159 * Validate input and pass it on.
3160 */
3161 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3162 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3163 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
3164
3165 return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM);
3166}
3167
3168
3169/**
3170 * Resets the specified GVMM statistics.
3171 *
3172 * @returns VBox status code.
3173 *
3174 * @param pStats        Which statistics to reset, that is, non-zero fields indicate which to reset.
3175 * @param pSession The current session.
3176 * @param pGVM The GVM to reset statistics for. Optional.
3177 */
3178GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
3179{
3180 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
3181
3182 /*
3183 * Validate input.
3184 */
3185 AssertPtrReturn(pSession, VERR_INVALID_POINTER);
3186 AssertPtrReturn(pStats, VERR_INVALID_POINTER);
3187
3188 /*
3189 * Take the lock and get the VM statistics.
3190 */
3191 PGVMM pGVMM;
3192 if (pGVM)
3193 {
3194 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
3195 if (RT_FAILURE(rc))
3196 return rc;
3197# define MAYBE_RESET_FIELD(field) \
3198 do { if (pStats->SchedVM. field ) { pGVM->gvmm.s.StatsSched. field = 0; } } while (0)
3199 MAYBE_RESET_FIELD(cHaltCalls);
3200 MAYBE_RESET_FIELD(cHaltBlocking);
3201 MAYBE_RESET_FIELD(cHaltTimeouts);
3202 MAYBE_RESET_FIELD(cHaltNotBlocking);
3203 MAYBE_RESET_FIELD(cHaltWakeUps);
3204 MAYBE_RESET_FIELD(cWakeUpCalls);
3205 MAYBE_RESET_FIELD(cWakeUpNotHalted);
3206 MAYBE_RESET_FIELD(cWakeUpWakeUps);
3207 MAYBE_RESET_FIELD(cPokeCalls);
3208 MAYBE_RESET_FIELD(cPokeNotBusy);
3209 MAYBE_RESET_FIELD(cPollCalls);
3210 MAYBE_RESET_FIELD(cPollHalts);
3211 MAYBE_RESET_FIELD(cPollWakeUps);
3212# undef MAYBE_RESET_FIELD
3213 }
3214 else
3215 {
3216 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
3217
3218 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
3219 AssertRCReturn(rc, rc);
3220 }
3221
3222 /*
3223     * Enumerate the VMs and reset the requested statistics for the ones visible to the caller.
3224 */
3225 if (!ASMMemIsZero(&pStats->SchedSum, sizeof(pStats->SchedSum)))
3226 {
3227 for (unsigned i = pGVMM->iUsedHead;
3228 i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
3229 i = pGVMM->aHandles[i].iNext)
3230 {
3231 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
3232 void *pvObj = pGVMM->aHandles[i].pvObj;
3233 if ( RT_VALID_PTR(pvObj)
3234 && RT_VALID_PTR(pOtherGVM)
3235 && pOtherGVM->u32Magic == GVM_MAGIC
3236 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
3237 {
3238# define MAYBE_RESET_FIELD(field) \
3239 do { if (pStats->SchedSum. field ) { pOtherGVM->gvmm.s.StatsSched. field = 0; } } while (0)
3240 MAYBE_RESET_FIELD(cHaltCalls);
3241 MAYBE_RESET_FIELD(cHaltBlocking);
3242 MAYBE_RESET_FIELD(cHaltTimeouts);
3243 MAYBE_RESET_FIELD(cHaltNotBlocking);
3244 MAYBE_RESET_FIELD(cHaltWakeUps);
3245 MAYBE_RESET_FIELD(cWakeUpCalls);
3246 MAYBE_RESET_FIELD(cWakeUpNotHalted);
3247 MAYBE_RESET_FIELD(cWakeUpWakeUps);
3248 MAYBE_RESET_FIELD(cPokeCalls);
3249 MAYBE_RESET_FIELD(cPokeNotBusy);
3250 MAYBE_RESET_FIELD(cPollCalls);
3251 MAYBE_RESET_FIELD(cPollHalts);
3252 MAYBE_RESET_FIELD(cPollWakeUps);
3253# undef MAYBE_RESET_FIELD
3254 }
3255 }
3256 }
3257
3258 GVMMR0_USED_SHARED_UNLOCK(pGVMM);
3259
3260 return VINF_SUCCESS;
3261}
3262
3263
3264/**
3265 * VMMR0 request wrapper for GVMMR0ResetStatistics.
3266 *
3267 * @returns see GVMMR0ResetStatistics.
3268 * @param pGVM The global (ring-0) VM structure. Optional.
3269 * @param pReq Pointer to the request packet.
3270 * @param pSession The current session.
3271 */
3272GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
3273{
3274 /*
3275 * Validate input and pass it on.
3276 */
3277 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3278 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3279 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
3280
3281 return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM);
3282}
3283