VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@37517

Last change on this file since 37517 was 37517, checked in by vboxsync, 14 years ago

TM: Simplified the virtual sync timers by requiring changes to be done while holding the virtual sync lock. This means we can skip all the pending states and move timers on and off the active list immediately, avoiding the problems with timers being on the pending-scheduling list. Also made u64VirtualSync keep track of the last time stamp all the time (when under the lock) and thus really making sure time does not jump backwards.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.9 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_vmm_vm_h
#define ___VBox_vmm_vm_h

#include <VBox/types.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/**
 * The state of a Virtual CPU.
 *
 * The basic state indicated here is whether the CPU has been started or not. In
 * addition, there are sub-states when started for assisting scheduling (GVMM
 * mostly).
 *
 * The transition out of the STOPPED state is done by a vmR3PowerOn.
 * The transition back to the STOPPED state is done by vmR3PowerOff.
 *
 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Virtual CPU has not yet been started. */
    VMCPUSTATE_STOPPED,

    /** CPU started. */
    VMCPUSTATE_STARTED,
    /** Executing guest code and can be poked. */
    VMCPUSTATE_STARTED_EXEC,
    /** Executing guest code in the recompiler. */
    VMCPUSTATE_STARTED_EXEC_REM,
    /** Halted. */
    VMCPUSTATE_STARTED_HALTED,

    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Alignment padding. */
    RTR0PTR                 pvR0Padding;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpu array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;
    /** The native R0 thread handle. (different from the R3 handle!) */
    RTNATIVETHREAD          hNativeThreadR0;
    /** Which host CPU ID is this EMT running on.
     * Only valid when in RC or HWACCMR0 with scheduling disabled. */
    RTCPUID volatile        idHostCpu;
    /** State data for use by ad hoc profiling. */
    uint32_t                uAdHoc;
    /** Profiling samples for use by ad hoc profiling. */
    STAMPROFILEADV          aStatAdHoc[8];

    /** Align the next bit on a 64-byte boundary and make sure it starts at the same
     * offset in both 64-bit and 32-bit builds.
     *
     * @remarks The alignments of the members that are larger than 48 bytes should be
     *          64-byte for cache line reasons. Structs containing small amounts of
     *          data could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size).
     */
    uint8_t                 abAlignment1[HC_ARCH_BITS == 32 ? 16+64 : 56];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        uint8_t             padding[3456];      /* multiple of 64 */
    } cpum;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        uint8_t             padding[5312];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        uint8_t             padding[1472];      /* multiple of 64 */
    } em;

    /** IEM part. */
    union
    {
#ifdef ___IEMInternal_h
        struct IEMCPU       s;
#endif
        uint8_t             padding[1024];      /* multiple of 64 */
    } iem;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        uint8_t             padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        uint8_t             padding[384];       /* multiple of 64 */
    } tm;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        uint8_t             padding[384];       /* multiple of 64 */
    } vmm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDMCPU       s;
#endif
        uint8_t             padding[128];       /* multiple of 64 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOMCPU       s;
#endif
        uint8_t             padding[512];       /* multiple of 64 */
    } iom;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

    /** Align the following members on page boundary. */
    uint8_t                 abAlignment2[3072];

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        uint8_t             padding[4096];      /* multiple of 4096 */
    } pgm;

} VMCPU;
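
/** @remarks The padding conventions above are checked by tstVMStructSize; an
 * illustrative sketch (not the actual test program) of equivalent compile-time
 * checks using the IPRT AssertCompile macros from iprt/assert.h might look like:
 * @code
 *      AssertCompile(RT_SIZEOFMEMB(VMCPU, cpum) % 64 == 0);    // union is a multiple of 64
 *      AssertCompileMemberAlignment(VMCPU, pgm, 4096);         // pgm union starts on a page
 *      AssertCompile(sizeof(VMCPU) % 4096 == 0);               // so aCpus[] indexing stays page aligned
 * @endcode
 */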


/** @name Operations on VMCPU::enmState
 * @{ */
/** Gets the VMCPU state. */
#define VMCPU_GET_STATE(pVCpu)              ( (pVCpu)->enmState )
/** Sets the VMCPU state. */
#define VMCPU_SET_STATE(pVCpu, enmNewState) \
    ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
/** Compares and sets the VMCPU state. */
#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
    ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
/** Checks the VMCPU state. */
#define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
    do { \
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
        AssertMsg(enmState == (enmExpectedState), \
                  ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
                   enmState, enmExpectedState, (pVCpu)->idCpu)); \
    } while (0)
/** Tests if the state means that the CPU is started. */
#define VMCPUSTATE_IS_STARTED(enmState)     ( (enmState) > VMCPUSTATE_STOPPED )
/** Tests if the state means that the CPU is stopped. */
#define VMCPUSTATE_IS_STOPPED(enmState)     ( (enmState) == VMCPUSTATE_STOPPED )
/** @} */
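
/** @par Example (illustrative sketch): halting a started virtual CPU with a
 * compare-and-exchange so that a concurrent state change cannot be lost; the
 * surrounding wait logic is a hypothetical placeholder.
 * @code
 *      if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
 *      {
 *          // ... block on the halt event semaphore until poked ...
 *          VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
 *      }
 *      // else: someone changed the state under us, retry or bail out.
 * @endcode
 */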

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** The virtual sync clock has been stopped, go to TM until it has been
 * restarted... */
#define VM_FF_TM_VIRTUAL_SYNC               RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
/** The bit number for VM_FF_PDM_QUEUES. */
#define VM_FF_PDM_QUEUES_BIT                3
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(VM_FF_PDM_DMA_BIT)
/** The bit number for VM_FF_PDM_DMA. */
#define VM_FF_PDM_DMA_BIT                   4
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(VM_FF_DBGF_BIT)
/** The bit number for VM_FF_DBGF. */
#define VM_FF_DBGF_BIT                      8
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Check for VM state changes and take appropriate action. */
#define VM_FF_CHECK_VM_STATE                RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
/** The bit number for VM_FF_CHECK_VM_STATE. */
#define VM_FF_CHECK_VM_STATE_BIT            10
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(VM_FF_RESET_BIT)
/** The bit number for VM_FF_RESET. */
#define VM_FF_RESET_BIT                     11
/** EMT rendezvous in VMM. */
#define VM_FF_EMT_RENDEZVOUS                RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
/** The bit number for VM_FF_EMT_RENDEZVOUS. */
#define VM_FF_EMT_RENDEZVOUS_BIT            12

/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** PGM is about to perform a lightweight pool flush.
 * Guest SMP: all EMT threads should return to ring 3. */
#define VM_FF_PGM_POOL_FLUSH_PENDING        RT_BIT_32(20)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(VM_FF_REM_HANDLER_NOTIFY_BIT)
/** The bit number for VM_FF_REM_HANDLER_NOTIFY. */
#define VM_FF_REM_HANDLER_NOTIFY_BIT        29
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check any pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check any pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM).
 * @remarks Don't move - PATM compatibility. */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to check any pending NMIs. */
#define VMCPU_FF_INTERRUPT_NMI_BIT          3
#define VMCPU_FF_INTERRUPT_NMI              RT_BIT_32(VMCPU_FF_INTERRUPT_NMI_BIT)
/** This action forces the VM to check any pending SMIs. */
#define VMCPU_FF_INTERRUPT_SMI_BIT          4
#define VMCPU_FF_INTERRUPT_SMI              RT_BIT_32(VMCPU_FF_INTERRUPT_SMI_BIT)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VMCPU_FF_PDM_CRITSECT               RT_BIT_32(5)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check for pending TLB shootdown actions.
 * Consumer: HWACCM
 * @todo rename to VMCPU_FF_HWACCM_TLB_SHOOTDOWN */
#define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(18)
/** Check for pending TLB flush action.
 * Consumer: HWACCM
 * @todo rename to VMCPU_FF_HWACCM_TLB_FLUSH */
#define VMCPU_FF_TLB_FLUSH                  RT_BIT_32(VMCPU_FF_TLB_FLUSH_BIT)
/** The bit number for VMCPU_FF_TLB_FLUSH. */
#define VMCPU_FF_TLB_FLUSH_BIT              19
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring 0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (  VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
                                                 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
                                                 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_PDM_CRITSECT | VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (  VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
                                                 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)

/** All the forced VM flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_MASK                       (~0U)

/** All the forced VM flags except those related to raw-mode and hardware
 * assisted execution. */
#define VM_FF_ALL_REM_MASK                      (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags except those related to raw-mode and hardware
 * assisted execution. */
#define VMCPU_FF_ALL_REM_MASK                   (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_TLB_FLUSH | VMCPU_FF_TLB_SHOOTDOWN))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VM_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVM     VM Handle.
 * @param   iBit    Bit position to check and clear.
 */
#define VM_FF_TESTANDCLEAR(pVM, iBit)       (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))

/** @def VMCPU_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVCpu   VMCPU Handle.
 * @param   iBit    Bit position to check and clear.
 */
#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)  (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
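
/** @par Example (illustrative sketch): note the token pasting above - the iBit
 * argument must be a flag name with a matching *_BIT companion define, e.g.
 * VM_FF_PDM_DMA / VM_FF_PDM_DMA_BIT:
 * @code
 *      if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA))     // tests and clears bit VM_FF_PDM_DMA_BIT
 *          PDMR3DmaRun(pVM);                           // typical ring-3 consumer of this flag
 * @endcode
 */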

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)   ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
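
/** @par Example (illustrative sketch): the typical producer/consumer pattern for
 * forced actions - any thread raises a flag, the EMT polls and services it. The
 * servicing routine below is a hypothetical placeholder.
 * @code
 *      // Producer, any thread:
 *      VM_FF_SET(pVM, VM_FF_REQUEST);
 *
 *      // Consumer, in the EMT loop:
 *      if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
 *      {
 *          VM_FF_CLEAR(pVM, VM_FF_REQUEST);
 *          someRequestServicer(pVM);       // hypothetical servicing routine
 *      }
 * @endcode
 */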

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#else
# define VM_IS_EMT(pVM)                     (VMMGetCpu(pVM) != NULL)
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#else
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_OR_GURU
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU, or that the VM is in guru meditation.
 */
#if defined(IN_RC) || defined(IN_RING0)
# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
    Assert(   VMCPU_IS_EMT(pVCpu) \
           || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
           || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
#else
# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
    AssertMsg(   VMCPU_IS_EMT(pVCpu) \
              || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
              || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU when the VM is running.
 */
#if defined(IN_RC) || defined(IN_RING0)
# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
    Assert(   VMCPU_IS_EMT(pVCpu) \
           || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING \
           || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING_LS \
           || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING_FT )
#else
# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
    AssertMsg(   VMCPU_IS_EMT(pVCpu) \
              || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING \
              || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING_LS \
              || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_RUNNING_FT, \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT0
 * Asserts that the current thread IS emulation thread \#0 (EMT0).
 */
#define VM_ASSERT_EMT0(pVM)                 VMCPU_ASSERT_EMT(&(pVM)->aCpus[0])

/** @def VM_ASSERT_EMT0_RETURN
 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
 * it isn't.
 */
#define VM_ASSERT_EMT0_RETURN(pVM, rc)      VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
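
/** @par Example (illustrative sketch): the usual guard at the top of an
 * EMT-only ring-3 API; the function itself is hypothetical.
 * @code
 *      VMMR3DECL(int) someEmtOnlyOperation(PVM pVM, PVMCPU pVCpu)
 *      {
 *          VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
 *          // ... safe to touch per-CPU state from here on ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */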


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                    && (   (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
                        || (   (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
                            && VM_IS_EMT(pVM))), \
                    ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     ? VMGetStateName(pVM->enmVMState) : ""), \
                    (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                    &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                    &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                     RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                    (rc))
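
/** @par Example (illustrative sketch): an externally callable query validates
 * its handle before touching anything; the function is hypothetical.
 * @code
 *      VMMR3DECL(uint32_t) someExternalQuery(PVM pVM)
 *      {
 *          VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);    // bail out on a stale handle
 *          return pVM->cCpus;
 *      }
 * @endcode
 */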


/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which have to be available in all
 * contexts. Even though it contains all the data, the idea is to access it
 * through APIs rather than modifying the members from all around the place.
 * Therefore we make use of unions to hide everything which isn't local to the
 * current source module. This means we'll have to pay a little bit of attention
 * when adding new members to structures in the unions and make sure to keep the
 * padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE volatile            enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCpus;
    /** CPU execution cap (1-100). */
    uint32_t                    uCpuExecutionCap;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[5];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap,
     * when we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** Set when this VM is the master FT node. */
    bool                        fFaultTolerantMaster;
    /** Large page enabled flag. */
    bool                        fUseLargePages;
    /** @} */

    /** @name Debugging
     * @{ */
    /** Raw-mode Context trace buffer handle. */
    RCPTRTYPE(RTTRACEBUF)       hTraceBufRC;
    /** Alignment padding. */
    uint32_t                    uPadding3;
    /** Ring-3 Host Context trace buffer handle. */
    R3PTRTYPE(RTTRACEBUF)       hTraceBufR3;
    /** Ring-0 Host Context trace buffer handle. */
    R0PTRTYPE(RTTRACEBUF)       hTraceBufR0;
    /** @} */

#if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t                    uPadding4;
#endif

    /** @name Switcher statistics (remove)
     * @{ */
    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;
    /** @} */

#if HC_ARCH_BITS != 64
    /** Padding - the unions must be aligned on a 64 byte boundary and the unions
     * must start at the same offset on both 64-bit and 32-bit hosts. */
    uint8_t                     abAlignment1[HC_ARCH_BITS == 32 ? 32 : 0];
#endif

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        uint8_t     padding[1472];          /* multiple of 64 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        uint8_t     padding[1600];          /* multiple of 64 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        uint8_t     padding[4096*2+6080];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        uint8_t     padding[5376];          /* multiple of 64 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        uint8_t     padding[5184];          /* multiple of 64 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        uint8_t     padding[576];           /* multiple of 64 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        uint8_t     padding[192];           /* multiple of 64 */
    } mm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        uint8_t     padding[1920];          /* multiple of 64 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        uint8_t     padding[832];           /* multiple of 64 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        uint8_t     padding[768];           /* multiple of 64 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        uint8_t     padding[1088];          /* multiple of 64 */
    } csam;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        uint8_t     padding[256];           /* multiple of 64 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        uint8_t     padding[2432];          /* multiple of 64 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        uint8_t     padding[2368];          /* multiple of 64 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        uint8_t     padding[128];           /* multiple of 64 */
    } ssm;

    /** FTM part. */
    union
    {
#ifdef ___FTMInternal_h
        struct FTM  s;
#endif
        uint8_t     padding[512];           /* multiple of 64 */
    } ftm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif
        uint8_t     padding[0x11100];       /* multiple of 64 */
    } rem;

    /* ---- begin small stuff ---- */

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        uint8_t     padding[24];            /* multiple of 8 */
    } vm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        uint8_t     padding[8];             /* multiple of 8 */
    } cfgm;


    /** Padding for aligning the cpu array on a page boundary. */
    uint8_t         abAlignment2[862];

    /* ---- end small stuff ---- */

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a page boundary for TLB hit reasons as well as
     * alignment of VMCPU members. */
    VMCPU           aCpus[1];
} VM;
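
/** @par Example (illustrative sketch): since aCpus is the trailing, page-aligned
 * array, per-CPU data is addressed by simple indexing; the helper below is
 * hypothetical.
 * @code
 *      DECLINLINE(PVMCPU) someGetCpuHelper(PVM pVM, VMCPUID idCpu)
 *      {
 *          AssertReturn(idCpu < pVM->cCpus, NULL);     // reject out-of-range CPU IDs
 *          return &pVM->aCpus[idCpu];
 *      }
 * @endcode
 * The offVMCPU member supports the same arithmetic for code that cannot see the
 * full VM structure layout.
 */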


#ifdef IN_RC
RT_C_DECLS_BEGIN

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one
 * of those magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)    g_VM;

RT_C_DECLS_END
#endif

/** @} */

#endif

