VirtualBox

source: vbox/trunk/include/VBox/vm.h @ 22707

Last change on this file since 22707 was 22707, checked in by vboxsync, 15 years ago

REM: Double the size of aHandlerNotifications and make sure there are at least 48 free entries in REMNotifyHandlerPhysicalFlushIfAlmostFull. This should hopefully get rid of the rare cases where we need to flush the notifications during pgmMapActivateCR3 in RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
/** @file
 * VM - The Virtual Machine, data. (VMM)
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>
#include <VBox/vmm.h>

/** @defgroup grp_vm The Virtual Machine
 * @{
 */

/**
 * The state of a Virtual CPU.
 *
 * The basic state indicated here is whether the CPU has been started or not. In
 * addition, there are sub-states when started for assisting scheduling (GVMM
 * mostly).
 *
 * The transition out of the STOPPED state is done by vmR3PowerOn.
 * The transition back to the STOPPED state is done by vmR3PowerOff.
 *
 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Virtual CPU has not yet been started. */
    VMCPUSTATE_STOPPED,

    /** CPU started. */
    VMCPUSTATE_STARTED,
    /** Executing guest code and can be poked. */
    VMCPUSTATE_STARTED_EXEC,
    /** Executing guest code in the recompiler. */
    VMCPUSTATE_STARTED_EXEC_REM,
    /** Halted. */
    VMCPUSTATE_STARTED_HALTED,

    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;
    /** Which host CPU ID is this EMT running on.
     * Only valid when in RC or HWACCMR0 with scheduling disabled. */
    RTCPUID volatile        idHostCpu;

    /** Align the next member on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes should be
     *          64 bytes for cache line reasons. Structs containing small amounts of
     *          data could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size).
     */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 7 : 3];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[6144];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[384];       /* multiple of 64 */
    } vmm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDMCPU       s;
#endif
        char                padding[128];       /* multiple of 64 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOMCPU       s;
#endif
        char                padding[512];       /* multiple of 64 */
    } iom;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;
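
/** @remarks A minimal compile-time sanity sketch for the padding discipline
 * described above; the authoritative checking is done by tstVMStructSize.
 * AssertCompile and RT_OFFSETOF come from iprt/assert.h and iprt/cdefs.h,
 * which are assumed includes for this snippet.
 * @code
 *      AssertCompile(!(sizeof(VMCPU) & 63));            // size pads out to whole cache lines
 *      AssertCompile(!(RT_OFFSETOF(VMCPU, cpum) & 63)); // first big union is 64-byte aligned
 * @endcode
 */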


/** @name Operations on VMCPU::enmState
 * @{ */
/** Gets the VMCPU state. */
#define VMCPU_GET_STATE(pVCpu)              ( (pVCpu)->enmState )
/** Sets the VMCPU state. */
#define VMCPU_SET_STATE(pVCpu, enmNewState) \
    ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
/** Compares and sets the VMCPU state. */
#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
    ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
/** Checks the VMCPU state. */
#define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
    do { \
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
        AssertMsg(enmState == (enmExpectedState), \
                  ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
                  enmState, enmExpectedState, (pVCpu)->idCpu)); \
    } while (0)
/** Tests if the state means that the CPU is started. */
#define VMCPUSTATE_IS_STARTED(enmState)     ( (enmState) > VMCPUSTATE_STOPPED )
/** Tests if the state means that the CPU is stopped. */
#define VMCPUSTATE_IS_STOPPED(enmState)     ( (enmState) == VMCPUSTATE_STOPPED )
/** @} */
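
/** @remarks A minimal usage sketch (illustrative, not actual VMM code) of the
 * state macros above: the compare-exchange form makes the halt transition safe
 * against a concurrent state change, assuming pVCpu is the caller's own VCPU.
 * @code
 *      if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
 *      {
 *          // ... block on the halt event semaphore here ...
 *          VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
 *      }
 *      // else: the state changed under us; re-check pending forced actions instead.
 * @endcode
 */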


/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** The virtual sync clock has been stopped, go to TM until it has been
 * restarted... */
#define VM_FF_TM_VIRTUAL_SYNC               RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
/** The bit number for VM_FF_PDM_QUEUES. */
#define VM_FF_PDM_QUEUES_BIT                3
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(VM_FF_PDM_DMA_BIT)
/** The bit number for VM_FF_PDM_DMA. */
#define VM_FF_PDM_DMA_BIT                   4
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(VM_FF_DBGF_BIT)
/** The bit number for VM_FF_DBGF. */
#define VM_FF_DBGF_BIT                      8
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(VM_FF_RESET_BIT)
/** The bit number for VM_FF_RESET. */
#define VM_FF_RESET_BIT                     11
/** EMT rendezvous in VMM. */
#define VM_FF_EMT_RENDEZVOUS                RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
/** The bit number for VM_FF_EMT_RENDEZVOUS. */
#define VM_FF_EMT_RENDEZVOUS_BIT            12

/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(VM_FF_REM_HANDLER_NOTIFY_BIT)
/** The bit number for VM_FF_REM_HANDLER_NOTIFY. */
#define VM_FF_REM_HANDLER_NOTIFY_BIT        29
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check any pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check any pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timer (TM).
 * @remarks Don't move - PATM compatibility. */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to check any pending NMIs. */
#define VMCPU_FF_INTERRUPT_NMI_BIT          3
#define VMCPU_FF_INTERRUPT_NMI              RT_BIT_32(VMCPU_FF_INTERRUPT_NMI_BIT)
/** This action forces the VM to check any pending SMIs. */
#define VMCPU_FF_INTERRUPT_SMI_BIT          4
#define VMCPU_FF_INTERRUPT_SMI              RT_BIT_32(VMCPU_FF_INTERRUPT_SMI_BIT)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VMCPU_FF_PDM_CRITSECT               RT_BIT_32(5)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check for pending TLB shootdown actions. */
#define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(18)
/** Check for pending TLB flush action. */
#define VMCPU_FF_TLB_FLUSH                  RT_BIT_32(VMCPU_FF_TLB_FLUSH_BIT)
/** The bit number for VMCPU_FF_TLB_FLUSH. */
#define VMCPU_FF_TLB_FLUSH_BIT              19
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring 0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC | VM_FF_DEBUG_SUSPEND \
                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_PDM_CRITSECT | VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_QUEUES)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)

/** All the forced VM flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_MASK                       (~0U)

/** All the forced VM flags except the ones handled before raw-mode execution. */
#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags except the ones handled before raw-mode execution. */
#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_PDM_CRITSECT))

/** @} */
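
/** @remarks How the masks above are intended to be consumed (a hedged sketch,
 * not actual EM code): test both the global and the per-VCPU pending sets
 * before resuming guest execution; the helper name is hypothetical.
 * @code
 *      if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
 *          ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
 *          rc = emExampleProcessForcedActions(pVM, pVCpu, rc); // hypothetical helper
 * @endcode
 */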

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
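
/** @remarks Producer/consumer sketch for the set/clear pairs above (pVCpu is
 * assumed valid; illustrative only): any thread may raise a flag, and the EMT
 * clears it once the work has been done.
 * @code
 *      VMCPU_FF_SET(pVCpu, VMCPU_FF_TIMER);    // e.g. raised by the timer code
 *      // ... later, on the EMT, after the timer queues have been run:
 *      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
 * @endcode
 */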

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VM_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVM     VM Handle.
 * @param   iBit    The flag to test and clear; a corresponding *_BIT define
 *                  must exist, as the macro token-pastes the _BIT suffix on.
 */
#define VM_FF_TESTANDCLEAR(pVM, iBit)       (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))

/** @def VMCPU_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVCpu   VMCPU Handle.
 * @param   iBit    The flag to test and clear; a corresponding *_BIT define
 *                  must exist, as the macro token-pastes the _BIT suffix on.
 */
#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)  (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
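
/** @remarks Note the iBit##_BIT token pasting in the two macros above: the
 * argument must be a flag name for which a matching *_BIT define exists.
 * A hedged example, with PDMR3DmaRun assumed to be the ring-3 DMA service
 * entry point:
 * @code
 *      if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA))   // expands to VM_FF_PDM_DMA_BIT
 *          PDMR3DmaRun(pVM);
 * @endcode
 */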

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)   ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#else
# define VM_IS_EMT(pVM)                     (VMMGetCpu(pVM) != NULL)
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#else
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
#endif
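
/** @remarks A hedged dispatch sketch using the checks above; both helper names
 * are hypothetical. The point is that non-EMT callers must defer the work
 * (e.g. via the VM request queues) rather than touch VM state directly.
 * @code
 *      if (VMCPU_IS_EMT(pVCpu))
 *          exampleDoItNow(pVCpu);          // hypothetical: safe, we are the EMT
 *      else
 *          exampleQueueForEmt(pVCpu);      // hypothetical: push a request to the EMT
 * @endcode
 */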

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. that it is
 * not being destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                    && (   (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
                        || (   (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
                            && VM_IS_EMT(pVM))), \
                    ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     ? VMGetStateName(pVM->enmVMState) : ""), \
                    (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. that it is
 * not being destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                    &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                    &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                     RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                    (rc))
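
/** @remarks A sketch (assumed function name) of the canonical prologue of an
 * externally callable API: validate the handle, then enforce the calling
 * thread, each bailing out with a distinct status code.
 * @code
 *      VMMR3DECL(int) VMR3ExampleExternal(PVM pVM)
 *      {
 *          VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 *          VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */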


/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to modify
 * members through APIs rather than poking at them from all over the place.
 * Therefore we make use of unions to hide everything which isn't local to
 * the current source module. This means we'll have to pay a little bit of
 * attention when adding new members to structures in the unions and make
 * sure to keep the padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];
    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put StatTotalQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32-byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];
    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[8192];          /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];           /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[2112];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
# define VM_REM_SIZE    0x11100
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64-byte boundary. */
    uint32_t        u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU           aCpus[1];
} VM;
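
/** @remarks The VMCPU array lives at the tail of the structure; a hedged
 * sketch of the two equivalent ways of reaching a given VCPU, assuming
 * idCpu < pVM->cCPUs:
 * @code
 *      PVMCPU pVCpu  = &pVM->aCpus[idCpu];
 *      PVMCPU pVCpu2 = (PVMCPU)((uintptr_t)pVM + pVM->offVMCPU) + idCpu;
 *      Assert(pVCpu == pVCpu2);
 * @endcode
 */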


#ifdef IN_RC
RT_C_DECLS_BEGIN

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

RT_C_DECLS_END
#endif

/** @} */

#endif /* !___VBox_vm_h */