VirtualBox

source: vbox/trunk/include/VBox/vm.h@13841

Last change on this file since 13841 was 13832, checked in by vboxsync, 16 years ago

IN_GC -> IN_RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.7 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/** Maximum number of virtual CPUs per VM. */
#define VMCPU_MAX_CPU_COUNT 255

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for all the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;

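/* A minimal usage sketch (added for illustration, not part of the original
 * header): enmState is declared volatile below and is meant to be updated
 * atomically. The helper name is an assumption, not a VirtualBox API. */
#if 0 /* illustrative only */
# include <iprt/asm.h>

DECLINLINE(void) vmcpuExampleSetState(PVMCPU pVCpu, VMCPUSTATE enmNewState)
{
    /* VMCPUSTATE is forced to 32 bits (see VMCPUSTATE_32BIT_HACK), so a
       32-bit atomic exchange is sufficient. */
    ASMAtomicXchgU32((uint32_t volatile *)&pVCpu->enmState, (uint32_t)enmNewState);
}
#endif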

/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpu array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary. */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 9 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];  /* multiple of 32 */
    } cpum;
    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[32];    /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32];    /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[32];    /* multiple of 32 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[32];    /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[32];    /* multiple of 32 */
    } tm;
} VMCPU;

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif
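
/* A hedged sketch (not in the original file) of how the padded-union pattern
 * above is consumed: a module that has included its own internal header --
 * e.g. CPUMInternal.h, which defines ___CPUMInternal_h -- sees its real
 * per-CPU structure through the union member 's', while every other module
 * only sees the opaque padding bytes. The function below is illustrative. */
#if 0 /* illustrative only */
# include "CPUMInternal.h"      /* defines ___CPUMInternal_h, exposing cpum.s */

static void cpumExamplePrivateAccess(PVMCPU pVCpu)
{
    struct CPUMCPU *pCpumCpu = &pVCpu->cpum.s;  /* module-private state */
    NOREF(pCpumCpu);
}
#endif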

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME      "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME      "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VM_FF_INTERRUPT_APIC            RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VM_FF_INTERRUPT_PIC             RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                     RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                   RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT              RT_BIT_32(5)

/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                      RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads, or requests which must be executed in another context. */
#define VM_FF_REQUEST                   RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                 RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                     RT_BIT_32(11)

/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3              RT_BIT_32(16)
/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3_NON_GLOBAL   RT_BIT_32(17)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES      RT_BIT_32(18)
/** Check the interrupt and trap gates. */
#define VM_FF_TRPM_SYNC_IDT             RT_BIT_32(19)
/** Check the guest's TSS ring 0 stack. */
#define VM_FF_SELM_SYNC_TSS             RT_BIT_32(20)
/** Check the guest's GDT table. */
#define VM_FF_SELM_SYNC_GDT             RT_BIT_32(21)
/** Check the guest's LDT table. */
#define VM_FF_SELM_SYNC_LDT             RT_BIT_32(22)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VM_FF_INHIBIT_INTERRUPTS        RT_BIT_32(23)

/** CSAM needs to scan the page that's being executed. */
#define VM_FF_CSAM_SCAN_PAGE            RT_BIT_32(24)
/** CSAM needs to do some homework. */
#define VM_FF_CSAM_PENDING_ACTION       RT_BIT_32(25)

/** Force return to Ring-3. */
#define VM_FF_TO_R3                     RT_BIT_32(28)

/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY        RT_BIT_32(29)

/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND             RT_BIT_32(31)

/** Externally forced actions. Used to quit the idle/wait loop when suspended. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK   (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced actions. Used to quit the idle/wait loop when halted. */
#define VM_FF_EXTERNAL_HALTED_MASK      (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** High priority pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK    (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                         | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_PGM_NEED_HANDY_PAGES)
/** High priority pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (  VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_PGM_NEED_HANDY_PAGES \
                                          | VM_FF_INHIBIT_INTERRUPTS)
/** High priority post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK   (VM_FF_PDM_CRITSECT | VM_FF_CSAM_PENDING_ACTION)
/** Normal priority post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)
/** Normal priority actions. */
#define VM_FF_NORMAL_PRIORITY_MASK      (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Flags to check before resuming guest execution. */
#define VM_FF_RESUME_GUEST_MASK         (VM_FF_TO_R3)
/** All the forced flags. */
#define VM_FF_ALL_MASK                  (~0U)
/** All the forced flags except the raw-mode pre-execution, CSAM pending action
 * and PDM critical section flags. */
#define VM_FF_ALL_BUT_RAW_MASK          (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_CSAM_PENDING_ACTION | VM_FF_PDM_CRITSECT))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to set.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_SET(pVM, idCpu, fFlag)    ASMAtomicOrU32(&(pVM)->aCpu[idCpu].fForcedActions, (fFlag))
#else
# define VMCPU_FF_SET(pVM, idCpu, fFlag)    VM_FF_SET(pVM, fFlag)
#endif

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to clear.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  ASMAtomicAndU32(&(pVM)->aCpu[idCpu].fForcedActions, ~(fFlag))
#else
# define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  VM_FF_CLEAR(pVM, fFlag)
#endif

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to check.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  (((pVM)->aCpu[idCpu].fForcedActions & (fFlag)) == (fFlag))
#else
# define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  VM_FF_ISSET(pVM, fFlag)
#endif

/** @def VM_FF_ISPENDING
 * Checks if one or more force action flags in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force action flags in the specified set are pending for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlags  The flags to check for.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) ((pVM)->aCpu[idCpu].fForcedActions & (fFlags))
#else
# define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) VM_FF_ISPENDING(pVM, fFlags)
#endif
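
/* A hedged usage sketch (not part of the original header) tying the macros
 * and masks above together: one component raises a forced action, and the
 * execution loop later checks and consumes it. Both function names are
 * invented for illustration. */
#if 0 /* illustrative only */
static void tmExampleTimerExpired(PVM pVM)
{
    VM_FF_SET(pVM, VM_FF_TIMER);                /* atomically OR the bit in */
}

static void emExampleLoopIteration(PVM pVM)
{
    if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
    {
        if (VM_FF_ISSET(pVM, VM_FF_TIMER))
        {
            /* ... schedule and run the expired timers ... */
            VM_FF_CLEAR(pVM, VM_FF_TIMER);      /* atomically AND the bit out */
        }
    }
}
#endif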

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM)                     true
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VM_IS_EMT(pVM)                     (VMR3GetVMCPUNativeThread(pVM) == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif
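
/* Sketch (assumption, not VirtualBox code): a ring-3 API that may only be
 * called on the EMT would typically guard itself like this, returning a
 * status code instead of asserting its way down when called from the wrong
 * thread. */
#if 0 /* illustrative only */
static int vmExampleEmtOnlyOperation(PVM pVM)
{
    VM_ASSERT_EMT_RETURN(pVM, VERR_ACCESS_DENIED);
    /* ... safe to touch EMT-only state from here on ... */
    return VINF_SUCCESS;
}
#endif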


/** @def VM_GET_VMCPUID
 * Returns the VMCPU id of the current EMT.
 * @todo r=bird: See VMMGetCpuId().
 */
#ifdef IN_RC
# define VM_GET_VMCPUID(pVM)                0
#elif defined(IN_RING0)
# define VM_GET_VMCPUID(pVM)                HWACCMGetVMCPUId(pVM)
#else
# define VM_GET_VMCPUID(pVM)                VMR3GetVMCPUId(pVM)
#endif

/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)), \
                    (rc))
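
/* Sketch (assumption, not VirtualBox code): a state-sensitive operation can
 * guard itself with the state assertions above. The state and status code
 * used here are assumptions chosen for illustration. */
#if 0 /* illustrative only */
static int vmExampleResume(PVM pVM)
{
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_SUSPENDED, VERR_VM_INVALID_VM_STATE);
    /* ... perform the resume ... */
    return VINF_SUCCESS;
}
#endif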



/**
 * The VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than modifying the members all around the place. Therefore we make
 * use of unions to hide everything which isn't local to the current source
 * module. This means we'll have to pay a little bit of attention when adding
 * new members to structures in the unions and make sure to keep the padding
 * sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;

    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;

    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put the StatQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64-byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32-byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[4416];      /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1536];      /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[50*1024];   /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[1536];      /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];      /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];       /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];       /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];        /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];      /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];      /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];       /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];      /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[1344];      /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];      /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];      /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];        /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];       /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

#ifdef VBOX_WITH_NEW_RECOMPILER
/** @def VM_REM_SIZE
 * Must be a multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
# if GC_ARCH_BITS == 32
#  define VM_REM_SIZE   (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
# else
#  define VM_REM_SIZE   (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
# endif
#else  /* !VBOX_WITH_NEW_RECOMPILER */
# if GC_ARCH_BITS == 32
#  define VM_REM_SIZE   (HC_ARCH_BITS == 32 ? 0x6f00 : 0xbf00)
# else
#  define VM_REM_SIZE   (HC_ARCH_BITS == 32 ? 0x9f00 : 0xdf00)
# endif
#endif /* !VBOX_WITH_NEW_RECOMPILER */
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64-byte boundary. */
    uint32_t        u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU           aCpus[1];
} VM;
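
/* A hedged sketch of the kind of compile-time padding check tstVMStructSize
 * performs (the real testcase lives elsewhere in the tree; these two lines
 * are illustrative, not the actual test): each union's padding must fit the
 * real structure, and each union must keep its 32-byte alignment. */
#if 0 /* illustrative only */
AssertCompile(sizeof(((VM *)0)->cpum.padding) >= sizeof(struct CPUM));
AssertCompile(!(RT_OFFSETOF(VM, cpum) & 31));
#endif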

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif
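
/* Sketch (assumption, not an official API) of how the offVMCPU / aCpus layout
 * at the tail of the VM structure is navigated: the VMCPU array starts
 * offVMCPU bytes from the beginning of the structure and holds cCPUs entries. */
#if 0 /* illustrative only */
DECLINLINE(PVMCPU) vmExampleGetCpuById(PVM pVM, VMCPUID idCpu)
{
    AssertReturn(idCpu < pVM->cCPUs, NULL);
    return (PVMCPU)((uintptr_t)pVM + pVM->offVMCPU) + idCpu;
}
#endif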


#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one
 * of those magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

__END_DECLS
#endif

/** @} */

#endif