VirtualBox

source: vbox/trunk/include/VBox/vm.h @ 3741

Last change on this file since 3741 was 3723, checked in by vboxsync, 17 years ago

Double underscore cleanup.

/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm The Virtual Machine
 * @{
 */

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VM_FF_INTERRUPT_APIC BIT(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VM_FF_INTERRUPT_PIC BIT(1)
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER BIT(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES BIT(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA BIT(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT BIT(5)

/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted until DBGF clears it. */
#define VM_FF_DBGF BIT(8)
/** This action forces the VM to service pending requests from other
 * threads, or requests which must be executed in another context. */
#define VM_FF_REQUEST BIT(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE BIT(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET BIT(11)

/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3 BIT(16)
/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3_NON_GLOBAL BIT(17)
/** Check the interrupt and trap gates. */
#define VM_FF_TRPM_SYNC_IDT BIT(18)
/** Check Guest's TSS ring 0 stack */
#define VM_FF_SELM_SYNC_TSS BIT(19)
/** Check Guest's GDT table */
#define VM_FF_SELM_SYNC_GDT BIT(20)
/** Check Guest's LDT table */
#define VM_FF_SELM_SYNC_LDT BIT(21)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VM_FF_INHIBIT_INTERRUPTS BIT(22)

/** CSAM needs to scan the page that's being executed */
#define VM_FF_CSAM_SCAN_PAGE BIT(24)
/** CSAM needs to do some homework. */
#define VM_FF_CSAM_PENDING_ACTION BIT(25)

/** Force return to Ring-3. */
#define VM_FF_TO_R3 BIT(28)

/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND BIT(31)

/** Externally forced actions. Used to quit the wait loop while the VM is suspended. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced actions. Used to quit the wait loop while the VM is halted. */
#define VM_FF_EXTERNAL_HALTED_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** High priority pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                      | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT)
/** High priority pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_INHIBIT_INTERRUPTS)
/** High priority post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK (VM_FF_PDM_CRITSECT|VM_FF_CSAM_PENDING_ACTION)
/** Normal priority post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)
/** Normal priority actions. */
#define VM_FF_NORMAL_PRIORITY_MASK (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Flags to check before resuming guest execution. */
#define VM_FF_RESUME_GUEST_MASK (VM_FF_TO_R3)
/** All the forced flags. */
#define VM_FF_ALL_MASK (~0U)
/** All the forced flags except the raw-mode related ones. */
#define VM_FF_ALL_BUT_RAW_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_CSAM_PENDING_ACTION | VM_FF_PDM_CRITSECT))

/** @} */

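/* Illustrative sketch, not part of the original header: it shows how the external
 * masks above are meant to be consumed by a halted/idle wait loop, which is what the
 * comments on VM_FF_EXTERNAL_SUSPENDED_MASK / VM_FF_EXTERNAL_HALTED_MASK refer to.
 * Only the VM_FF_* names come from this file (VM_FF_ISPENDING() is defined a little
 * further down); the function name and the blocking primitive are hypothetical.
 *
 *  // Hypothetical wait loop for a halted EMT.
 *  static int vmIllustrativeHaltedWait(PVM pVM)
 *  {
 *      for (;;)
 *      {
 *          // Any flag in the halted mask (timers, interrupts, requests, ...)
 *          // must wake the emulation thread so it can leave the halted state.
 *          if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK))
 *              return VINF_SUCCESS;
 *          RTThreadSleep(1);   // stand-in for the real blocking wait
 *      }
 *  }
 */
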
/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param pVM VM Handle.
 * @param fFlag The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag) ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET : %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param pVM VM Handle.
 * @param fFlag The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag) ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param pVM VM Handle.
 * @param fFlag The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag) (((pVM)->fForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks whether one or more of the force actions in the specified set are pending.
 *
 * @param pVM VM Handle.
 * @param fFlags The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags) ((pVM)->fForcedActions & (fFlags))

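/* Illustrative usage sketch, not part of the original header. It spells out the
 * difference between VM_FF_ISSET(), which requires every bit of its flag argument
 * to be set, and VM_FF_ISPENDING(), which is true as soon as any bit in the given
 * mask is pending. The setter/consumer scenario in the comments is hypothetical.
 *
 *  // Some other thread (e.g. a timer) asks the EMT to do work:
 *  VM_FF_SET(pVM, VM_FF_TIMER);
 *
 *  // The emulation loop: "is anything in this priority set pending?"
 *  if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
 *  {
 *      // "is this specific flag set?" - all bits of the argument must be set.
 *      if (VM_FF_ISSET(pVM, VM_FF_TIMER))
 *      {
 *          // ... run the pending timers, then acknowledge the request:
 *          VM_FF_CLEAR(pVM, VM_FF_TIMER);
 *      }
 *  }
 */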

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark The ring-0 variation will need attention if we expand the ring-0
 *         code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_GC
# define VM_IS_EMT(pVM) true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM) true
#else
# define VM_IS_EMT(pVM) ((pVM)->NativeThreadEMT == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_GC
# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), pVM->NativeThreadEMT))
#endif


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
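
/* Illustrative sketch, not part of the original header: code that touches EMT-only
 * state typically asserts the calling context first with the macros above. The
 * helper below is hypothetical; only VM_ASSERT_EMT(), VM_FF_ISSET() and the
 * VM_FF_REQUEST flag come from this file.
 *
 *  // Hypothetical ring-3 helper that must only ever run on the emulation thread.
 *  static void vmIllustrativeEmtOnlyWork(PVM pVM)
 *  {
 *      VM_ASSERT_EMT(pVM);     // catches accidental calls from other threads
 *      if (VM_FF_ISSET(pVM, VM_FF_REQUEST))
 *      {
 *          // ... process the queued requests on the EMT ...
 *      }
 *  }
 */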



/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to access it
 * through APIs rather than by modifying the members directly all over the
 * place. Therefore we make use of unions to hide everything which isn't
 * local to the current source module. This means we have to pay a little
 * bit of attention when adding new members to structures in the unions and
 * make sure to keep the padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically.
     */
    volatile uint32_t fForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE) paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    HCPTRTYPE(PSUPDRVSESSION) pSession;
    /** Pointer to the next VM.
     * We keep a per-process list of VMs in case a process should ever
     * contain more than one VM.
     */
    HCPTRTYPE(struct VM *) pNext;
    /** Host Context VM Pointer.
     * @obsolete don't use in new code! */
    HCPTRTYPE(struct VM *) pVMHC;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *) pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *) pVMR0;
    /** Guest Context VM Pointer. */
    GCPTRTYPE(struct VM *) pVMGC;

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param eax The return code, register.
     * @param Ctx The guest core context.
     * @remark Assume interrupts disabled.
     */
    RTGCPTR pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param eax The return code, register.
     * @param ecx Pointer to the hypervisor core context, register.
     * @remark Assume interrupts disabled.
     */
    RTGCPTR pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param eax The return code, register.
     * @remark Assume interrupts disabled.
     */
    RTGCPTR pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    /** The thread handle of the emulation thread.
     * Use the VM_IS_EMT() macro to check if executing in EMT. */
    RTTHREAD ThreadEMT;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD NativeThreadEMT;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool fCSAMEnabled;

    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool fHWACCMEnabled;
    /** @} */


    /* padding to make gnuc put the StatQemuToGC where msc does. */
/*#if HC_ARCH_BITS == 32
    uint32_t padding0;
#endif */

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV StatSwitcherToHC;
    STAMPROFILEADV StatSwitcherSaveRegs;
    STAMPROFILEADV StatSwitcherSysEnter;
    STAMPROFILEADV StatSwitcherDebug;
    STAMPROFILEADV StatSwitcherCR0;
    STAMPROFILEADV StatSwitcherCR4;
    STAMPROFILEADV StatSwitcherJmpCR3;
    STAMPROFILEADV StatSwitcherRstrRegs;
    STAMPROFILEADV StatSwitcherLgdt;
    STAMPROFILEADV StatSwitcherLidt;
    STAMPROFILEADV StatSwitcherLldt;
    STAMPROFILEADV StatSwitcherTSS;

    /* padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t padding[HC_ARCH_BITS == 32 ? 6 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
#ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
        char padding[3584]; /* multiple of 32 */
#else
        char padding[HC_ARCH_BITS == 32 ? 3424 : 3552]; /* multiple of 32 */
#endif
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM s;
#endif
        char padding[1024]; /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM s;
#endif
        char padding[50*1024]; /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char padding[1024]; /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char padding[5344]; /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char padding[544]; /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM s;
#endif
        char padding[128]; /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char padding[32]; /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM s;
#endif
        char padding[1024]; /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM s;
#endif
        char padding[4544]; /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char padding[768]; /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char padding[3328]; /* multiple of 32 */
    } csam;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM s;
#endif
        char padding[1344]; /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM s;
#endif
        char padding[1280]; /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char padding[HC_ARCH_BITS == 32 ? 1888 : 1920]; /* multiple of 32 */
    } dbgf;

    /** STAM part. */
    union
    {
#ifdef ___STAMInternal_h
        struct STAM s;
#endif
        char padding[32]; /* multiple of 32 */
    } stam;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM s;
#endif
        char padding[32]; /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char padding[768]; /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM s;
#endif
        char padding[HC_ARCH_BITS == 32 ? 0x6b00 : 0xbf00]; /* multiple of 32 */
    } rem;
} VM;

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif
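
/* Illustrative note, not part of the original header: the unions above expose the
 * real per-module structure (e.g. 'struct CPUM s;') only to translation units that
 * include the corresponding internal header, while all other code just sees the
 * opaque fixed-size 'padding' member. A module therefore reaches its own state as,
 * say, pVM->cpum.s.<member>, whereas outside code has to go through that module's
 * API. The snippet below is a hedged sketch of the kind of compile-time size check
 * a structure-size testcase could perform; the typedef name is hypothetical and the
 * real tstVMStructSize testcase may verify this differently.
 *
 *  // Fails to compile (negative array size) if the padding is too small for CPUM.
 *  #ifdef ___CPUMInternal_h
 *  typedef char vm_cpum_padding_check[
 *      sizeof(((VM *)0)->cpum.padding) >= sizeof(struct CPUM) ? 1 : -1];
 *  #endif
 */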


#ifdef IN_GC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of
 * those magic globals which we should avoid using.
 */
extern DECLIMPORT(VM) g_VM;

__END_DECLS
#endif

/** @} */

#endif
