VirtualBox

source: vbox/trunk/include/VBox/vm.h@ 4506

Last change on this file since 4506 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
Line 
1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2007 innotek GmbH
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License as published by the Free Software Foundation,
12 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
13 * distribution. VirtualBox OSE is distributed in the hope that it will
14 * be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16
17#ifndef ___VBox_vm_h
18#define ___VBox_vm_h
19
20#include <VBox/cdefs.h>
21#include <VBox/types.h>
22#include <VBox/cpum.h>
23#include <VBox/stam.h>
24#include <VBox/vmapi.h>
25#include <VBox/sup.h>
26
27
28/** @defgroup grp_vm The Virtual Machine
29 * @{
30 */
31
32/** The name of the Guest Context VMM Core module. */
33#define VMMGC_MAIN_MODULE_NAME "VMMGC.gc"
34/** The name of the Ring 0 Context VMM Core module. */
35#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
36
37/** VM Forced Action Flags.
38 *
39 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
40 * action mask of a VM.
41 *
42 * @{
43 */
44/** This action forces the VM to check for and service pending interrupts on the APIC. */
45#define VM_FF_INTERRUPT_APIC BIT(0)
46/** This action forces the VM to check for and service pending interrupts on the PIC. */
47#define VM_FF_INTERRUPT_PIC BIT(1)
48/** This action forces the VM to schedule and run pending timers (TM). */
49#define VM_FF_TIMER BIT(2)
50/** PDM Queues are pending. */
51#define VM_FF_PDM_QUEUES BIT(3)
52/** PDM DMA transfers are pending. */
53#define VM_FF_PDM_DMA BIT(4)
54/** PDM critical section unlocking is pending, process promptly upon return to R3. */
55#define VM_FF_PDM_CRITSECT BIT(5)
56
57/** This action forces the VM to call DBGF so DBGF can service debugger
58 * requests in the emulation thread.
59 * This action flag stays asserted till DBGF clears it. */
60#define VM_FF_DBGF BIT(8)
61/** This action forces the VM to service pending requests from another
62 * thread, or requests which must be executed in another context. */
63#define VM_FF_REQUEST BIT(9)
64/** Terminate the VM immediately. */
65#define VM_FF_TERMINATE BIT(10)
66/** Reset the VM. (postponed) */
67#define VM_FF_RESET BIT(11)
68
69/** This action forces the VM to resync the page tables before going
70 * back to execute guest code. (GLOBAL FLUSH) */
71#define VM_FF_PGM_SYNC_CR3 BIT(16)
72/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
73 * (NON-GLOBAL FLUSH) */
74#define VM_FF_PGM_SYNC_CR3_NON_GLOBAL BIT(17)
75/** Check the interrupt and trap gates. */
76#define VM_FF_TRPM_SYNC_IDT BIT(18)
77/** Check the guest's TSS ring 0 stack. */
78#define VM_FF_SELM_SYNC_TSS BIT(19)
79/** Check the guest's GDT table. */
80#define VM_FF_SELM_SYNC_GDT BIT(20)
81/** Check the guest's LDT table. */
82#define VM_FF_SELM_SYNC_LDT BIT(21)
83/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
84#define VM_FF_INHIBIT_INTERRUPTS BIT(22)
85
86/** CSAM needs to scan the page that's being executed. */
87#define VM_FF_CSAM_SCAN_PAGE BIT(24)
88/** CSAM needs to do some homework. */
89#define VM_FF_CSAM_PENDING_ACTION BIT(25)
90
91/** Force return to Ring-3. */
92#define VM_FF_TO_R3 BIT(28)
93
94/** Suspend the VM - debug only. */
95#define VM_FF_DEBUG_SUSPEND BIT(31)
96
97/** Externally forced actions. Used to quit the idle/wait loop when the VM is suspended. */
98#define VM_FF_EXTERNAL_SUSPENDED_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
99/** Externally forced actions. Used to quit the idle/wait loop when the VM is halted. */
100#define VM_FF_EXTERNAL_HALTED_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
101/** High priority pre-execution actions. */
102#define VM_FF_HIGH_PRIORITY_PRE_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
103 | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT)
104/** High priority pre raw-mode execution mask. */
105#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_INHIBIT_INTERRUPTS)
106/** High priority post-execution actions. */
107#define VM_FF_HIGH_PRIORITY_POST_MASK (VM_FF_PDM_CRITSECT|VM_FF_CSAM_PENDING_ACTION)
108/** Normal priority post-execution actions. */
109#define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)
110/** Normal priority actions. */
111#define VM_FF_NORMAL_PRIORITY_MASK (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
112/** Flags to check before resuming guest execution. */
113#define VM_FF_RESUME_GUEST_MASK (VM_FF_TO_R3)
114/** All the forced flags. */
115#define VM_FF_ALL_MASK (~0U)
116/** All forced flags except the high priority pre raw-mode ones, CSAM pending action and PDM critsect. */
117#define VM_FF_ALL_BUT_RAW_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_CSAM_PENDING_ACTION | VM_FF_PDM_CRITSECT))
118
119/** @} */
120
121/** @def VM_FF_SET
122 * Sets a force action flag.
123 *
124 * The update is performed atomically (ASMAtomicOrU32), so it is safe
125 * from any thread/context.
126 *
127 * @param pVM VM Handle.
128 * @param fFlag The flag to set.
129 */
/* Flip the '#if 1' to 0 to get a variant that logs every flag change
   (file/line/function) - useful when debugging forced-action flow. */
130#if 1
131# define VM_FF_SET(pVM, fFlag) ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag))
132#else
133# define VM_FF_SET(pVM, fFlag) \
134 do { ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag)); \
135 RTLogPrintf("VM_FF_SET : %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
136 } while (0)
137#endif
138
139/** @def VM_FF_CLEAR
140 * Clears a force action flag.
141 *
142 * The update is performed atomically (ASMAtomicAndU32 with the
143 * complement of the flag), so it is safe from any thread/context.
144 *
145 * @param pVM VM Handle.
146 * @param fFlag The flag to clear.
147 */
/* Same '#if 1' convention as VM_FF_SET: the disabled variant logs every change. */
148#if 1
149# define VM_FF_CLEAR(pVM, fFlag) ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag))
150#else
151# define VM_FF_CLEAR(pVM, fFlag) \
152 do { ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag)); \
153 RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
154 } while (0)
155#endif
156
157/** @def VM_FF_ISSET
158 * Checks if a force action flag is set.
159 *
160 * Note: requires ALL bits in fFlag to be set (equality test), unlike
161 * VM_FF_ISPENDING which is satisfied by any one bit.
162 *
163 * @param pVM VM Handle.
164 * @param fFlag The flag to check.
165 */
166#define VM_FF_ISSET(pVM, fFlag) (((pVM)->fForcedActions & (fFlag)) == (fFlag))
167
168/** @def VM_FF_ISPENDING
169 * Checks if one or more force action in the specified set is pending.
170 *
171 * Evaluates to the subset of pending flags (non-zero if any is pending).
172 *
173 * @param pVM VM Handle.
174 * @param fFlags The flags to check for.
175 */
176#define VM_FF_ISPENDING(pVM, fFlags) ((pVM)->fForcedActions & (fFlags))
166
167
168/** @def VM_IS_EMT
169 * Checks if the current thread is the emulation thread (EMT).
170 *
171 * In GC and R0 this is trivially true since (currently) only EMT ever
172 * enters those contexts; only the R3 variant performs a real thread check.
173 *
174 * @remark The ring-0 variation will need attention if we expand the ring-0
175 * code to let threads other than EMT mess around with the VM.
176 */
177#ifdef IN_GC
178# define VM_IS_EMT(pVM) true
179#elif defined(IN_RING0)
180# define VM_IS_EMT(pVM) true
181#else
182# define VM_IS_EMT(pVM) ((pVM)->NativeThreadEMT == RTThreadNativeSelf())
183#endif
184
185/** @def VM_ASSERT_EMT
186 * Asserts that the current thread IS the emulation thread (EMT).
187 * The R3 variant includes both thread handles in the assertion message.
188 */
189#ifdef IN_GC
190# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
191#elif defined(IN_RING0)
192# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
193#else
194# define VM_ASSERT_EMT(pVM) \
195 AssertMsg(VM_IS_EMT(pVM), \
196 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), pVM->NativeThreadEMT))
197#endif
198
199
200/**
201 * Asserts that the current thread is NOT the emulation thread.
202 * Note: in GC and R0 VM_IS_EMT() is hardwired to true, so this assertion
203 * always fires there - it is only meaningful in ring-3.
204 */
205#define VM_ASSERT_OTHER_THREAD(pVM) \
206 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
201
202
203
204/** This is the VM structure.
205 *
206 * It contains (nearly?) all the VM data which have to be available in all
207 * contexts. Even if it contains all the data the idea is to use APIs not
208 * to modify all the members all around the place. Therefore we make use of
209 * unions to hide everything which isn't local to the current source module.
210 * This means we'll have to pay a little bit of attention when adding new
211 * members to structures in the unions and make sure to keep the padding sizes
212 * up to date.
213 *
214 * Run tstVMStructSize after update!
215 */
216typedef struct VM
217{
218 /** The state of the VM.
219 * This field is read only to everyone except the VM and EM. */
220 VMSTATE enmVMState;
221 /** Forced action flags.
222 * See the VM_FF_* \#defines. Updated atomically via VM_FF_SET/VM_FF_CLEAR.
223 */
224 volatile uint32_t fForcedActions;
225 /** Pointer to the array of page descriptors for the VM structure allocation. */
226 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
227 /** Session handle. For use when calling SUPR0 APIs. */
228 HCPTRTYPE(PSUPDRVSESSION) pSession;
229 /** Pointer to the next VM.
230 * We keep a per-process list of VMs for the event that a process could
231 * contain more than one VM.
232 */
233 HCPTRTYPE(struct VM *) pNext;
234 /** Host Context VM Pointer.
235 * @obsolete don't use in new code! Use pVMR3/pVMR0 instead. */
236 HCPTRTYPE(struct VM *) pVMHC;
237 /** Ring-3 Host Context VM Pointer. */
238 R3PTRTYPE(struct VM *) pVMR3;
239 /** Ring-0 Host Context VM Pointer. */
240 R0PTRTYPE(struct VM *) pVMR0;
241 /** Guest Context VM Pointer. */
242 GCPTRTYPE(struct VM *) pVMGC;
243
244 /** @name Public VMM Switcher APIs
245 * @{ */
246 /**
247 * Assembly switch entry point for returning to host context.
248 * This function will clean up the stack frame.
249 *
250 * @param eax The return code, register.
251 * @param Ctx The guest core context.
252 * @remark Assume interrupts disabled.
253 */
254 RTGCPTR pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;
255
256 /**
257 * Assembly switch entry point for returning to host context.
258 *
259 * This is an alternative entry point which we'll be using when the we have the
260 * hypervisor context and need to save that before going to the host.
261 *
262 * This is typically useful when abandoning the hypervisor because of a trap
263 * and want the trap state to be saved.
264 *
265 * @param eax The return code, register.
266 * @param ecx Pointer to the hypervisor core context, register.
267 * @remark Assume interrupts disabled.
268 */
269 RTGCPTR pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;
270
271 /**
272 * Assembly switch entry point for returning to host context.
273 *
274 * This is an alternative to the two *Ctx APIs and implies that the context has already
275 * been saved, or that it's just a brief return to HC and that the caller intends to resume
276 * whatever it is doing upon 'return' from this call.
277 *
278 * @param eax The return code, register.
279 * @remark Assume interrupts disabled.
280 */
281 RTGCPTR pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
282 /** @} */
283
284
285 /** @name Various VM data owned by VM.
286 * @{ */
287 /** The thread handle of the emulation thread.
288 * Use the VM_IS_EMT() macro to check if executing in EMT. */
289 RTTHREAD ThreadEMT;
290 /** The native handle of ThreadEMT. Getting the native handle
291 * is generally faster than getting the IPRT one (except on OS/2 :-). */
292 RTNATIVETHREAD NativeThreadEMT;
293 /** @} */
294
295
296 /** @name Various items that are frequently accessed.
297 * @{ */
298 /** Raw ring-3 indicator. */
299 bool fRawR3Enabled;
300 /** Raw ring-0 indicator. */
301 bool fRawR0Enabled;
302 /** PATM enabled flag.
303 * This is placed here for performance reasons. */
304 bool fPATMEnabled;
305 /** CSAM enabled flag.
306 * This is placed here for performance reasons. */
307 bool fCSAMEnabled;
308
309 /** Hardware VM support is available and enabled.
310 * This is placed here for performance reasons. */
311 bool fHWACCMEnabled;
312 /** @} */
313
314
315 /* padding to make gnuc put the StatQemuToGC where msc does. */
316/*#if HC_ARCH_BITS == 32
317 uint32_t padding0;
318#endif */
319
320 /** Profiling the total time from Qemu to GC. */
321 STAMPROFILEADV StatTotalQemuToGC;
322 /** Profiling the total time from GC to Qemu. */
323 STAMPROFILEADV StatTotalGCToQemu;
324 /** Profiling the total time spent in GC. */
325 STAMPROFILEADV StatTotalInGC;
326 /** Profiling the total time spent not in Qemu. */
327 STAMPROFILEADV StatTotalInQemu;
328 /** Profiling the VMMSwitcher code for going to GC. */
329 STAMPROFILEADV StatSwitcherToGC;
330 /** Profiling the VMMSwitcher code for going to HC. */
331 STAMPROFILEADV StatSwitcherToHC;
 /* Per-step breakdown of the world-switcher profiling; each member's name
  * indicates the switcher step it times. */
332 STAMPROFILEADV StatSwitcherSaveRegs;
333 STAMPROFILEADV StatSwitcherSysEnter;
334 STAMPROFILEADV StatSwitcherDebug;
335 STAMPROFILEADV StatSwitcherCR0;
336 STAMPROFILEADV StatSwitcherCR4;
337 STAMPROFILEADV StatSwitcherJmpCR3;
338 STAMPROFILEADV StatSwitcherRstrRegs;
339 STAMPROFILEADV StatSwitcherLgdt;
340 STAMPROFILEADV StatSwitcherLidt;
341 STAMPROFILEADV StatSwitcherLldt;
342 STAMPROFILEADV StatSwitcherTSS;
343
344 /* padding - the unions must be aligned on 32 byte boundaries. */
345 uint32_t padding[HC_ARCH_BITS == 32 ? 6 : 6]; /* note: both arms are 6 at present; ternary kept so they can diverge */
346
347 /** CPUM part. */
348 union
349 {
350#ifdef ___CPUMInternal_h
351 struct CPUM s;
352#endif
353#ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
354 char padding[3584]; /* multiple of 32 */
355#else
356 char padding[HC_ARCH_BITS == 32 ? 3424 : 3552]; /* multiple of 32 */
357#endif
358 } cpum;
359
360 /** VMM part. */
361 union
362 {
363#ifdef ___VMMInternal_h
364 struct VMM s;
365#endif
366 char padding[1024]; /* multiple of 32 */
367 } vmm;
368
369 /** PGM part. */
370 union
371 {
372#ifdef ___PGMInternal_h
373 struct PGM s;
374#endif
375 char padding[50*1024]; /* multiple of 32 */
376 } pgm;
377
378 /** HWACCM part. */
379 union
380 {
381#ifdef ___HWACCMInternal_h
382 struct HWACCM s;
383#endif
384 char padding[1024]; /* multiple of 32 */
385 } hwaccm;
386
387 /** TRPM part. */
388 union
389 {
390#ifdef ___TRPMInternal_h
391 struct TRPM s;
392#endif
393 char padding[5344]; /* multiple of 32 */
394 } trpm;
395
396 /** SELM part. */
397 union
398 {
399#ifdef ___SELMInternal_h
400 struct SELM s;
401#endif
402 char padding[544]; /* multiple of 32 */
403 } selm;
404
405 /** MM part. */
406 union
407 {
408#ifdef ___MMInternal_h
409 struct MM s;
410#endif
411 char padding[128]; /* multiple of 32 */
412 } mm;
413
414 /** CFGM part. */
415 union
416 {
417#ifdef ___CFGMInternal_h
418 struct CFGM s;
419#endif
420 char padding[32]; /* multiple of 32 */
421 } cfgm;
422
423 /** PDM part. */
424 union
425 {
426#ifdef ___PDMInternal_h
427 struct PDM s;
428#endif
429 char padding[1024]; /* multiple of 32 */
430 } pdm;
431
432 /** IOM part. */
433 union
434 {
435#ifdef ___IOMInternal_h
436 struct IOM s;
437#endif
438 char padding[4544]; /* multiple of 32 */
439 } iom;
440
441 /** PATM part. */
442 union
443 {
444#ifdef ___PATMInternal_h
445 struct PATM s;
446#endif
447 char padding[768]; /* multiple of 32 */
448 } patm;
449
450 /** CSAM part. */
451 union
452 {
453#ifdef ___CSAMInternal_h
454 struct CSAM s;
455#endif
456 char padding[3328]; /* multiple of 32 */
457 } csam;
458
459 /** EM part. */
460 union
461 {
462#ifdef ___EMInternal_h
463 struct EM s;
464#endif
465 char padding[1344]; /* multiple of 32 */
466 } em;
467
468 /** TM part. */
469 union
470 {
471#ifdef ___TMInternal_h
472 struct TM s;
473#endif
474 char padding[1280]; /* multiple of 32 */
475 } tm;
476
477 /** DBGF part. */
478 union
479 {
480#ifdef ___DBGFInternal_h
481 struct DBGF s;
482#endif
483 char padding[HC_ARCH_BITS == 32 ? 1888 : 1920]; /* multiple of 32 */
484 } dbgf;
485
486 /** STAM part. */
487 union
488 {
489#ifdef ___STAMInternal_h
490 struct STAM s;
491#endif
492 char padding[32]; /* multiple of 32 */
493 } stam;
494
495 /** SSM part. */
496 union
497 {
498#ifdef ___SSMInternal_h
499 struct SSM s;
500#endif
501 char padding[32]; /* multiple of 32 */
502 } ssm;
503
504 /** VM part. */
505 union
506 {
507#ifdef ___VMInternal_h
508 struct VMINT s;
509#endif
510 char padding[768]; /* multiple of 32 */
511 } vm;
512
513 /** REM part. */
514 union
515 {
516#ifdef ___REMInternal_h
517 struct REM s;
518#endif
519 char padding[HC_ARCH_BITS == 32 ? 0x6b00 : 0xbf00]; /* multiple of 32 */
520 } rem;
521} VM;
522
523/** Pointer to a VM.
 * Only defined here when VBox/types.h (which normally provides it) hasn't
 * been included first. */
524#ifndef ___VBox_types_h
525typedef struct VM *PVM;
526#endif
527
528
529#ifdef IN_GC
530__BEGIN_DECLS
531
532/** The VM structure.
533 * This is imported from the VMMGCBuiltin module, i.e. it's one
534 * of those magic globals which we should avoid using.
535 */
536extern DECLIMPORT(VM) g_VM;
537
538__END_DECLS
539#endif
540
541/** @} */
542
543#endif
544
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette