VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 80003

Last change on this file since 80003 was 80003, checked in by vboxsync, 5 years ago

VMM: Kicking out raw-mode (work in progress). bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.8 KB
Line 
1/* $Id: VMMInternal.h 80003 2019-07-26 13:37:47Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
19#define VMM_INCLUDED_SRC_include_VMMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/sup.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/vmm.h>
28#include <VBox/log.h>
29#include <iprt/critsect.h>
30
31#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
32# error "Not in VMM! This is an internal header!"
33#endif
34#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
35# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
36#endif
37
38
39
40/** @defgroup grp_vmm_int Internals
41 * @ingroup grp_vmm
42 * @internal
43 * @{
44 */
45
46/** @def VBOX_WITH_RC_RELEASE_LOGGING
47 * Enables RC release logging. */
48#define VBOX_WITH_RC_RELEASE_LOGGING
49
50/** @def VBOX_WITH_R0_LOGGING
51 * Enables Ring-0 logging (non-release).
52 *
53 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
54 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
55 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
56 */
57#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
58# define VBOX_WITH_R0_LOGGING
59#endif
60
61/** @def VBOX_STRICT_VMM_STACK
62 * Enables VMM stack guard pages to catch stack over- and underruns. */
63#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
64# define VBOX_STRICT_VMM_STACK
65#endif
66
67
/**
 * The ring-0 logger instance wrapper.
 *
 * We need to be able to find the VM handle from the logger instance, so we wrap
 * it in this structure.
 */
typedef struct VMMR0LOGGER
{
    /** Pointer to the VM. */
    R0PTRTYPE(PVM)              pVM;
    /** Size of the allocated logger instance (Logger). */
    uint32_t                    cbLogger;
    /** Flag indicating whether we've created the logger Ring-0 instance yet. */
    bool                        fCreated;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                        fFlushingDisabled;
    /** Flag indicating whether we've registered the instance already. */
    bool                        fRegistered;
    /** Explicit alignment padding (keeps idCpu 32-bit aligned). */
    bool                        a8Alignment;
    /** The CPU ID. */
    VMCPUID                     idCpu;
#if HC_ARCH_BITS == 64
    /** Explicit alignment padding so Logger starts on a 64-bit boundary. */
    uint32_t                    u32Alignment;
#endif
    /** The ring-0 logger instance. This extends beyond the size. */
    RTLOGGER                    Logger;
} VMMR0LOGGER;
/** Pointer to a ring-0 logger instance wrapper. */
typedef VMMR0LOGGER *PVMMR0LOGGER;
97
98
/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 *
 * @note NOTE(review): member order appears to be relied upon by the assembly
 *       setjmp/longjmp helpers (see the remark on VMMCPU::CallRing3JmpBufR0)
 *       — do not reorder fields without checking the .asm side.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t                    ebx;
    uint32_t                    esi;
    uint32_t                    edi;
    uint32_t                    ebp;
    uint32_t                    esp;
    uint32_t                    eip;
    uint32_t                    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t                    rbx;
# ifdef RT_OS_WINDOWS
    /* rsi/rdi are callee-saved only in the Windows x64 calling convention. */
    uint64_t                    rsi;
    uint64_t                    rdi;
# endif
    uint64_t                    rbp;
    uint64_t                    r12;
    uint64_t                    r13;
    uint64_t                    r14;
    uint64_t                    r15;
    uint64_t                    rsp;
    uint64_t                    rip;
# ifdef RT_OS_WINDOWS
    /* xmm6-xmm15 are callee-saved on Windows x64 and must be preserved too. */
    uint128_t                   xmm6;
    uint128_t                   xmm7;
    uint128_t                   xmm8;
    uint128_t                   xmm9;
    uint128_t                   xmm10;
    uint128_t                   xmm11;
    uint128_t                   xmm12;
    uint128_t                   xmm13;
    uint128_t                   xmm14;
    uint128_t                   xmm15;
# endif
    uint64_t                    rflags;
#endif
    /** @} */

    /** Flag that indicates that we've done a ring-3 call. */
    bool                        fInRing3Call;
    /** The number of bytes we've saved. */
    uint32_t                    cbSavedStack;
    /** Pointer to the buffer used to save the stack.
     * This is assumed to be 8KB. */
    RTR0PTR                     pvSavedStack;
    /** Esp we match against esp on resume to make sure the stack wasn't relocated. */
    RTHCUINTREG                 SpCheck;
    /** The esp we should resume execution with after the restore. */
    RTHCUINTREG                 SpResume;
    /** ESP/RSP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEsp;
    /** EBP/RBP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEbp;
    /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    RTHCUINTREG                 SavedEipForUnwind;
    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    RTHCUINTREG                 UnwindRetPcValue;
    /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
    RTHCUINTREG                 UnwindRetPcLocation;
#if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t                    uPadding;
#endif

    /** Stats: Max amount of stack used. */
    uint32_t                    cbUsedMax;
    /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    uint32_t                    cbUsedAvg;
    /** Stats: Total amount of stack used. */
    uint64_t                    cbUsedTotal;
    /** Stats: Number of stack usages. */
    uint64_t                    cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;
182
183
/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;
    /** Alignment padding. */
    bool                        afPadding0[7];

    /** The EMT yield timer. */
    PTMTIMERR3                  pYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Semaphore to wait on upon recursing. */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPush;
    /** Semaphore to wait on after done with recursion (caller restoring state). */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPop;
    /** Semaphore the initiator waits on while the EMTs are getting into position
     * on hEvtMulRendezvousRecursionPush. */
    RTSEMEVENT                  hEvtRendezvousRecursionPushCaller;
    /** Semaphore the initiator waits on while the EMTs sitting on
     * hEvtMulRendezvousRecursionPop wake up and leave. */
    RTSEMEVENT                  hEvtRendezvousRecursionPopCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** The recursion depth. */
    volatile uint32_t           cRendezvousRecursions;
    /** The number of EMTs that have entered the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPush;
    /** The number of EMTs that have left the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPop;
    /** Triggers rendezvous recursion in the other threads. */
    volatile bool               fRendezvousRecursion;

    /** @} */

    /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPendingApiTrusty : 1;
    /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPossible : 1;

    bool                        afAlignment2[2]; /**< Alignment padding. */

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** Number of VMMR0_DO_HM_RUN calls. */
    STAMCOUNTER                 StatRunRC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetMSRRead;
    STAMCOUNTER                 StatRZRetMSRWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3Total;
    STAMCOUNTER                 StatRZRetToR3FF;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetToR3Iem;
    STAMCOUNTER                 StatRZRetToR3Iom;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetCallRing3;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPGMChangeMode;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    STAMCOUNTER                 StatRZCallPDMCritSectEnter;
    STAMCOUNTER                 StatRZCallPDMLock;
    STAMCOUNTER                 StatRZCallLogFlush;
    STAMCOUNTER                 StatRZCallPGMPoolGrow;
    STAMCOUNTER                 StatRZCallPGMMapChunk;
    STAMCOUNTER                 StatRZCallPGMAllocHandy;
    STAMCOUNTER                 StatRZCallRemReplay;
    STAMCOUNTER                 StatRZCallVMSetError;
    STAMCOUNTER                 StatRZCallVMSetRuntimeError;
    STAMCOUNTER                 StatRZCallPGMLock;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
341
342
/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;
    /** Alignment padding. */
    uint32_t                    u32Padding0;

    /** VMM stack, pointer to the top of the stack in R3.
     * Stack is allocated from the hypervisor heap and is page aligned
     * and always writable in RC. */
    R3PTRTYPE(uint8_t *)        pbEMTStackR3;

    /** Pointer to the R0 logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0LoggerR3;
    /** Pointer to the R0 logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0LoggerR0;

    /** Pointer to the R0 release logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR3;
    /** Pointer to the R0 release logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR0;

    /** Thread context switching hook (ring-0). */
    RTTHREADCTXHOOK             hCtxHook;

    /** @name Rendezvous
     * @{ */
    /** Whether the EMT is executing a rendezvous right now. For detecting
     * attempts at recursive rendezvous. */
    bool volatile               fInRendezvous;
    /** Alignment padding. */
    bool                        afPadding1[10];
    /** @} */

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0 for.
     * The deadlines we can calculate are from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding */
    uint32_t                    u32Padding2;

    /** @name Raw-mode context tracing data.
     * @{ */
    SUPDRVTRACERUSRCTX          TracerCtx;
    /** @} */

    /** Alignment padding, making sure u64CallRing3Arg and CallRing3JmpBufR0 are nicely aligned. */
    uint32_t                    au32Padding3[1];

    /** @name Call Ring-3
     * Formerly known as host calls.
     * @{ */
    /** The disable counter. */
    uint32_t                    cCallRing3Disabled;
    /** The pending operation. */
    VMMCALLRING3                enmCallRing3Operation;
    /** The result of the last operation. */
    int32_t                     rcCallRing3;
    /** The argument to the operation. */
    uint64_t                    u64CallRing3Arg;
    /** The Ring-0 notification callback. */
    R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION)   pfnCallRing3CallbackR0;
    /** The Ring-0 notification callback user argument. */
    R0PTRTYPE(void *)           pvCallRing3CallbackUserR0;
    /** The Ring-0 jmp buffer.
     * @remarks The size of this type isn't stable in assembly, so don't put
     *          anything that needs to be accessed from assembly after it. */
    VMMR0JMPBUF                 CallRing3JmpBufR0;
    /** @} */

    /** @name Ring-0 halt statistics.
     * @{ */
    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
    /** @} */
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
442
443
/**
 * The VMMRCEntry() codes.
 */
typedef enum VMMRCOPERATION
{
    /** Do GC module init. */
    VMMRC_DO_VMMRC_INIT = 1,

    /** The first Trap testcase. */
    VMMRC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
    /** Trap 0 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0 = VMMRC_DO_TESTCASE_TRAP_FIRST,
    /** Trap 1 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_1,
    /** Trap 2 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_2,
    /** Trap 3 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_3,
    /** Trap 4 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_4,
    /** Trap 5 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_5,
    /** Trap 6 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_6,
    /** Trap 7 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_7,
    /** Trap 8 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_8,
    /** Trap 9 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_9,
    /** Trap 0a testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0A,
    /** Trap 0b testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0B,
    /** Trap 0c testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0C,
    /** Trap 0d testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0D,
    /** Trap 0e testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0E,
    /** The last trap testcase (exclusive). */
    VMMRC_DO_TESTCASE_TRAP_LAST,
    /** Testcase for checking interrupt forwarding. */
    VMMRC_DO_TESTCASE_HYPER_INTERRUPT,
    /** Switching testing and profiling stub. */
    VMMRC_DO_TESTCASE_NOP,
    /** Testcase for checking interrupt masking. */
    VMMRC_DO_TESTCASE_INTERRUPT_MASKING,
    /** Switching testing and profiling stub. */
    VMMRC_DO_TESTCASE_HM_NOP,

    /** The usual 32-bit hack (forces the compiler to use a 32-bit type). */
    VMMRC_DO_32_BIT_HACK = 0x7fffffff
} VMMRCOPERATION;
498
499
500
/**
 * MSR test result entry.
 */
typedef struct VMMTESTMSRENTRY
{
    /** The MSR number, including padding.
     * Set to UINT64_MAX if invalid MSR. */
    uint64_t    uMsr;
    /** The register value. */
    uint64_t    uValue;
} VMMTESTMSRENTRY;
/** Pointer to an MSR test result entry. */
typedef VMMTESTMSRENTRY *PVMMTESTMSRENTRY;
514
515
516
RT_C_DECLS_BEGIN

/** Initializes the VMM format types; pairs with vmmTermFormatTypes(). */
int  vmmInitFormatTypes(void);
/** Tears down whatever vmmInitFormatTypes() set up. */
void vmmTermFormatTypes(void);
/** Returns a value identifying the build type — presumably debug/release/strict; verify against the R3 definition. */
uint32_t vmmGetBuildType(void);

#ifdef IN_RING3
/** Initializes the world switchers (ring-3 init time). */
int  vmmR3SwitcherInit(PVM pVM);
/** Applies relocation delta offDelta to switcher code/data. */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */
527
#ifdef IN_RING0
/**
 * World switcher assembly routine.
 * It will call VMMRCEntry().
 *
 * @returns return code from VMMRCEntry().
 * @param   pVM     The cross context VM structure.
 * @param   uArg    See VMMRCEntry().
 * @internal
 */
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call. Another differences is the function pointer and
 * argument. This has to do with resuming code and the stack frame of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);


/**
 * Callback function for vmmR0CallRing3SetJmp2.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP2(PGVM pGVM, VMCPUID idCpu);
/** Pointer to FNVMMR0SETJMP2(). */
typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);


/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pvUser  The argument of that function.
 */
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf Pointer to the jump buffer.
 * @param   rc      The return code.
 */
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

/**
 * Internal R0 logger worker: Logger wrapper.
 *
 * @param   pszFormat   The format string.
 * @param   ...         Format arguments.
 */
VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);

# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
/** Initializes the triple fault hack; pairs with vmmR0TripleFaultHackTerm(). */
int  vmmR0TripleFaultHackInit(void);
/** Tears down the triple fault hack. */
void vmmR0TripleFaultHackTerm(void);
# endif

#endif /* IN_RING0 */
#ifdef IN_RC

/**
 * Internal GC logger worker: Logger wrapper.
 *
 * @param   pszFormat   The format string.
 * @param   ...         Format arguments.
 */
VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC release logger worker: Logger wrapper.
 *
 * @param   pszFormat   The format string.
 * @param   ...         Format arguments.
 */
VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC logger worker: Flush logger.
 *
 * @returns VINF_SUCCESS.
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);

/** @name Trap testcases and related labels.
 * @{ */
DECLASM(void) vmmGCEnableWP(void);
DECLASM(void) vmmGCDisableWP(void);
DECLASM(int)  vmmGCTestTrap3(void);
DECLASM(int)  vmmGCTestTrap8(void);
DECLASM(int)  vmmGCTestTrap0d(void);
DECLASM(int)  vmmGCTestTrap0e(void);
DECLASM(int)  vmmGCTestTrap0e_FaultEIP(void);   /**< a label */
DECLASM(int)  vmmGCTestTrap0e_ResumeEIP(void);  /**< a label */
/** @} */

#endif /* IN_RC */
684
685RT_C_DECLS_END
686
687/** @} */
688
689#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette