VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMInternal.h@26522

Last change on this file since 26522 was 26066, checked in by vboxsync, 15 years ago

Guest SMP: force all VCPUs to go back to ring 3 when a pgm pool flush is pending. Not doing so might cause trouble on a loaded host.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.2 KB
1/* $Id: VMMInternal.h 26066 2010-01-27 12:59:32Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___VMMInternal_h
23#define ___VMMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/stam.h>
27#include <VBox/log.h>
28#include <iprt/critsect.h>
29
30
31#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
32# error "Not in VMM! This is an internal header!"
33#endif
34
35
36/** @defgroup grp_vmm_int Internals
37 * @ingroup grp_vmm
38 * @internal
39 * @{
40 */
41
42/** @def VBOX_WITH_RC_RELEASE_LOGGING
43 * Enables RC release logging. */
44#define VBOX_WITH_RC_RELEASE_LOGGING
45
46/** @def VBOX_WITH_R0_LOGGING
47 * Enables Ring-0 logging (non-release).
48 *
49 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
50 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
51 * #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
52 */
53#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DOXYGEN_RUNNING)
54# define VBOX_WITH_R0_LOGGING
55#endif
56
57/** @def VBOX_STRICT_VMM_STACK
58 * Enables VMM stack guard pages to catch stack over- and underruns. */
59#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
60# define VBOX_STRICT_VMM_STACK
61#endif
62
63
64/**
65 * Converts a VMM pointer into a VM pointer.
66 * @returns Pointer to the VM structure the VMM is part of.
67 * @param pVMM Pointer to VMM instance data.
68 */
69#define VMM2VM(pVMM) ( (PVM)((char*)pVMM - pVMM->offVM) )
70
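/* Illustrative sketch, not part of the original header: how VMM2VM() is meant
 * to be used.  It relies on offVM having been initialised to the byte offset
 * of the VMM data within the VM structure; the field path 'pVM->vmm.s' below
 * is an assumption for illustration only.
 *
 *     PVMM pVMM = &pVM->vmm.s;        // VMM instance data embedded in the VM
 *     PVM  pVM2 = VMM2VM(pVMM);       // == (PVM)((char *)pVMM - pVMM->offVM)
 *     Assert(pVM2 == pVM);            // recovers the owning VM structure
 */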
71
72/**
73 * Switcher function, HC to RC.
74 *
75 * @param pVM The VM handle.
76 * @returns Return code indicating the action to take.
77 */
78typedef DECLASMTYPE(int) FNVMMSWITCHERHC(PVM pVM);
79/** Pointer to switcher function. */
80typedef FNVMMSWITCHERHC *PFNVMMSWITCHERHC;
81
82/**
83 * Switcher function, RC to HC.
84 *
85 * @param rc VBox status code.
86 */
87typedef DECLASMTYPE(void) FNVMMSWITCHERRC(int rc);
88/** Pointer to switcher function. */
89typedef FNVMMSWITCHERRC *PFNVMMSWITCHERRC;
90
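/* Illustrative sketch (assumption, not from the original source): how the
 * switcher entry points typed above are invoked.  The ring-0 side calls the
 * host-to-guest switcher, and the status it eventually returns is whatever
 * the raw-mode side handed to the guest-to-host switcher.
 *
 *     // field path 'pVM->vmm.s.pfnHostToGuestR0' is an assumption
 *     int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);   // enter guest context
 *     // ...later the RC side leaves via pfnGuestToHostRC(rc2), and rc == rc2 here
 */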
91
92/**
93 * The ring-0 logger instance wrapper.
94 *
95 * We need to be able to find the VM handle from the logger instance, so we wrap
96 * it in this structure.
97 */
98typedef struct VMMR0LOGGER
99{
100 /** Pointer to the VM handle. */
101 R0PTRTYPE(PVM) pVM;
102 /** Size of the allocated logger instance (Logger). */
103 uint32_t cbLogger;
104 /** Flag indicating whether we've created the logger Ring-0 instance yet. */
105 bool fCreated;
106 /** Flag indicating whether we've disabled flushing (world switch) or not. */
107 bool fFlushingDisabled;
108 /** Flag indicating whether we've registered the instance already. */
109 bool fRegistered;
110 bool a8Alignment;
111 /** The CPU ID. */
112 VMCPUID idCpu;
113#if HC_ARCH_BITS == 64
114 uint32_t u32Alignment;
115#endif
116 /** The ring-0 logger instance. This extends beyond the size. */
117 RTLOGGER Logger;
118} VMMR0LOGGER;
119/** Pointer to a ring-0 logger instance wrapper. */
120typedef VMMR0LOGGER *PVMMR0LOGGER;
121
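/* Illustrative sketch (assumption): because the RTLOGGER instance is the last
 * member of VMMR0LOGGER, a ring-0 logger callback that is only handed the raw
 * PRTLOGGER can step back to the wrapper, and from there to the VM handle.
 * The helper name below is made up for illustration.
 *
 *     DECLINLINE(PVMMR0LOGGER) vmmExampleLoggerToWrapper(PRTLOGGER pLogger)
 *     {
 *         return (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
 *     }
 *     // pWrapper->pVM then gives the owning VM, e.g. inside vmmR0LoggerFlush()
 */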
122
123/**
124 * Jump buffer for the setjmp/longjmp like constructs used to
125 * quickly 'call' back into Ring-3.
126 */
127typedef struct VMMR0JMPBUF
128{
129 /** Traditional jmp_buf stuff
130 * @{ */
131#if HC_ARCH_BITS == 32
132 uint32_t ebx;
133 uint32_t esi;
134 uint32_t edi;
135 uint32_t ebp;
136 uint32_t esp;
137 uint32_t eip;
138 uint32_t eflags;
139#endif
140#if HC_ARCH_BITS == 64
141 uint64_t rbx;
142# ifdef RT_OS_WINDOWS
143 uint64_t rsi;
144 uint64_t rdi;
145# endif
146 uint64_t rbp;
147 uint64_t r12;
148 uint64_t r13;
149 uint64_t r14;
150 uint64_t r15;
151 uint64_t rsp;
152 uint64_t rip;
153# ifdef RT_OS_WINDOWS
154 uint128_t xmm6;
155 uint128_t xmm7;
156 uint128_t xmm8;
157 uint128_t xmm9;
158 uint128_t xmm10;
159 uint128_t xmm11;
160 uint128_t xmm12;
161 uint128_t xmm13;
162 uint128_t xmm14;
163 uint128_t xmm15;
164# endif
165 uint64_t rflags;
166#endif
167 /** @} */
168
169 /** Flag that indicates that we've done a ring-3 call. */
170 bool fInRing3Call;
171 /** The number of bytes we've saved. */
172 uint32_t cbSavedStack;
173 /** Pointer to the buffer used to save the stack.
174 * This is assumed to be 8KB. */
175 RTR0PTR pvSavedStack;
176 /** The esp value we match against esp on resume to make sure the stack wasn't relocated. */
177 RTHCUINTREG SpCheck;
178 /** The esp we should resume execution with after the restore. */
179 RTHCUINTREG SpResume;
180 /** ESP/RSP at the time of the jump to ring 3. */
181 RTHCUINTREG SavedEsp;
182 /** EBP/RBP at the time of the jump to ring 3. */
183 RTHCUINTREG SavedEbp;
184
185 /** Stats: Max amount of stack used. */
186 uint32_t cbUsedMax;
187 /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
188 uint32_t cbUsedAvg;
189 /** Stats: Total amount of stack used. */
190 uint64_t cbUsedTotal;
191 /** Stats: Number of stack usages. */
192 uint64_t cUsedTotal;
193} VMMR0JMPBUF;
194/** Pointer to a ring-0 jump buffer. */
195typedef VMMR0JMPBUF *PVMMR0JMPBUF;
196
197
198/**
199 * VMM Data (part of VM)
200 */
201typedef struct VMM
202{
203 /** Offset to the VM structure.
204 * See VMM2VM(). */
205 RTINT offVM;
206
207 /** @name World Switcher and Related
208 * @{
209 */
210 /** Size of the core code. */
211 RTUINT cbCoreCode;
212 /** Physical address of core code. */
213 RTHCPHYS HCPhysCoreCode;
214 /** Pointer to core code ring-3 mapping - contiguous memory.
215 * At present this only means the context switcher code. */
216 RTR3PTR pvCoreCodeR3;
217 /** Pointer to core code ring-0 mapping - contiguous memory.
218 * At present this only means the context switcher code. */
219 RTR0PTR pvCoreCodeR0;
220 /** Pointer to core code guest context mapping. */
221 RTRCPTR pvCoreCodeRC;
222 RTRCPTR pRCPadding0; /**< Alignment padding */
223#ifdef VBOX_WITH_NMI
224 /** The guest context address of the APIC (host) mapping. */
225 RTRCPTR GCPtrApicBase;
226 RTRCPTR pRCPadding1; /**< Alignment padding */
227#endif
228 /** The current switcher.
229 * This will be set before the VMM is fully initialized. */
230 VMMSWITCHER enmSwitcher;
231 /** Flag to disable the switcher permanently (VMX) (boolean) */
232 bool fSwitcherDisabled;
233 /** Array of offsets to the different switchers within the core code. */
234 RTUINT aoffSwitchers[VMMSWITCHER_MAX];
235
236 /** Resume Guest Execution. See CPUMGCResumeGuest(). */
237 RTRCPTR pfnCPUMRCResumeGuest;
238 /** Resume Guest Execution in V86 mode. See CPUMGCResumeGuestV86(). */
239 RTRCPTR pfnCPUMRCResumeGuestV86;
240 /** Call Trampoline. See vmmGCCallTrampoline(). */
241 RTRCPTR pfnCallTrampolineRC;
242 /** Guest to host switcher entry point. */
243 RCPTRTYPE(PFNVMMSWITCHERRC) pfnGuestToHostRC;
244 /** Host to guest switcher entry point. */
245 R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
246 /** @} */
247
248 /** @name Logging
249 * @{
250 */
251 /** Size of the allocated logger instance (pRCLoggerRC/pRCLoggerR3). */
252 uint32_t cbRCLogger;
253 /** Pointer to the RC logger instance - RC Ptr.
254 * This is NULL if logging is disabled. */
255 RCPTRTYPE(PRTLOGGERRC) pRCLoggerRC;
256 /** Pointer to the GC logger instance - R3 Ptr.
257 * This is NULL if logging is disabled. */
258 R3PTRTYPE(PRTLOGGERRC) pRCLoggerR3;
259 /** Pointer to the GC release logger instance - R3 Ptr. */
260 R3PTRTYPE(PRTLOGGERRC) pRCRelLoggerR3;
261 /** Pointer to the GC release logger instance - RC Ptr. */
262 RCPTRTYPE(PRTLOGGERRC) pRCRelLoggerRC;
263 /** Size of the allocated release logger instance (pRCRelLoggerRC/pRCRelLoggerR3).
264 * This may differ from cbRCLogger. */
265 uint32_t cbRCRelLogger;
266 /** Whether log flushing has been disabled or not. */
267 bool fRCLoggerFlushingDisabled;
268 bool afAlignment[6]; /**< Alignment padding. */
269 /** @} */
270
271 /** Whether the stack guard pages have been stationed or not. */
272 bool fStackGuardsStationed;
273
274 /** The EMT yield timer. */
275 PTMTIMERR3 pYieldTimer;
276 /** The period to the next timeout when suspended or stopped.
277 * This is 0 when running. */
278 uint32_t cYieldResumeMillies;
279 /** The EMT yield timer interval (milliseconds). */
280 uint32_t cYieldEveryMillies;
281 /** The timestamp of the previous yield. (nano) */
282 uint64_t u64LastYield;
283
284 /** Critical section.
285 * Use for synchronizing all VCPUs
286 */
287 RTCRITSECT CritSectSync;
288
289 /** @name EMT Rendezvous
290 * @{ */
291 /** Semaphore to wait on upon entering ordered execution. */
292 R3PTRTYPE(PRTSEMEVENT) pahEvtRendezvousEnterOrdered;
293 /** Semaphore to wait on upon entering for one-by-one execution. */
294 RTSEMEVENT hEvtRendezvousEnterOneByOne;
295 /** Semaphore to wait on upon entering for all-at-once execution. */
296 RTSEMEVENTMULTI hEvtMulRendezvousEnterAllAtOnce;
297 /** Semaphore to wait on when done. */
298 RTSEMEVENTMULTI hEvtMulRendezvousDone;
299 /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
300 RTSEMEVENT hEvtRendezvousDoneCaller;
301 /** Callback. */
302 R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
303 /** The user argument for the callback. */
304 RTR3PTR volatile pvRendezvousUser;
305 /** Flags. */
306 volatile uint32_t fRendezvousFlags;
307 /** The number of EMTs that have entered. */
308 volatile uint32_t cRendezvousEmtsEntered;
309 /** The number of EMTs that have done their job. */
310 volatile uint32_t cRendezvousEmtsDone;
311 /** The number of EMTs that have returned. */
312 volatile uint32_t cRendezvousEmtsReturned;
313 /** The status code. */
314 volatile int32_t i32RendezvousStatus;
315 /** Spin lock. */
316 volatile uint32_t u32RendezvousLock;
317 /** @} */
318
319#if HC_ARCH_BITS == 32
320 uint32_t u32Alignment; /**< Alignment padding. */
321#endif
322
323 /** Buffer for storing the standard assertion message for a ring-0 assertion.
324 * Used for saving the assertion message text for the release log and guru
325 * meditation dump. */
326 char szRing0AssertMsg1[512];
327 /** Buffer for storing the custom message for a ring-0 assertion. */
328 char szRing0AssertMsg2[256];
329
330 /** Number of VMMR0_DO_RUN_GC calls. */
331 STAMCOUNTER StatRunRC;
332
333 /** Statistics for each of the RC/R0 return codes.
334 * @{ */
335 STAMCOUNTER StatRZRetNormal;
336 STAMCOUNTER StatRZRetInterrupt;
337 STAMCOUNTER StatRZRetInterruptHyper;
338 STAMCOUNTER StatRZRetGuestTrap;
339 STAMCOUNTER StatRZRetRingSwitch;
340 STAMCOUNTER StatRZRetRingSwitchInt;
341 STAMCOUNTER StatRZRetStaleSelector;
342 STAMCOUNTER StatRZRetIRETTrap;
343 STAMCOUNTER StatRZRetEmulate;
344 STAMCOUNTER StatRZRetIOBlockEmulate;
345 STAMCOUNTER StatRZRetPatchEmulate;
346 STAMCOUNTER StatRZRetIORead;
347 STAMCOUNTER StatRZRetIOWrite;
348 STAMCOUNTER StatRZRetMMIORead;
349 STAMCOUNTER StatRZRetMMIOWrite;
350 STAMCOUNTER StatRZRetMMIOPatchRead;
351 STAMCOUNTER StatRZRetMMIOPatchWrite;
352 STAMCOUNTER StatRZRetMMIOReadWrite;
353 STAMCOUNTER StatRZRetLDTFault;
354 STAMCOUNTER StatRZRetGDTFault;
355 STAMCOUNTER StatRZRetIDTFault;
356 STAMCOUNTER StatRZRetTSSFault;
357 STAMCOUNTER StatRZRetPDFault;
358 STAMCOUNTER StatRZRetCSAMTask;
359 STAMCOUNTER StatRZRetSyncCR3;
360 STAMCOUNTER StatRZRetMisc;
361 STAMCOUNTER StatRZRetPatchInt3;
362 STAMCOUNTER StatRZRetPatchPF;
363 STAMCOUNTER StatRZRetPatchGP;
364 STAMCOUNTER StatRZRetPatchIretIRQ;
365 STAMCOUNTER StatRZRetRescheduleREM;
366 STAMCOUNTER StatRZRetToR3;
367 STAMCOUNTER StatRZRetTimerPending;
368 STAMCOUNTER StatRZRetInterruptPending;
369 STAMCOUNTER StatRZRetCallRing3;
370 STAMCOUNTER StatRZRetPATMDuplicateFn;
371 STAMCOUNTER StatRZRetPGMChangeMode;
372 STAMCOUNTER StatRZRetPendingRequest;
373 STAMCOUNTER StatRZRetPGMFlushPending;
374 STAMCOUNTER StatRZRetPatchTPR;
375 STAMCOUNTER StatRZCallPDMLock;
376 STAMCOUNTER StatRZCallLogFlush;
377 STAMCOUNTER StatRZCallPDMQueueFlush;
378 STAMCOUNTER StatRZCallPGMPoolGrow;
379 STAMCOUNTER StatRZCallPGMMapChunk;
380 STAMCOUNTER StatRZCallPGMAllocHandy;
381 STAMCOUNTER StatRZCallRemReplay;
382 STAMCOUNTER StatRZCallVMSetError;
383 STAMCOUNTER StatRZCallVMSetRuntimeError;
384 STAMCOUNTER StatRZCallPGMLock;
385 /** @} */
386} VMM;
387/** Pointer to VMM. */
388typedef VMM *PVMM;
389
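/* Illustrative sketch of how the EMT rendezvous fields above are driven from
 * ring-3.  Assumptions: the exact VMMR3EmtRendezvous() prototype, the flag
 * name and the callback signature are not defined in this file and are shown
 * here from memory, not verbatim.
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) exampleRendezvousCb(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         // executed by every EMT; ordering depends on the flags given below
 *         return VINF_SUCCESS;
 *     }
 *     ...
 *     rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
 *                             exampleRendezvousCb, NULL);
 */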
390
391/**
392 * VMMCPU Data (part of VMCPU)
393 */
394typedef struct VMMCPU
395{
396 /** Offset to the VMCPU structure.
397 * See VMM2VMCPU(). */
398 RTINT offVMCPU;
399
400 /** The last RC/R0 return code. */
401 int32_t iLastGZRc;
402
403 /** VMM stack, pointer to the top of the stack in R3.
404 * Stack is allocated from the hypervisor heap and is page aligned
405 * and always writable in RC. */
406 R3PTRTYPE(uint8_t *) pbEMTStackR3;
407 /** Pointer to the bottom of the stack - needed for doing relocations. */
408 RCPTRTYPE(uint8_t *) pbEMTStackRC;
409 /** Pointer to the bottom of the stack - needed for doing relocations. */
410 RCPTRTYPE(uint8_t *) pbEMTStackBottomRC;
411
412#ifdef LOG_ENABLED
413 /** Pointer to the R0 logger instance - R3 Ptr.
414 * This is NULL if logging is disabled. */
415 R3PTRTYPE(PVMMR0LOGGER) pR0LoggerR3;
416 /** Pointer to the R0 logger instance - R0 Ptr.
417 * This is NULL if logging is disabled. */
418 R0PTRTYPE(PVMMR0LOGGER) pR0LoggerR0;
419#endif
420
421 /** @name Call Ring-3
422 * Formerly known as host calls.
423 * @{ */
424 /** The disable counter. */
425 uint32_t cCallRing3Disabled;
426 /** The pending operation. */
427 VMMCALLRING3 enmCallRing3Operation;
428 /** The result of the last operation. */
429 int32_t rcCallRing3;
430#if HC_ARCH_BITS == 64
431 uint32_t padding;
432#endif
433 /** The argument to the operation. */
434 uint64_t u64CallRing3Arg;
435 /** The Ring-0 jmp buffer. */
436 VMMR0JMPBUF CallRing3JmpBufR0;
437 /** @} */
438
439} VMMCPU;
440/** Pointer to VMMCPU. */
441typedef VMMCPU *PVMMCPU;
442
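/* Illustrative sketch of the "call ring-3" flow using the fields above.  The
 * field path 'pVCpu->vmm.s' and the status code are assumptions; only
 * vmmR0CallRing3LongJmp() is declared further down in this file.
 *
 *     pVCpu->vmm.s.enmCallRing3Operation = enmOperation;  // what ring-3 must do
 *     pVCpu->vmm.s.u64CallRing3Arg       = uArg;          // argument for it
 *     // unwind to the EMT frame armed by vmmR0CallRing3SetJmp(); ring-3 performs
 *     // the operation and resumes us from the saved stack afterwards
 *     rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
 *     // on resume, pVCpu->vmm.s.rcCallRing3 holds the result of the operation
 */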
443
444/**
445 * The VMMGCEntry() codes.
446 */
447typedef enum VMMGCOPERATION
448{
449 /** Do GC module init. */
450 VMMGC_DO_VMMGC_INIT = 1,
451
452 /** The first Trap testcase. */
453 VMMGC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
454 /** Trap 0 testcases, uArg selects the variation. */
455 VMMGC_DO_TESTCASE_TRAP_0 = VMMGC_DO_TESTCASE_TRAP_FIRST,
456 /** Trap 1 testcases, uArg selects the variation. */
457 VMMGC_DO_TESTCASE_TRAP_1,
458 /** Trap 2 testcases, uArg selects the variation. */
459 VMMGC_DO_TESTCASE_TRAP_2,
460 /** Trap 3 testcases, uArg selects the variation. */
461 VMMGC_DO_TESTCASE_TRAP_3,
462 /** Trap 4 testcases, uArg selects the variation. */
463 VMMGC_DO_TESTCASE_TRAP_4,
464 /** Trap 5 testcases, uArg selects the variation. */
465 VMMGC_DO_TESTCASE_TRAP_5,
466 /** Trap 6 testcases, uArg selects the variation. */
467 VMMGC_DO_TESTCASE_TRAP_6,
468 /** Trap 7 testcases, uArg selects the variation. */
469 VMMGC_DO_TESTCASE_TRAP_7,
470 /** Trap 8 testcases, uArg selects the variation. */
471 VMMGC_DO_TESTCASE_TRAP_8,
472 /** Trap 9 testcases, uArg selects the variation. */
473 VMMGC_DO_TESTCASE_TRAP_9,
474 /** Trap 0a testcases, uArg selects the variation. */
475 VMMGC_DO_TESTCASE_TRAP_0A,
476 /** Trap 0b testcases, uArg selects the variation. */
477 VMMGC_DO_TESTCASE_TRAP_0B,
478 /** Trap 0c testcases, uArg selects the variation. */
479 VMMGC_DO_TESTCASE_TRAP_0C,
480 /** Trap 0d testcases, uArg selects the variation. */
481 VMMGC_DO_TESTCASE_TRAP_0D,
482 /** Trap 0e testcases, uArg selects the variation. */
483 VMMGC_DO_TESTCASE_TRAP_0E,
484 /** The last trap testcase (exclusive). */
485 VMMGC_DO_TESTCASE_TRAP_LAST,
486 /** Testcase for checking interrupt forwarding. */
487 VMMGC_DO_TESTCASE_HYPER_INTERRUPT,
488 /** Switching testing and profiling stub. */
489 VMMGC_DO_TESTCASE_NOP,
490 /** Testcase for checking interrupt masking. */
491 VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
492 /** Switching testing and profiling stub. */
493 VMMGC_DO_TESTCASE_HWACCM_NOP,
494
495 /** The usual 32-bit hack. */
496 VMMGC_DO_32_BIT_HACK = 0x7fffffff
497} VMMGCOPERATION;
498
499
500RT_C_DECLS_BEGIN
501
502#ifdef IN_RING3
503int vmmR3SwitcherInit(PVM pVM);
504void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
505#endif /* IN_RING3 */
506
507#ifdef IN_RING0
508/**
509 * World switcher assembly routine.
510 * It will call VMMGCEntry().
511 *
512 * @returns return code from VMMGCEntry().
513 * @param pVM The VM in question.
514 * @param uArg See VMMGCEntry().
515 * @internal
516 */
517DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);
518
519/**
520 * Callback function for vmmR0CallRing3SetJmp.
521 *
522 * @returns VBox status code.
523 * @param pVM The VM handle.
524 */
525typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
526/** Pointer to FNVMMR0SETJMP(). */
527typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;
528
529/**
530 * The setjmp variant used for calling Ring-3.
531 *
532 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
533 * in the middle of a ring-3 call. Another difference is the function pointer and
534 * argument. This has to do with resuming code and the stack frame of the caller.
535 *
536 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
537 * @param pJmpBuf The jmp_buf to set.
538 * @param pfn The function to be called when not resuming.
539 * @param pVM The argument of that function.
540 */
541DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
542
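/* Illustrative sketch (assumption: the callback name and call site are made up,
 * only the vmmR0CallRing3SetJmp() signature above is from this file): arming
 * the jump buffer before running guest code so that a deeply nested
 * vmmR0CallRing3LongJmp() can bail out toward ring-3 and later resume here.
 *
 *     static DECLCALLBACK(int) exampleRunLoop(PVM pVM, PVMCPU pVCpu)
 *     {
 *         // run the guest; may eventually trigger vmmR0CallRing3LongJmp()
 *         return VINF_SUCCESS;
 *     }
 *     ...
 *     rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, exampleRunLoop, pVM, pVCpu);
 *     // rc is either exampleRunLoop()'s status or the rc passed to the long jump
 */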
543/**
544 * Callback function for vmmR0CallRing3SetJmpEx.
545 *
546 * @returns VBox status code.
547 * @param pvUser The user argument.
548 */
549typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
550/** Pointer to FNVMMR0SETJMPEX(). */
551typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;
552
553/**
554 * Same as vmmR0CallRing3SetJmp except for the function signature.
555 *
556 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
557 * @param pJmpBuf The jmp_buf to set.
558 * @param pfn The function to be called when not resuming.
559 * @param pvUser The argument of that function.
560 */
561DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);
562
563
564/**
565 * Worker for VMMRZCallRing3.
566 * This will save the stack and registers.
567 *
568 * @returns rc.
569 * @param pJmpBuf Pointer to the jump buffer.
570 * @param rc The return code.
571 */
572DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
573
574/**
575 * Internal R0 logger worker: Logger wrapper.
576 */
577VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);
578
579/**
580 * Internal R0 logger worker: Flush logger.
581 *
582 * @param pLogger The logger instance to flush.
583 * @remark This function must be exported!
584 */
585VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);
586
587/**
588 * Internal R0 logger worker: Custom prefix.
589 *
590 * @returns Number of chars written.
591 *
592 * @param pLogger The logger instance.
593 * @param pchBuf The output buffer.
594 * @param cchBuf The size of the buffer.
595 * @param pvUser User argument (ignored).
596 */
597VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
598
599#endif /* IN_RING0 */
600#ifdef IN_RC
601
602/**
603 * Internal GC logger worker: Logger wrapper.
604 */
605VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);
606
607/**
608 * Internal GC release logger worker: Logger wrapper.
609 */
610VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);
611
612/**
613 * Internal GC logger worker: Flush logger.
614 *
615 * @returns VINF_SUCCESS.
616 * @param pLogger The logger instance to flush.
617 * @remark This function must be exported!
618 */
619VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);
620
621/** @name Trap testcases and related labels.
622 * @{ */
623DECLASM(void) vmmGCEnableWP(void);
624DECLASM(void) vmmGCDisableWP(void);
625DECLASM(int) vmmGCTestTrap3(void);
626DECLASM(int) vmmGCTestTrap8(void);
627DECLASM(int) vmmGCTestTrap0d(void);
628DECLASM(int) vmmGCTestTrap0e(void);
629DECLASM(int) vmmGCTestTrap0e_FaultEIP(void); /**< a label */
630DECLASM(int) vmmGCTestTrap0e_ResumeEIP(void); /**< a label */
631/** @} */
632
633#endif /* IN_RC */
634
635RT_C_DECLS_END
636
637/** @} */
638
639#endif