VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMInternal.h@34906

Last change on this file since 34906 was 33540, checked in by vboxsync, 14 years ago

*: spelling fixes, thanks Timeless!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.5 KB
/* $Id: VMMInternal.h 33540 2010-10-28 09:27:05Z vboxsync $ */
/** @file
 * VMM - Internal header file.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___VMMInternal_h
#define ___VMMInternal_h

#include <VBox/cdefs.h>
#include <VBox/stam.h>
#include <VBox/log.h>
#include <iprt/critsect.h>


#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
# error "Not in VMM! This is an internal header!"
#endif


/** @defgroup grp_vmm_int   Internals
 * @ingroup grp_vmm
 * @internal
 * @{
 */

/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif


/**
 * Converts a VMM pointer into a VM pointer.
 * @returns Pointer to the VM structure the VMM is part of.
 * @param   pVMM    Pointer to VMM instance data.
 */
#define VMM2VM(pVMM)  ( (PVM)((char*)pVMM - pVMM->offVM) )

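/* Illustrative sketch, not part of the original header: VMM2VM() performs a
 * container_of style conversion, recovering the owning VM structure from the
 * embedded VMM data via the offVM offset stored at init time.  Assuming a
 * valid PVMM obtained from somewhere:
 *
 *     PVMM pVMM = ...;            // embedded VMM instance data
 *     PVM  pVM  = VMM2VM(pVMM);   // back to the VM structure containing it
 */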

/**
 * Switcher function, HC to RC.
 *
 * @param   pVM         The VM handle.
 * @returns Return code indicating the action to take.
 */
typedef DECLASMTYPE(int) FNVMMSWITCHERHC(PVM pVM);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERHC *PFNVMMSWITCHERHC;

/**
 * Switcher function, RC to HC.
 *
 * @param   rc          VBox status code.
 */
typedef DECLASMTYPE(void) FNVMMSWITCHERRC(int rc);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERRC *PFNVMMSWITCHERRC;


/**
 * The ring-0 logger instance wrapper.
 *
 * We need to be able to find the VM handle from the logger instance, so we wrap
 * it in this structure.
 */
typedef struct VMMR0LOGGER
{
    /** Pointer to the VM handle. */
    R0PTRTYPE(PVM)              pVM;
    /** Size of the allocated logger instance (Logger). */
    uint32_t                    cbLogger;
    /** Flag indicating whether we've created the logger Ring-0 instance yet. */
    bool                        fCreated;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                        fFlushingDisabled;
    /** Flag indicating whether we've registered the instance already. */
    bool                        fRegistered;
    bool                        a8Alignment;
    /** The CPU ID. */
    VMCPUID                     idCpu;
#if HC_ARCH_BITS == 64
    uint32_t                    u32Alignment;
#endif
    /** The ring-0 logger instance. This extends beyond the size. */
    RTLOGGER                    Logger;
} VMMR0LOGGER;
/** Pointer to a ring-0 logger instance wrapper. */
typedef VMMR0LOGGER *PVMMR0LOGGER;
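
/* Illustrative sketch, not part of the original header: the RTLOGGER instance
 * is embedded last so that ring-0 code which only receives a PRTLOGGER (for
 * example a flush callback) can recover the wrapper, and with it the VM
 * handle, by subtracting the member offset.  The callback name below is
 * hypothetical:
 *
 *     static void vmmR0ExampleFlush(PRTLOGGER pLogger)
 *     {
 *         PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
 *         PVM          pVM       = pR0Logger->pVM;
 *         NOREF(pVM); // ... flush through pVM ...
 *     }
 */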


/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t                    ebx;
    uint32_t                    esi;
    uint32_t                    edi;
    uint32_t                    ebp;
    uint32_t                    esp;
    uint32_t                    eip;
    uint32_t                    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t                    rbx;
# ifdef RT_OS_WINDOWS
    uint64_t                    rsi;
    uint64_t                    rdi;
# endif
    uint64_t                    rbp;
    uint64_t                    r12;
    uint64_t                    r13;
    uint64_t                    r14;
    uint64_t                    r15;
    uint64_t                    rsp;
    uint64_t                    rip;
# ifdef RT_OS_WINDOWS
    uint128_t                   xmm6;
    uint128_t                   xmm7;
    uint128_t                   xmm8;
    uint128_t                   xmm9;
    uint128_t                   xmm10;
    uint128_t                   xmm11;
    uint128_t                   xmm12;
    uint128_t                   xmm13;
    uint128_t                   xmm14;
    uint128_t                   xmm15;
# endif
    uint64_t                    rflags;
#endif
    /** @} */

    /** Flag that indicates that we've done a ring-3 call. */
    bool                        fInRing3Call;
    /** The number of bytes we've saved. */
    uint32_t                    cbSavedStack;
    /** Pointer to the buffer used to save the stack.
     * This is assumed to be 8KB. */
    RTR0PTR                     pvSavedStack;
    /** The esp value we match against esp on resume to make sure the stack wasn't relocated. */
    RTHCUINTREG                 SpCheck;
    /** The esp we should resume execution with after the restore. */
    RTHCUINTREG                 SpResume;
    /** ESP/RSP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEsp;
    /** EBP/RBP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEbp;

    /** Stats: Max amount of stack used. */
    uint32_t                    cbUsedMax;
    /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    uint32_t                    cbUsedAvg;
    /** Stats: Total amount of stack used. */
    uint64_t                    cbUsedTotal;
    /** Stats: Number of stack usages. */
    uint64_t                    cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;


/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Offset to the VM structure.
     * See VMM2VM(). */
    RTINT                       offVM;

    /** @name World Switcher and Related
     * @{
     */
    /** Size of the core code. */
    RTUINT                      cbCoreCode;
    /** Physical address of core code. */
    RTHCPHYS                    HCPhysCoreCode;
    /** Pointer to core code ring-3 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR3PTR                     pvCoreCodeR3;
    /** Pointer to core code ring-0 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR0PTR                     pvCoreCodeR0;
    /** Pointer to core code guest context mapping. */
    RTRCPTR                     pvCoreCodeRC;
    RTRCPTR                     pRCPadding0; /**< Alignment padding */
#ifdef VBOX_WITH_NMI
    /** The guest context address of the APIC (host) mapping. */
    RTRCPTR                     GCPtrApicBase;
    RTRCPTR                     pRCPadding1; /**< Alignment padding */
#endif
    /** The current switcher.
     * This will be set before the VMM is fully initialized. */
    VMMSWITCHER                 enmSwitcher;
    /** Flag to disable the switcher permanently (VMX) (boolean) */
    bool                        fSwitcherDisabled;
    /** Array of offsets to the different switchers within the core code. */
    RTUINT                      aoffSwitchers[VMMSWITCHER_MAX];

    /** Resume Guest Execution. See CPUMGCResumeGuest(). */
    RTRCPTR                     pfnCPUMRCResumeGuest;
    /** Resume Guest Execution in V86 mode. See CPUMGCResumeGuestV86(). */
    RTRCPTR                     pfnCPUMRCResumeGuestV86;
    /** Call Trampoline. See vmmGCCallTrampoline(). */
    RTRCPTR                     pfnCallTrampolineRC;
    /** Guest to host switcher entry point. */
    RCPTRTYPE(PFNVMMSWITCHERRC) pfnGuestToHostRC;
    /** Host to guest switcher entry point. */
    R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
    /** @} */

    /** @name Logging
     * @{
     */
    /** Size of the allocated logger instance (pRCLoggerRC/pRCLoggerR3). */
    uint32_t                    cbRCLogger;
    /** Pointer to the RC logger instance - RC Ptr.
     * This is NULL if logging is disabled. */
    RCPTRTYPE(PRTLOGGERRC)      pRCLoggerRC;
    /** Pointer to the GC logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PRTLOGGERRC)      pRCLoggerR3;
    /** Pointer to the GC release logger instance - R3 Ptr. */
    R3PTRTYPE(PRTLOGGERRC)      pRCRelLoggerR3;
    /** Pointer to the GC release logger instance - RC Ptr. */
    RCPTRTYPE(PRTLOGGERRC)      pRCRelLoggerRC;
    /** Size of the allocated release logger instance (pRCRelLoggerRC/pRCRelLoggerR3).
     * This may differ from cbRCLogger. */
    uint32_t                    cbRCRelLogger;
    /** Whether log flushing has been disabled or not. */
    bool                        fRCLoggerFlushingDisabled;
    bool                        afAlignment[5]; /**< Alignment padding. */
    /** @} */

    /** Whether the stack guard pages have been stationed or not. */
    bool                        fStackGuardsStationed;
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;

    /** The EMT yield timer. */
    PTMTIMERR3                  pYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;

    /** Critical section.
     * Used for synchronizing all VCPUs.
     */
    RTCRITSECT                  CritSectSync;

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** @} */

#if HC_ARCH_BITS == 32
    uint32_t                    u32Alignment; /**< Alignment padding. */
#endif

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** Number of VMMR0_DO_RUN_GC calls. */
    STAMCOUNTER                 StatRunRC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetIOBlockEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetPDFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetCallRing3;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPGMChangeMode;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    STAMCOUNTER                 StatRZCallPDMLock;
    STAMCOUNTER                 StatRZCallLogFlush;
    STAMCOUNTER                 StatRZCallPGMPoolGrow;
    STAMCOUNTER                 StatRZCallPGMMapChunk;
    STAMCOUNTER                 StatRZCallPGMAllocHandy;
    STAMCOUNTER                 StatRZCallRemReplay;
    STAMCOUNTER                 StatRZCallVMSetError;
    STAMCOUNTER                 StatRZCallVMSetRuntimeError;
    STAMCOUNTER                 StatRZCallPGMLock;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;


/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** Offset to the VMCPU structure.
     * See VMM2VMCPU(). */
    RTINT                       offVMCPU;

    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;

    /** VMM stack, pointer to the top of the stack in R3.
     * Stack is allocated from the hypervisor heap and is page aligned
     * and always writable in RC. */
    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;

#ifdef LOG_ENABLED
    /** Pointer to the R0 logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0LoggerR3;
    /** Pointer to the R0 logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0LoggerR0;
#endif

    /** @name Call Ring-3
     * Formerly known as host calls.
     * @{ */
    /** The disable counter. */
    uint32_t                    cCallRing3Disabled;
    /** The pending operation. */
    VMMCALLRING3                enmCallRing3Operation;
    /** The result of the last operation. */
    int32_t                     rcCallRing3;
#if HC_ARCH_BITS == 64
    uint32_t                    padding;
#endif
    /** The argument to the operation. */
    uint64_t                    u64CallRing3Arg;
    /** The Ring-0 jmp buffer. */
    VMMR0JMPBUF                 CallRing3JmpBufR0;
    /** @} */

} VMMCPU;
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;


/**
 * The VMMGCEntry() codes.
 */
typedef enum VMMGCOPERATION
{
    /** Do GC module init. */
    VMMGC_DO_VMMGC_INIT = 1,

    /** The first Trap testcase. */
    VMMGC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
    /** Trap 0 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0 = VMMGC_DO_TESTCASE_TRAP_FIRST,
    /** Trap 1 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_1,
    /** Trap 2 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_2,
    /** Trap 3 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_3,
    /** Trap 4 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_4,
    /** Trap 5 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_5,
    /** Trap 6 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_6,
    /** Trap 7 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_7,
    /** Trap 8 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_8,
    /** Trap 9 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_9,
    /** Trap 0a testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0A,
    /** Trap 0b testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0B,
    /** Trap 0c testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0C,
    /** Trap 0d testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0D,
    /** Trap 0e testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0E,
    /** The last trap testcase (exclusive). */
    VMMGC_DO_TESTCASE_TRAP_LAST,
    /** Testcase for checking interrupt forwarding. */
    VMMGC_DO_TESTCASE_HYPER_INTERRUPT,
    /** Switching testing and profiling stub. */
    VMMGC_DO_TESTCASE_NOP,
    /** Testcase for checking interrupt masking. */
    VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
    /** Switching testing and profiling stub. */
    VMMGC_DO_TESTCASE_HWACCM_NOP,

    /** The usual 32-bit hack. */
    VMMGC_DO_32_BIT_HACK = 0x7fffffff
} VMMGCOPERATION;


RT_C_DECLS_BEGIN

#ifdef IN_RING3
int  vmmR3SwitcherInit(PVM pVM);
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */

#ifdef IN_RING0
/**
 * World switcher assembly routine.
 * It will call VMMGCEntry().
 *
 * @returns return code from VMMGCEntry().
 * @param   pVM     The VM in question.
 * @param   uArg    See VMMGCEntry().
 * @internal
 */
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call. Another difference is the function pointer and
 * argument. This has to do with resuming code and the stack frame of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pVM         The argument of that function.
 * @param   pVCpu       The VMCPU handle.
 */
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
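
/* Illustrative sketch, not part of the original header: a worker matching
 * FNVMMR0SETJMP is run under the jump buffer so that a ring-3 'call' can
 * long-jump straight back here instead of unwinding the whole ring-0 call
 * chain.  The worker name is hypothetical; the jump buffer is assumed to be
 * the per-VCPU VMMCPU::CallRing3JmpBufR0 declared above:
 *
 *     static DECLCALLBACK(int) vmmR0ExampleWorker(PVM pVM, PVMCPU pVCpu)
 *     {
 *         // ... ring-0 work that may need to call back into ring-3 ...
 *         return VINF_SUCCESS;
 *     }
 *
 *     int rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0ExampleWorker, pVM, pVCpu);
 *     // rc is either the worker's status or whatever was handed to vmmR0CallRing3LongJmp().
 */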

/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser      The user argument.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pvUser      The argument of that function.
 */
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf     Pointer to the jump buffer.
 * @param   rc          The return code.
 */
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

/**
 * Internal R0 logger worker: Logger wrapper.
 */
VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);

#endif /* IN_RING0 */
#ifdef IN_RC

/**
 * Internal GC logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC release logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC logger worker: Flush logger.
 *
 * @returns VINF_SUCCESS.
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);

/** @name Trap testcases and related labels.
 * @{ */
DECLASM(void) vmmGCEnableWP(void);
DECLASM(void) vmmGCDisableWP(void);
DECLASM(int)  vmmGCTestTrap3(void);
DECLASM(int)  vmmGCTestTrap8(void);
DECLASM(int)  vmmGCTestTrap0d(void);
DECLASM(int)  vmmGCTestTrap0e(void);
DECLASM(int)  vmmGCTestTrap0e_FaultEIP(void);   /**< a label */
DECLASM(int)  vmmGCTestTrap0e_ResumeEIP(void);  /**< a label */
/** @} */

#endif /* IN_RC */

RT_C_DECLS_END

/** @} */

#endif