VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 97358

Last change on this file since 97358 was 96925, checked in by vboxsync, 2 years ago

VMM,RuntimeR0: Flush the log buffer directly to the parent VMM if that's the only destination, and honor the unbuffered setting when that's the case. Setting VBOX_LOG_DEST='nofile vmm' & excluding 'buffered' from VBOX_LOG_FLAGS for the inner VBox instance will result in the logging being intermixed without any buffering delays, and there is no need to try to correlate events between the outer and inner VBox instances. bugref:10092

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.0 KB
Line 
1/* $Id: VMMInternal.h 96925 2022-09-28 20:39:43Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
29#define VMM_INCLUDED_SRC_include_VMMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/sup.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/param.h>
39#include <VBox/log.h>
40#include <iprt/critsect.h>
41
42#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
43# error "Not in VMM! This is an internal header!"
44#endif
45#if HC_ARCH_BITS == 32
46# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
47#endif
48
49
50
/** @defgroup grp_vmm_int Internals
 * @ingroup grp_vmm
 * @internal
 * @{
 */

/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif


/** Number of buffers per logger. */
#define VMMLOGGER_BUFFER_COUNT  4
81
/**
 * R0 logger data (ring-0 only data).
 */
typedef struct VMMR0PERVCPULOGGER
{
    /** Pointer to the logger instance.
     * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
     * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
     * RTLOGGER::u64UserValue3 is currently set to the PGVMCPU value too. */
    R0PTRTYPE(PRTLOGGER)    pLogger;
    /** Log buffer descriptor.
     * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
    RTLOGBUFFERDESC         aBufDescs[VMMLOGGER_BUFFER_COUNT];
    /** Flag indicating whether we've registered the instance already. */
    bool                    fRegistered;
    /** Set if the EMT is waiting on hEventFlushWait. */
    bool                    fEmtWaiting;
    /** Set while we're inside vmmR0LoggerFlushCommon to prevent recursion. */
    bool                    fFlushing;
    /** Flush to parent VMM's debug log instead of ring-3. */
    bool                    fFlushToParentVmmDbg : 1;
    /** Flush to parent VMM's release log instead of ring-3. */
    bool                    fFlushToParentVmmRel : 1;
    /** Number of buffers currently queued for flushing. */
    uint32_t volatile       cFlushing;
    /** The event semaphore the EMT waits on while the buffer is being flushed. */
    RTSEMEVENT              hEventFlushWait;
} VMMR0PERVCPULOGGER;
/** Pointer to the R0 logger data (ring-0 only). */
typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
112
113
/**
 * R0 logger data shared with ring-3 (per CPU).
 */
typedef struct VMMR3CPULOGGER
{
    /** Buffer info. */
    struct
    {
        /** Auxiliary buffer descriptor. */
        RTLOGBUFFERAUXDESC  AuxDesc;
        /** Ring-3 mapping of the logging buffer. */
        R3PTRTYPE(char *)   pchBufR3;
    } aBufs[VMMLOGGER_BUFFER_COUNT];
    /** The current buffer. */
    uint32_t                idxBuf;
    /** Number of buffers currently queued for flushing (copy of
     * VMMR0PERVCPULOGGER::cFlushing). */
    uint32_t volatile       cFlushing;
    /** The buffer size. */
    uint32_t                cbBuf;
    /** Number of bytes dropped because the flush context didn't allow waiting. */
    uint32_t                cbDropped;
    /** Statistics: number of buffer flushes. */
    STAMCOUNTER             StatFlushes;
    /** Statistics: flush contexts that couldn't block (see cbDropped). */
    STAMCOUNTER             StatCannotBlock;
    /** Statistics: profiling of waits for buffer flushing. */
    STAMPROFILE             StatWait;
    /** Statistics: profiling of buffer races. */
    STAMPROFILE             StatRaces;
    /** Statistics: races that went to ring-0. */
    STAMCOUNTER             StatRacesToR0;
} VMMR3CPULOGGER;
/** Pointer to r0 logger data shared with ring-3. */
typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;
144
/** @name Logger indexes for VMMR0PERVCPU::u.aLoggers and VMMCPU::u.aLoggers.
 * @{ */
/** Index of the regular (debug) logger. */
#define VMMLOGGER_IDX_REGULAR   0
/** Index of the release logger. */
#define VMMLOGGER_IDX_RELEASE   1
/** Number of loggers (array size). */
#define VMMLOGGER_IDX_MAX       2
/** @} */
151
152
/** Pointer to a ring-0 jump buffer. */
typedef struct VMMR0JMPBUF *PVMMR0JMPBUF;
/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t    ebx;
    uint32_t    esi;
    uint32_t    edi;
    uint32_t    ebp;
    uint32_t    esp;
    uint32_t    eip;
    uint32_t    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t    rbx;
# ifdef RT_OS_WINDOWS
    /* RSI/RDI are callee-saved in the Microsoft x64 calling convention only. */
    uint64_t    rsi;
    uint64_t    rdi;
# endif
    uint64_t    rbp;
    uint64_t    r12;
    uint64_t    r13;
    uint64_t    r14;
    uint64_t    r15;
    uint64_t    rsp;
    uint64_t    rip;
# ifdef RT_OS_WINDOWS
    /* XMM6 thru XMM15 are callee-saved in the Microsoft x64 calling convention only. */
    uint128_t   xmm6;
    uint128_t   xmm7;
    uint128_t   xmm8;
    uint128_t   xmm9;
    uint128_t   xmm10;
    uint128_t   xmm11;
    uint128_t   xmm12;
    uint128_t   xmm13;
    uint128_t   xmm14;
    uint128_t   xmm15;
# endif
    uint64_t    rflags;
#endif
    /** @} */

    /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */
    RTHCUINTREG UnwindSp;
    /** RSP/ESP at the time of the long jump call. */
    RTHCUINTREG UnwindRetSp;
    /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */
    RTHCUINTREG UnwindBp;
    /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    RTHCUINTREG UnwindPc;
    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    RTHCUINTREG UnwindRetPcValue;
    /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
    RTHCUINTREG UnwindRetPcLocation;

    /** The function last being executed here. */
    RTHCUINTREG pfn;
    /** The first argument to the function. */
    RTHCUINTREG pvUser1;
    /** The second argument to the function. */
    RTHCUINTREG pvUser2;

    /** Number of valid bytes in pvStackBuf. */
    uint32_t    cbStackValid;
    /** Size of buffer pvStackBuf points to. */
    uint32_t    cbStackBuf;
    /** Pointer to buffer for mirroring the stack. Optional. */
    RTR0PTR     pvStackBuf;
    /** Pointer to a ring-3 accessible jump buffer structure for automatic
     * mirroring on longjmp. Optional. */
    R0PTRTYPE(PVMMR0JMPBUF) pMirrorBuf;
} VMMR0JMPBUF;
231
232
/**
 * Log flusher job.
 *
 * There is a ring buffer of these in ring-0 (VMMR0PERVM::aLogFlushRing) and a
 * copy of the current one in the shared VM structure (VMM::LogFlusherItem).
 */
typedef union VMMLOGFLUSHERENTRY
{
    struct
    {
        /** The virtual CPU ID. */
        uint32_t idCpu : 16;
        /** The logger: 0 for release, 1 for debug. */
        uint32_t idxLogger : 8;
        /** The buffer to be flushed. */
        uint32_t idxBuffer : 7;
        /** Set by the flusher thread once it fetched the entry and started
         * processing it. */
        uint32_t fProcessing : 1;
    } s;
    /** Raw 32-bit view for atomic access to the whole entry. */
    uint32_t u32;
} VMMLOGFLUSHERENTRY;
255
256
/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;
    /** Alignment padding. */
    bool                        afPadding0[7];

#if 0 /* pointless when timers doesn't run on EMT */
    /** The EMT yield timer. */
    TMTIMERHANDLE               hYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;
#endif

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Semaphore to wait on upon recursing. */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPush;
    /** Semaphore to wait on after done with recursion (caller restoring state). */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPop;
    /** Semaphore the initiator waits on while the EMTs are getting into position
     * on hEvtMulRendezvousRecursionPush. */
    RTSEMEVENT                  hEvtRendezvousRecursionPushCaller;
    /** Semaphore the initiator waits on while the EMTs sitting on
     * hEvtMulRendezvousRecursionPop wakes up and leave. */
    RTSEMEVENT                  hEvtRendezvousRecursionPopCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that has entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that has done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that has returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** The recursion depth. */
    volatile uint32_t           cRendezvousRecursions;
    /** The number of EMTs that have entered the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPush;
    /** The number of EMTs that have left the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPop;
    /** Triggers rendezvous recursion in the other threads. */
    volatile bool               fRendezvousRecursion;

    /** @} */

    /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPendingApiTrusty : 1;
    /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPossible : 1;
    /** Set if ring-0 uses context hooks.  */
    bool                        fIsUsingContextHooks : 1;

    bool                        afAlignment2[2]; /**< Alignment padding. */

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** @name Logging
     * @{ */
    /** Used when setting up ring-0 logger. */
    uint64_t                    nsProgramStart;
    /** Log flusher thread. */
    RTTHREAD                    hLogFlusherThread;
    /** Copy of the current work log flusher work item. */
    VMMLOGFLUSHERENTRY volatile LogFlusherItem;
    /** Statistics: number of log flusher flushes. */
    STAMCOUNTER                 StatLogFlusherFlushes;
    /** Statistics: flushes that didn't require waking the flusher thread. */
    STAMCOUNTER                 StatLogFlusherNoWakeUp;
    /** @} */

    /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
    STAMCOUNTER                 StatRunGC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetMSRRead;
    STAMCOUNTER                 StatRZRetMSRWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3Total;
    STAMCOUNTER                 StatRZRetToR3FF;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetToR3Iem;
    STAMCOUNTER                 StatRZRetToR3Iom;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
418
419
/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;
    /** Alignment padding. */
    uint32_t                    u32Padding0;

    /** @name Rendezvous
     * @{ */
    /** Whether the EMT is executing a rendezvous right now. For detecting
     * attempts at recursive rendezvous. */
    bool volatile               fInRendezvous;
    bool                        afPadding1[2];
    /** @} */

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0 for.
     * The deadlines we can calculate are  from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding  */
    uint32_t                    u32Padding2;

    /** @name Raw-mode context tracing data.
     * @{ */
    SUPDRVTRACERUSRCTX          TracerCtx;
    /** @} */

    /** @name Ring-0 assertion info for this EMT.
     * @{ */
    /** Copy of the ring-0 jmp buffer after an assertion. */
    VMMR0JMPBUF                 AssertJmpBuf;
    /** Copy of the assertion stack. */
    uint8_t                     abAssertStack[8192];
    /** @} */

    /**
     * Loggers.
     */
    union
    {
        struct
        {
            /** The R0 logger data shared with ring-3. */
            VMMR3CPULOGGER      Logger;
            /** The R0 release logger data shared with ring-3.  */
            VMMR3CPULOGGER      RelLogger;
        } s;
        /** Array view. */
        VMMR3CPULOGGER          aLoggers[VMMLOGGER_IDX_MAX];
    } u;

    /* Ring-0 halt statistics (see the cR0Halts* counters above). */
    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
    STAMCOUNTER                 StatR0HaltToR3Other;
    STAMCOUNTER                 StatR0HaltToR3PendingFF;
    STAMCOUNTER                 StatR0HaltToR3SmallDelta;
    STAMCOUNTER                 StatR0HaltToR3PostNoInt;
    STAMCOUNTER                 StatR0HaltToR3PostPendingFF;
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/* The s.Logger/s.RelLogger members must line up with the aLoggers array view. */
AssertCompile(   RTASSERT_OFFSET_OF(VMMCPU, u.s.Logger)
              == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_REGULAR);
AssertCompile(RTASSERT_OFFSET_OF(VMMCPU, u.s.RelLogger)
              == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_RELEASE);

/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
508
/**
 * VMM per-VCpu ring-0 only instance data.
 */
typedef struct VMMR0PERVCPU
{
    /** The EMT hash table index. */
    uint16_t                            idxEmtHash;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                                fLogFlushingDisabled;
    bool                                afPadding1[5];
    /** Pointer to the VMMR0EntryFast preemption state structure.
     * This is used to temporarily restore preemption before blocking.  */
    R0PTRTYPE(PRTTHREADPREEMPTSTATE)    pPreemptState;
    /** Thread context switching hook (ring-0). */
    RTTHREADCTXHOOK                     hCtxHook;

    /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
     * @note Cannot be put on the stack as the location may change and upset the
     *       validation of resume-after-ring-3-call logic.
     * @todo This no longer needs to be here now that we don't call ring-3 and mess
     *       around with stack restoring/switching.
     * @{ */
    PGVM                                pGVM;
    VMCPUID                             idCpu;
    VMMR0OPERATION                      enmOperation;
    PSUPVMMR0REQHDR                     pReq;
    uint64_t                            u64Arg;
    PSUPDRVSESSION                      pSession;
    /** @} */

    /** @name Ring-0 setjmp / assertion handling.
     * @{ */
    /** The ring-0 setjmp buffer. */
    VMMR0JMPBUF                         AssertJmpBuf;
    /** The disable counter. */
    uint32_t                            cCallRing3Disabled;
    uint32_t                            u32Padding3;
    /** Ring-0 assertion notification callback. */
    R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback;
    /** Argument for pfnAssertCallback. */
    R0PTRTYPE(void *)                   pvAssertCallbackUser;
    /** @} */

    /**
     * Loggers
     */
    union
    {
        struct
        {
            /** The R0 logger data. */
            VMMR0PERVCPULOGGER  Logger;
            /** The R0 release logger data. */
            VMMR0PERVCPULOGGER  RelLogger;
        } s;
        /** Array view. */
        VMMR0PERVCPULOGGER      aLoggers[VMMLOGGER_IDX_MAX];
    } u;
} VMMR0PERVCPU;
/* The s.Logger/s.RelLogger members must line up with the aLoggers array view. */
AssertCompile(   RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.Logger)
              == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_REGULAR);
AssertCompile(RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger)
              == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE);
AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);
/** Pointer to VMM ring-0 VMCPU instance data. */
typedef VMMR0PERVCPU *PVMMR0PERVCPU;
575
/** @name RTLOGGER::u32UserValue1 Flags
 * @{ */
/** The magic value. */
#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE      UINT32_C(0x7d297f05)
/** Part of the flags value used for the magic. */
#define VMMR0_LOGGER_FLAGS_MAGIC_MASK       UINT32_C(0xffffff0f)
/** @} */
583
584
/**
 * VMM data kept in the ring-0 GVM.
 */
typedef struct VMMR0PERVM
{
    /** Set if vmmR0InitVM has been called. */
    bool                fCalledInitVm;
    bool                afPadding1[7];

    /** @name Logging
     * @{ */
    /** Logger (debug) buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjLogger;
    /** The ring-3 mapping object for hMemObjLogger. */
    RTR0MEMOBJ          hMapObjLogger;

    /** Release logger buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjReleaseLogger;
    /** The ring-3 mapping object for hMemObjReleaseLogger. */
    RTR0MEMOBJ          hMapObjReleaseLogger;

    struct
    {
        /** Spinlock protecting the logger ring buffer and associated variables. */
        R0PTRTYPE(RTSPINLOCK)   hSpinlock;
        /** The log flusher thread handle to make sure there is only one. */
        RTNATIVETHREAD          hThread;
        /** The handle to the event semaphore the log flusher waits on. */
        RTSEMEVENT              hEvent;
        /** The index of the log flusher queue head (flusher thread side). */
        uint32_t volatile       idxRingHead;
        /** The index of the log flusher queue tail (EMT side). */
        uint32_t volatile       idxRingTail;
        /** Set if the log flusher thread is waiting for work and needs poking. */
        bool volatile           fThreadWaiting;
        /** Set when the log flusher thread should shut down. */
        bool volatile           fThreadShutdown;
        /** Indicates that the log flusher thread is running. */
        bool volatile           fThreadRunning;
        bool                    afPadding2[5];
        /** Statistics: number of flushes performed. */
        STAMCOUNTER             StatFlushes;
        /** Statistics: flushes that didn't need to wake the flusher thread. */
        STAMCOUNTER             StatNoWakeUp;
        /** Logger ring buffer.
         * This is for communicating with the log flusher thread.  */
        VMMLOGFLUSHERENTRY      aRing[VMM_MAX_CPU_COUNT * 2 /*loggers*/ * 1 /*buffer*/ + 16 /*fudge*/];
    } LogFlusher;
    /** @} */
} VMMR0PERVM;
635
RT_C_DECLS_BEGIN

/* Custom log/string format type registration (see VMMR3.cpp / VMMAll.cpp for the bodies). */
int  vmmInitFormatTypes(void);
void vmmTermFormatTypes(void);
/* Returns a value identifying the build type (debug/release/strict). */
uint32_t vmmGetBuildType(void);

#ifdef IN_RING3
int  vmmR3SwitcherInit(PVM pVM);
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */

#ifdef IN_RING0

/**
 * World switcher assembly routine.
 * It will call VMMRCEntry().
 *
 * @returns return code from VMMRCEntry().
 * @param   pVM     The cross context VM structure.
 * @param   uArg    See VMMRCEntry().
 * @internal
 */
DECLASM(int)    vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call.  Another difference is the function pointer and
 * argument.  This has to do with resuming code and the stack frame of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
DECLASM(int)    vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);


/**
 * Callback function for vmmR0CallRing3SetJmp2.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
/** Pointer to FNVMMR0SETJMP2(). */
typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pGVM        The ring-0 VM structure.
 * @param   idCpu       The ID of the calling EMT.
 */
DECLASM(int)    vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);


/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser      The user argument.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pvUser      The argument of that function.
 * @param   uCallKey    Unused call parameter that should be used to help
 *                      uniquely identify the call.
 */
DECLASM(int)    vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf     Pointer to the jump buffer.
 * @param   rc          The return code.
 */
DECLASM(int)    vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
int  vmmR0TripleFaultHackInit(void);
void vmmR0TripleFaultHackTerm(void);
# endif

#endif /* IN_RING0 */

RT_C_DECLS_END

/** @} */

#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette