VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 96622

Last change on this file since 96622 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 27.8 KB
Line 
1/* $Id: VMMInternal.h 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
29#define VMM_INCLUDED_SRC_include_VMMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/sup.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/vmm/vmm.h>
38#include <VBox/param.h>
39#include <VBox/log.h>
40#include <iprt/critsect.h>
41
42#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
43# error "Not in VMM! This is an internal header!"
44#endif
45#if HC_ARCH_BITS == 32
46# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
47#endif
48
49
50
/** @defgroup grp_vmm_int Internals
 * @ingroup grp_vmm
 * @internal
 * @{
 */

/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif


/** Number of buffers per logger. */
#define VMMLOGGER_BUFFER_COUNT 4
81
/**
 * R0 logger data (ring-0 only data).
 */
typedef struct VMMR0PERVCPULOGGER
{
    /** Pointer to the logger instance.
     * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
     * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
     * RTLOGGER::u64UserValue3 is currently also set to the PGVMCPU value. */
    R0PTRTYPE(PRTLOGGER)    pLogger;
    /** Log buffer descriptors, one per buffer.
     * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
    RTLOGBUFFERDESC         aBufDescs[VMMLOGGER_BUFFER_COUNT];
    /** Flag indicating whether we've registered the instance already. */
    bool                    fRegistered;
    /** Set if the EMT is waiting on hEventFlushWait. */
    bool                    fEmtWaiting;
    /** Set while we're inside vmmR0LoggerFlushCommon to prevent recursion. */
    bool                    fFlushing;
    /** Explicit padding to keep the following 32-bit member aligned. */
    bool                    afPadding[1];
    /** Number of buffers currently queued for flushing. */
    uint32_t volatile       cFlushing;
    /** The event semaphore the EMT waits on while the buffer is being flushed. */
    RTSEMEVENT              hEventFlushWait;
} VMMR0PERVCPULOGGER;
/** Pointer to the R0 logger data (ring-0 only). */
typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
109
110
/**
 * R0 logger data shared with ring-3 (per CPU).
 */
typedef struct VMMR3CPULOGGER
{
    /** Buffer info. */
    struct
    {
        /** Auxiliary buffer descriptor. */
        RTLOGBUFFERAUXDESC  AuxDesc;
        /** Ring-3 mapping of the logging buffer. */
        R3PTRTYPE(char *)   pchBufR3;
    } aBufs[VMMLOGGER_BUFFER_COUNT];
    /** The current buffer. */
    uint32_t            idxBuf;
    /** Number of buffers currently queued for flushing (copy of
     * VMMR0PERVCPULOGGER::cFlushing). */
    uint32_t volatile   cFlushing;
    /** The buffer size. */
    uint32_t            cbBuf;
    /** Number of bytes dropped because the flush context didn't allow waiting. */
    uint32_t            cbDropped;
    /** Statistics: number of buffer flushes. */
    STAMCOUNTER         StatFlushes;
    /** Statistics: times the flush context could not block. */
    STAMCOUNTER         StatCannotBlock;
    /** Statistics: profile of flush waits. */
    STAMPROFILE         StatWait;
    /** Statistics: profile of buffer races. */
    STAMPROFILE         StatRaces;
    /** Statistics: races escalated to ring-0 (presumably — verify vs flusher code). */
    STAMCOUNTER         StatRacesToR0;
} VMMR3CPULOGGER;
/** Pointer to r0 logger data shared with ring-3. */
typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;

/** @name Logger indexes for VMMR0PERVCPU::u.aLoggers and VMMCPU::u.aLoggers.
 * @{ */
#define VMMLOGGER_IDX_REGULAR   0
#define VMMLOGGER_IDX_RELEASE   1
#define VMMLOGGER_IDX_MAX       2
/** @} */
148
149
/** Pointer to a ring-0 jump buffer. */
typedef struct VMMR0JMPBUF *PVMMR0JMPBUF;
/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff: callee-saved registers per host calling
     *  convention (Windows x64 additionally preserves rsi/rdi and xmm6-15).
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t    ebx;
    uint32_t    esi;
    uint32_t    edi;
    uint32_t    ebp;
    uint32_t    esp;
    uint32_t    eip;
    uint32_t    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t    rbx;
# ifdef RT_OS_WINDOWS
    uint64_t    rsi;
    uint64_t    rdi;
# endif
    uint64_t    rbp;
    uint64_t    r12;
    uint64_t    r13;
    uint64_t    r14;
    uint64_t    r15;
    uint64_t    rsp;
    uint64_t    rip;
# ifdef RT_OS_WINDOWS
    uint128_t   xmm6;
    uint128_t   xmm7;
    uint128_t   xmm8;
    uint128_t   xmm9;
    uint128_t   xmm10;
    uint128_t   xmm11;
    uint128_t   xmm12;
    uint128_t   xmm13;
    uint128_t   xmm14;
    uint128_t   xmm15;
# endif
    uint64_t    rflags;
#endif
    /** @} */

    /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */
    RTHCUINTREG UnwindSp;
    /** RSP/ESP at the time of the long jump call. */
    RTHCUINTREG UnwindRetSp;
    /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */
    RTHCUINTREG UnwindBp;
    /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    RTHCUINTREG UnwindPc;
    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    RTHCUINTREG UnwindRetPcValue;
    /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
    RTHCUINTREG UnwindRetPcLocation;

    /** The function last being executed here. */
    RTHCUINTREG pfn;
    /** The first argument to the function. */
    RTHCUINTREG pvUser1;
    /** The second argument to the function. */
    RTHCUINTREG pvUser2;

    /** Number of valid bytes in pvStackBuf. */
    uint32_t    cbStackValid;
    /** Size of buffer pvStackBuf points to. */
    uint32_t    cbStackBuf;
    /** Pointer to buffer for mirroring the stack. Optional. */
    RTR0PTR     pvStackBuf;
    /** Pointer to a ring-3 accessible jump buffer structure for automatic
     * mirroring on longjmp. Optional. */
    R0PTRTYPE(PVMMR0JMPBUF) pMirrorBuf;
} VMMR0JMPBUF;
228
229
/**
 * Log flusher job.
 *
 * There is a ring buffer of these in ring-0 (VMMR0PERVM::LogFlusher::aRing)
 * and a copy of the current one in the shared VM structure
 * (VMM::LogFlusherItem).
 */
typedef union VMMLOGFLUSHERENTRY
{
    struct
    {
        /** The virtual CPU ID. */
        uint32_t    idCpu : 16;
        /** The logger index (VMMLOGGER_IDX_XXX).
         * @note NOTE(review): the original comment said "0 for release, 1 for
         *       debug", which is the reverse of VMMLOGGER_IDX_REGULAR (0) /
         *       VMMLOGGER_IDX_RELEASE (1) above — confirm against the log
         *       flusher code. */
        uint32_t    idxLogger : 8;
        /** The buffer to be flushed. */
        uint32_t    idxBuffer : 7;
        /** Set by the flusher thread once it fetched the entry and started
         * processing it. */
        uint32_t    fProcessing : 1;
    } s;
    /** The whole entry as a single 32-bit value (for atomic copying). */
    uint32_t u32;
} VMMLOGFLUSHERENTRY;
252
253
/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;
    /** Alignment padding. */
    bool                        afPadding0[7];

#if 0 /* pointless when timers don't run on EMT */
    /** The EMT yield timer. */
    TMTIMERHANDLE               hYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;
#endif

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Semaphore to wait on upon recursing. */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPush;
    /** Semaphore to wait on after done with recursion (caller restoring state). */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPop;
    /** Semaphore the initiator waits on while the EMTs are getting into position
     * on hEvtMulRendezvousRecursionPush. */
    RTSEMEVENT                  hEvtRendezvousRecursionPushCaller;
    /** Semaphore the initiator waits on while the EMTs sitting on
     * hEvtMulRendezvousRecursionPop wake up and leave. */
    RTSEMEVENT                  hEvtRendezvousRecursionPopCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** The recursion depth. */
    volatile uint32_t           cRendezvousRecursions;
    /** The number of EMTs that have entered the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPush;
    /** The number of EMTs that have left the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPop;
    /** Triggers rendezvous recursion in the other threads. */
    volatile bool               fRendezvousRecursion;

    /** @} */

    /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPendingApiTrusty : 1;
    /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPossible : 1;
    /** Set if ring-0 uses context hooks.  */
    bool                        fIsUsingContextHooks : 1;

    bool                        afAlignment2[2]; /**< Alignment padding. */

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** @name Logging
     * @{ */
    /** Used when setting up ring-0 logger. */
    uint64_t                    nsProgramStart;
    /** Log flusher thread. */
    RTTHREAD                    hLogFlusherThread;
    /** Copy of the current work log flusher work item. */
    VMMLOGFLUSHERENTRY volatile LogFlusherItem;
    /** Statistics: number of log flushes. */
    STAMCOUNTER                 StatLogFlusherFlushes;
    /** Statistics: times the flusher thread did not need waking. */
    STAMCOUNTER                 StatLogFlusherNoWakeUp;
    /** @} */

    /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
    STAMCOUNTER                 StatRunGC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetMSRRead;
    STAMCOUNTER                 StatRZRetMSRWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3Total;
    STAMCOUNTER                 StatRZRetToR3FF;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetToR3Iem;
    STAMCOUNTER                 StatRZRetToR3Iom;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
415
416
/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;
    /** Alignment padding. */
    uint32_t                    u32Padding0;

    /** @name Rendezvous
     * @{ */
    /** Whether the EMT is executing a rendezvous right now.  For detecting
     *  attempts at recursive rendezvous. */
    bool volatile               fInRendezvous;
    bool                        afPadding1[2];
    /** @} */

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0 for.
     * The deadlines we can calculate are  from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding  */
    uint32_t                    u32Padding2;

    /** @name Raw-mode context tracing data.
     * @{ */
    SUPDRVTRACERUSRCTX          TracerCtx;
    /** @} */

    /** @name Ring-0 assertion info for this EMT.
     * @{ */
    /** Copy of the ring-0 jmp buffer after an assertion. */
    VMMR0JMPBUF                 AssertJmpBuf;
    /** Copy of the assertion stack. */
    uint8_t                     abAssertStack[8192];
    /** @} */

    /**
     * Loggers.
     */
    union
    {
        struct
        {
            /** The R0 logger data shared with ring-3. */
            VMMR3CPULOGGER      Logger;
            /** The R0 release logger data shared with ring-3. */
            VMMR3CPULOGGER      RelLogger;
        } s;
        /** Array view. */
        VMMR3CPULOGGER          aLoggers[VMMLOGGER_IDX_MAX];
    } u;

    /* Ring-0 halt method statistics. */
    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
    STAMCOUNTER                 StatR0HaltToR3Other;
    STAMCOUNTER                 StatR0HaltToR3PendingFF;
    STAMCOUNTER                 StatR0HaltToR3SmallDelta;
    STAMCOUNTER                 StatR0HaltToR3PostNoInt;
    STAMCOUNTER                 StatR0HaltToR3PostPendingFF;
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/* The s.Logger/s.RelLogger members must overlay the aLoggers array exactly. */
AssertCompile(   RTASSERT_OFFSET_OF(VMMCPU, u.s.Logger)
              == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_REGULAR);
AssertCompile(   RTASSERT_OFFSET_OF(VMMCPU, u.s.RelLogger)
              == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_RELEASE);

/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
505
/**
 * VMM per-VCpu ring-0 only instance data.
 */
typedef struct VMMR0PERVCPU
{
    /** The EMT hash table index. */
    uint16_t                            idxEmtHash;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                                fLogFlushingDisabled;
    bool                                afPadding1[5];
    /** Pointer to the VMMR0EntryFast preemption state structure.
     * This is used to temporarily restore preemption before blocking.  */
    R0PTRTYPE(PRTTHREADPREEMPTSTATE)    pPreemptState;
    /** Thread context switching hook (ring-0). */
    RTTHREADCTXHOOK                     hCtxHook;

    /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
     * @note Cannot be put on the stack as the location may change and upset the
     *       validation of resume-after-ring-3-call logic.
     * @todo This no longer needs to be here now that we don't call ring-3 and mess
     *       around with stack restoring/switching.
     * @{ */
    PGVM                                pGVM;
    VMCPUID                             idCpu;
    VMMR0OPERATION                      enmOperation;
    PSUPVMMR0REQHDR                     pReq;
    uint64_t                            u64Arg;
    PSUPDRVSESSION                      pSession;
    /** @} */

    /** @name Ring-0 setjmp / assertion handling.
     * @{ */
    /** The ring-0 setjmp buffer. */
    VMMR0JMPBUF                         AssertJmpBuf;
    /** The disable counter. */
    uint32_t                            cCallRing3Disabled;
    uint32_t                            u32Padding3;
    /** Ring-0 assertion notification callback. */
    R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback;
    /** Argument for pfnAssertCallback. */
    R0PTRTYPE(void *)                   pvAssertCallbackUser;
    /** @} */

    /**
     * Loggers
     */
    union
    {
        struct
        {
            /** The R0 logger data. */
            VMMR0PERVCPULOGGER          Logger;
            /** The R0 release logger data. */
            VMMR0PERVCPULOGGER          RelLogger;
        } s;
        /** Array view. */
        VMMR0PERVCPULOGGER              aLoggers[VMMLOGGER_IDX_MAX];
    } u;
} VMMR0PERVCPU;
/* The s.Logger/s.RelLogger members must overlay the aLoggers array exactly. */
AssertCompile(   RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.Logger)
              == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_REGULAR);
AssertCompile(   RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger)
              == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE);
AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);
/** Pointer to VMM ring-0 VMCPU instance data. */
typedef VMMR0PERVCPU *PVMMR0PERVCPU;
572
/** @name RTLOGGER::u32UserValue1 Flags
 * @{ */
/** The magic value. */
#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE      UINT32_C(0x7d297f05)
/** Part of the flags value used for the magic. */
#define VMMR0_LOGGER_FLAGS_MAGIC_MASK       UINT32_C(0xffffff0f)
/** @} */
580
581
/**
 * VMM data kept in the ring-0 GVM.
 */
typedef struct VMMR0PERVM
{
    /** Set if vmmR0InitVM has been called. */
    bool                fCalledInitVm;
    bool                afPadding1[7];

    /** @name Logging
     * @{ */
    /** Logger (debug) buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjLogger;
    /** The ring-3 mapping object for hMemObjLogger. */
    RTR0MEMOBJ          hMapObjLogger;

    /** Release logger buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjReleaseLogger;
    /** The ring-3 mapping object for hMemObjReleaseLogger. */
    RTR0MEMOBJ          hMapObjReleaseLogger;

    /** State and communication for the log flusher thread. */
    struct
    {
        /** Spinlock protecting the logger ring buffer and associated variables. */
        R0PTRTYPE(RTSPINLOCK)   hSpinlock;
        /** The log flusher thread handle to make sure there is only one. */
        RTNATIVETHREAD          hThread;
        /** The handle to the event semaphore the log flusher waits on. */
        RTSEMEVENT              hEvent;
        /** The index of the log flusher queue head (flusher thread side). */
        uint32_t volatile       idxRingHead;
        /** The index of the log flusher queue tail (EMT side). */
        uint32_t volatile       idxRingTail;
        /** Set if the log flusher thread is waiting for work and needs poking. */
        bool volatile           fThreadWaiting;
        /** Set when the log flusher thread should shut down. */
        bool volatile           fThreadShutdown;
        /** Indicates that the log flusher thread is running. */
        bool volatile           fThreadRunning;
        bool                    afPadding2[5];
        /** Statistics: number of flushes performed by the thread. */
        STAMCOUNTER             StatFlushes;
        /** Statistics: times no wake-up of the thread was needed. */
        STAMCOUNTER             StatNoWakeUp;
        /** Logger ring buffer.
         * This is for communicating with the log flusher thread.
         * @note NOTE(review): sized for 1 buffer per logger while
         *       VMMLOGGER_BUFFER_COUNT is 4 — confirm only one buffer per
         *       logger can be queued at a time. */
        VMMLOGFLUSHERENTRY      aRing[VMM_MAX_CPU_COUNT * 2 /*loggers*/ * 1 /*buffer*/ + 16 /*fudge*/];
    } LogFlusher;
    /** @} */
} VMMR0PERVM;
632
RT_C_DECLS_BEGIN

int  vmmInitFormatTypes(void);
void vmmTermFormatTypes(void);
uint32_t vmmGetBuildType(void);

#ifdef IN_RING3
int  vmmR3SwitcherInit(PVM pVM);
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */

#ifdef IN_RING0

/**
 * World switcher assembly routine.
 * It will call VMMRCEntry().
 *
 * @returns return code from VMMRCEntry().
 * @param   pVM     The cross context VM structure.
 * @param   uArg    See VMMRCEntry().
 * @internal
 */
DECLASM(int)    vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call.  Another difference is the function pointer and
 * argument.  This has to do with resuming code and the stack frame of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
DECLASM(int)    vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);


/**
 * Callback function for vmmR0CallRing3SetJmp2.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
/** Pointer to FNVMMR0SETJMP2(). */
typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pGVM        The ring-0 VM structure.
 * @param   idCpu       The ID of the calling EMT.
 */
DECLASM(int)    vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);


/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pvUser      The argument of that function.
 * @param   uCallKey    Unused call parameter that should be used to help
 *                      uniquely identify the call.
 */
DECLASM(int)    vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf     Pointer to the jump buffer.
 * @param   rc          The return code.
 */
DECLASM(int)    vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
int  vmmR0TripleFaultHackInit(void);
void vmmR0TripleFaultHackTerm(void);
# endif

#endif /* IN_RING0 */

RT_C_DECLS_END
748
749/** @} */
750
751#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette