VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMInternal.h@107044

Last change on this file since 107044 was 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.4 KB
/* $Id: VMInternal.h 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * VM - Internal header file.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_VMInternal_h
#define VMM_INCLUDED_SRC_include_VMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/cdefs.h>
#include <VBox/vmm/vmapi.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>
#include <iprt/setjmp-without-sigmask.h>



/** @defgroup grp_vm_int Internals
 * @ingroup grp_vm
 * @internal
 * @{
 */


/**
 * VM state change callback.
 */
typedef struct VMATSTATE
{
    /** Pointer to the next one. */
    struct VMATSTATE               *pNext;
    /** Pointer to the callback. */
    PFNVMATSTATE                    pfnAtState;
    /** The user argument. */
    void                           *pvUser;
} VMATSTATE;
/** Pointer to a VM state change callback. */
typedef VMATSTATE *PVMATSTATE;

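/*
 * A minimal sketch (never compiled) of how such nodes are typically appended:
 * VMINTUSERPERVM below keeps them in a singly linked list with a tail pointer
 * (pAtState / ppAtStateNext), making registration an O(1) append.
 * vmR3AtStateAppendSketch and its parameters are hypothetical names; the real
 * registration code lives in VMM/VMMR3/VM.cpp and also takes AtStateCritSect.
 * Assumes iprt/mem.h and VBox/err.h for RTMemAllocZ and the status codes.
 */
#if 0
static int vmR3AtStateAppendSketch(PVMATSTATE **pppAtStateNext, PFNVMATSTATE pfnAtState, void *pvUser)
{
    PVMATSTATE pNew = (PVMATSTATE)RTMemAllocZ(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;
    pNew->pfnAtState = pfnAtState;
    pNew->pvUser     = pvUser;
    pNew->pNext      = NULL;
    **pppAtStateNext = pNew;            /* Hook the new node onto the current tail... */
    *pppAtStateNext  = &pNew->pNext;    /* ...and advance the tail pointer to its pNext member. */
    return VINF_SUCCESS;
}
#endif
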

/**
 * VM error callback.
 */
typedef struct VMATERROR
{
    /** Pointer to the next one. */
    struct VMATERROR               *pNext;
    /** Pointer to the callback. */
    PFNVMATERROR                    pfnAtError;
    /** The user argument. */
    void                           *pvUser;
} VMATERROR;
/** Pointer to a VM error callback. */
typedef VMATERROR *PVMATERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the error details.
 */
typedef struct VMERROR
{
    /** The size of the chunk. */
    uint32_t                        cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the filename and function immediately
     * after the end of the buffer. */
    uint32_t                        off;
    /** Offset from the start of this structure to the file name. */
    uint32_t                        offFile;
    /** The line number. */
    uint32_t                        iLine;
    /** Offset from the start of this structure to the function name. */
    uint32_t                        offFunction;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t                        offMessage;
    /** The VBox status code. */
    int32_t                         rc;
} VMERROR, *PVMERROR;

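/*
 * A minimal sketch (never compiled) of how the offset fields above translate
 * into strings: all offsets are relative to the start of the VMERROR
 * structure itself, so plain pointer arithmetic recovers them.  The helper
 * names are hypothetical, and an offset of zero is assumed to mean "not
 * present"; the real consumers live in VMM/VMMR3.
 */
#if 0
DECLINLINE(const char *) vmErrGetFileSketch(const VMERROR *pErr)
{
    return pErr->offFile    ? (const char *)pErr + pErr->offFile    : NULL;
}

DECLINLINE(const char *) vmErrGetMessageSketch(const VMERROR *pErr)
{
    return pErr->offMessage ? (const char *)pErr + pErr->offMessage : NULL;
}
#endif
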

/**
 * VM runtime error callback.
 */
typedef struct VMATRUNTIMEERROR
{
    /** Pointer to the next one. */
    struct VMATRUNTIMEERROR        *pNext;
    /** Pointer to the callback. */
    PFNVMATRUNTIMEERROR             pfnAtRuntimeError;
    /** The user argument. */
    void                           *pvUser;
} VMATRUNTIMEERROR;
/** Pointer to a VM runtime error callback. */
typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the runtime error details.
 */
typedef struct VMRUNTIMEERROR
{
    /** The size of the chunk. */
    uint32_t                        cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the error ID immediately
     * after the end of the buffer. */
    uint32_t                        off;
    /** Offset from the start of this structure to the error ID. */
    uint32_t                        offErrorId;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t                        offMessage;
    /** Error flags. */
    uint32_t                        fFlags;
} VMRUNTIMEERROR, *PVMRUNTIMEERROR;

/** The halt method. */
typedef enum
{
    /** The usual invalid value. */
    VMHALTMETHOD_INVALID = 0,
    /** Use the method used during bootstrapping. */
    VMHALTMETHOD_BOOTSTRAP,
    /** Use the default method. */
    VMHALTMETHOD_DEFAULT,
    /** The old spin/yield/block method. */
    VMHALTMETHOD_OLD,
    /** The first go at a block/spin method. */
    VMHALTMETHOD_1,
    /** The first go at a more global approach. */
    VMHALTMETHOD_GLOBAL_1,
    /** The end of valid methods. (not inclusive of course) */
    VMHALTMETHOD_END,
    /** The usual 32-bit max value. */
    VMHALTMETHOD_32BIT_HACK = 0x7fffffff
} VMHALTMETHOD;

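/*
 * A minimal sketch (never compiled) of validating a halt method value read
 * from the configuration (see the 'VM/HaltMethod' CFGM option mentioned
 * further down) before casting it to the enum.  The helper name is
 * hypothetical; the real selection logic lives in VMM/VMMR3/VM.cpp.
 */
#if 0
static int vmR3ValidateHaltMethodSketch(uint32_t u32HaltMethod, VMHALTMETHOD *penmHaltMethod)
{
    if (   u32HaltMethod <= (uint32_t)VMHALTMETHOD_INVALID
        || u32HaltMethod >= (uint32_t)VMHALTMETHOD_END)
        return VERR_INVALID_PARAMETER;
    *penmHaltMethod = (VMHALTMETHOD)u32HaltMethod;
    return VINF_SUCCESS;
}
#endif
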

/**
 * VM Internal Data (part of the VM structure).
 *
 * @todo Move this and all related things to VMM. The VM component was, to some
 *       extent at least, a bad ad hoc design which should all have been put in
 *       VMM. @see pg_vm.
 */
typedef struct VMINT
{
    /** VM Error Message. */
    R3PTRTYPE(PVMERROR)             pErrorR3;
    /** VM Runtime Error Message. */
    R3PTRTYPE(PVMRUNTIMEERROR)      pRuntimeErrorR3;
    /** The VM was/is-being teleported and has not yet been fully resumed. */
    bool                            fTeleportedAndNotFullyResumedYet;
    /** The VM should power off instead of reset. */
    bool                            fPowerOffInsteadOfReset;
    /** Reset counter (soft + hard). */
    uint32_t                        cResets;
    /** Hard reset counter. */
    uint32_t                        cHardResets;
    /** Soft reset counter. */
    uint32_t                        cSoftResets;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;


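/*
 * A small sketch (never compiled) of the counter relationship documented
 * above: cResets counts both flavours, so it should always equal the sum of
 * the hard and soft counters.  The helper name is hypothetical.
 */
#if 0
DECLINLINE(void) vmR3AssertResetCountersSketch(const VMINT *pVMInt)
{
    Assert(pVMInt->cResets == pVMInt->cHardResets + pVMInt->cSoftResets);
}
#endif
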
#ifdef IN_RING3

/**
 * VM internal data kept in the UVM.
 */
typedef struct VMINTUSERPERVM
{
    /** Head of the standard request queue. Atomic. */
    volatile PVMREQ                 pNormalReqs;
    /** Head of the priority request queue. Atomic. */
    volatile PVMREQ                 pPriorityReqs;
    /** The last index used during alloc/free. */
    volatile uint32_t               iReqFree;
    /** Number of free request packets. */
    volatile uint32_t               cReqFree;
    /** Array of pointers to lists of free request packets. Atomic. */
    volatile PVMREQ                 apReqFree[16 - (HC_ARCH_BITS == 32 ? 5 : 4)];

    /** The reference count of the UVM handle. */
    volatile uint32_t               cUvmRefs;

    /** Number of active EMTs. */
    volatile uint32_t               cActiveEmts;

# ifdef VBOX_WITH_STATISTICS
#  if HC_ARCH_BITS == 32
    uint32_t                        uPadding;
#  endif
    /** Number of VMR3ReqAlloc returning a new packet. */
    STAMCOUNTER                     StatReqAllocNew;
    /** Number of VMR3ReqAlloc causing races. */
    STAMCOUNTER                     StatReqAllocRaces;
    /** Number of VMR3ReqAlloc returning a recycled packet. */
    STAMCOUNTER                     StatReqAllocRecycled;
    /** Number of VMR3ReqFree calls. */
    STAMCOUNTER                     StatReqFree;
    /** Number of times the request was actually freed. */
    STAMCOUNTER                     StatReqFreeOverflow;
    /** Number of requests served. */
    STAMCOUNTER                     StatReqProcessed;
    /** Number of times there was more than one request and the others had to
     * be pushed back onto the list. */
    STAMCOUNTER                     StatReqMoreThan1;
    /** Number of times we've raced someone when pushing the other requests back
     * onto the list. */
    STAMCOUNTER                     StatReqPushBackRaces;
# endif

    /** Pointer to the support library session.
     * Mainly for creation and destruction. */
    PSUPDRVSESSION                  pSession;

    /** Force EMT to terminate. */
    bool volatile                   fTerminateEMT;

    /** Critical section for pAtState and enmPrevVMState. */
    RTCRITSECT                      AtStateCritSect;
    /** List of registered state change callbacks. */
    PVMATSTATE                      pAtState;
    /** Tail of the list of registered state change callbacks (points to the
     * pNext member of the last entry, or to pAtState when the list is empty). */
    PVMATSTATE                     *ppAtStateNext;
    /** The previous VM state.
     * This is mainly used for the 'Resetting' state, but may come in handy later
     * and when debugging. */
    VMSTATE                         enmPrevVMState;

    /** Reason for the most recent suspend operation. */
    VMSUSPENDREASON                 enmSuspendReason;
    /** Reason for the most recent resume operation. */
    VMRESUMEREASON                  enmResumeReason;

    /** Critical section for pAtError and pAtRuntimeError. */
    RTCRITSECT                      AtErrorCritSect;

    /** List of registered error callbacks. */
    PVMATERROR                      pAtError;
    /** Tail of the list of registered error callbacks. */
    PVMATERROR                     *ppAtErrorNext;
    /** The error message count.
     * This is incremented every time an error is raised. */
    uint32_t volatile               cErrors;

    /** The runtime error message count.
     * This is incremented every time a runtime error is raised. */
    uint32_t volatile               cRuntimeErrors;
    /** List of registered runtime error callbacks. */
    PVMATRUNTIMEERROR               pAtRuntimeError;
    /** Tail of the list of registered runtime error callbacks. */
    PVMATRUNTIMEERROR              *ppAtRuntimeErrorNext;

    /** @name Generic Halt data
     * @{
     */
    /** The current halt method.
     * Can be selected by CFGM option 'VM/HaltMethod'. */
    VMHALTMETHOD                    enmHaltMethod;
    /** The index into g_aHaltMethods of the current halt method. */
    uint32_t volatile               iHaltMethod;
    /** @} */

    /** @todo Do NOT add new members here or reuse the current, we need to store the config for
     *        each halt method separately because we're racing on SMP guest rigs. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.  (A rough sketch of the resulting interval computation
         * follows this structure.)
         */
        struct
        {
            /** The max interval without blocking (when spinning). */
            uint32_t                u32MinBlockIntervalCfg;
            /** The minimum interval between blocking (when spinning). */
            uint32_t                u32MaxBlockIntervalCfg;
            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
            uint32_t                u32LagBlockIntervalDivisorCfg;
            /** When to start spinning (lag / nano secs). */
            uint32_t                u32StartSpinningCfg;
            /** When to stop spinning (lag / nano secs). */
            uint32_t                u32StopSpinningCfg;
        } Method12;

        /**
         * The GVMM manages halted and waiting EMTs.
         */
        struct
        {
            /** The threshold between spinning and blocking. */
            uint32_t                cNsSpinBlockThresholdCfg;
        } Global1;
    } Halt;

    /** Pointer to the DBGC instance data. */
    void                           *pvDBGC;

    /** TLS index for the VMINTUSERPERVMCPU pointer. */
    RTTLS                           idxTLS;

    /** The VM name. (Set after the config constructor has been called.) */
    char                           *pszName;
    /** The VM UUID. (Set after the config constructor has been called.) */
    RTUUID                          Uuid;
} VMINTUSERPERVM;
# ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(VMINTUSERPERVM, StatReqAllocNew, 8);
# endif

/** Pointer to the VM internal data kept in the UVM. */
typedef VMINTUSERPERVM *PVMINTUSERPERVM;

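/*
 * A rough sketch (never compiled) of the blocking-interval heuristic
 * described for halt methods 1 & 2 in the Halt.Method12 configuration above:
 * the raw interval is the accumulated lag divided by
 * u32LagBlockIntervalDivisorCfg, clamped to the configured min/max, and then
 * shortened by the measured average oversleep.  The helper name and parameter
 * list are hypothetical; the real algorithm lives in VMM/VMMR3/VM.cpp.
 */
#if 0
static uint64_t vmR3HaltMethod12IntervalSketch(uint64_t cNsLag, uint64_t cNsBlockedTooLongAvg,
                                               uint32_t u32MinBlockIntervalCfg, uint32_t u32MaxBlockIntervalCfg,
                                               uint32_t u32LagBlockIntervalDivisorCfg)
{
    uint64_t cNsInterval = cNsLag / u32LagBlockIntervalDivisorCfg;
    if (cNsInterval < u32MinBlockIntervalCfg)
        cNsInterval = u32MinBlockIntervalCfg;
    else if (cNsInterval > u32MaxBlockIntervalCfg)
        cNsInterval = u32MaxBlockIntervalCfg;
    if (cNsInterval > cNsBlockedTooLongAvg)
        cNsInterval -= cNsBlockedTooLongAvg;    /* Compensate for past oversleeping. */
    else
        cNsInterval = 0;                        /* Oversleeping already exceeds the target interval. */
    return cNsInterval;
}
#endif
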

/**
 * VMCPU internal data kept in the UVM.
 *
 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
 */
typedef struct VMINTUSERPERVMCPU
{
    /** Head of the normal request queue. Atomic. */
    volatile PVMREQ                 pNormalReqs;
    /** Head of the priority request queue. Atomic. */
    volatile PVMREQ                 pPriorityReqs;

    /** The handle to the EMT thread. */
    RTTHREAD                        ThreadEMT;
    /** The native thread handle of the EMT thread. */
    RTNATIVETHREAD                  NativeThreadEMT;
    /** Wait event semaphore. */
    RTSEMEVENT                      EventSemWait;
    /** Wait/Idle indicator. */
    bool volatile                   fWait;
    /** Set if we've been thru vmR3Destroy and decremented the active EMT count
     * already. */
    bool volatile                   fBeenThruVmDestroy;
    /** Align the next bit. */
    bool                            afAlignment[HC_ARCH_BITS == 32 ? 2 : 6];

    /** @name Generic Halt data
     * @{
     */
    /** The average time (ns) between two halts in the last second. (updated once per second) */
    uint32_t                        HaltInterval;
    /** The average halt frequency for the last second. (updated once per second) */
    uint32_t                        HaltFrequency;
    /** The number of halts in the current period. */
    uint32_t                        cHalts;
    uint32_t                        padding; /**< alignment padding. */
    /** When we started counting halts in cHalts (RTTimeNanoTS). */
    uint64_t                        u64HaltsStartTS;
    /** @} */

    /** Union containing data and config for the different halt algorithms. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** How many times we've blocked while cNSBlocked and cNSBlockedTooLong have been accumulating. */
            uint32_t                cBlocks;
            /** Align the next member. */
            uint32_t                u32Alignment;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t                cNSBlockedTooLongAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t                cNSBlockedTooLong;
            /** Total time spent blocking. */
            uint64_t                cNSBlocked;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t                u64LastBlockTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping.
             * This is 0 when we're not spinning. */
            uint64_t                u64StartSpinTS;
        } Method12;


# if 0
        /**
         * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
         * sprinkle it with yields.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
            uint32_t                cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t                cBlockedTooLongNSAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t                cBlockedTooLongNS;
            /** Total time spent blocking. */
            uint64_t                cBlockedNS;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t                u64LastBlockTS;

            /** How many times we've yielded while cYieldedNS and cYieldTooLongNS have been accumulating. */
            uint32_t                cYields;
            /** Avg. time spent oversleeping when yielding. */
            uint32_t                cYieldTooLongNSAvg;
            /** Total time spent oversleeping when yielding. */
            uint64_t                cYieldTooLongNS;
            /** Total time spent yielding. */
            uint64_t                cYieldedNS;
            /** The timestamp (RTTimeNanoTS) of the last yield. */
            uint64_t                u64LastYieldTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
            uint64_t                u64StartSpinTS;
        } Method34;
# endif
    } Halt;


    /** Profiling the halted state; yielding vs blocking.
     * @{ */
    STAMPROFILE                     StatHaltYield;
    STAMPROFILE                     StatHaltBlock;
    STAMPROFILE                     StatHaltBlockOverslept;
    STAMPROFILE                     StatHaltBlockInsomnia;
    STAMPROFILE                     StatHaltBlockOnTime;
    STAMPROFILE                     StatHaltTimers;
    STAMPROFILE                     StatHaltPoll;
    /** @} */
} VMINTUSERPERVMCPU;
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);

/** Pointer to the VMCPU internal data kept in the UVM. */
typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;

#endif /* IN_RING3 */

RT_C_DECLS_BEGIN

DECLCALLBACK(int)   vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
int                 vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
DECLCALLBACK(int)   vmR3Destroy(PVM pVM);
DECLCALLBACK(void)  vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
void                vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
DECLCALLBACK(int)   vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
DECLCALLBACK(int)   vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
void                vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
void                vmR3SetTerminated(PVM pVM);

RT_C_DECLS_END


/** @} */

#endif /* !VMM_INCLUDED_SRC_include_VMInternal_h */