VirtualBox

source: vbox/trunk/include/VBox/vmm/vmm.h@ 93963

Last change on this file since 93963 was 93716, checked in by vboxsync, 3 years ago

VMM/PGM: Moved the physical handler allocation off the hyper heap and into its own slab, changing it to use the 'hardened' AVL tree code. bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.4 KB
Line 
1/** @file
2 * VMM - The Virtual Machine Monitor.
3 */
4
5/*
6 * Copyright (C) 2006-2022 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_vmm_h
27#define VBOX_INCLUDED_vmm_vmm_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <VBox/types.h>
33#include <VBox/vmm/vmapi.h>
34#include <VBox/sup.h>
35#include <VBox/log.h>
36#include <iprt/stdarg.h>
37#include <iprt/thread.h>
38
39RT_C_DECLS_BEGIN
40
41/** @defgroup grp_vmm The Virtual Machine Monitor
42 * @{
43 */
44
45/** @defgroup grp_vmm_api The Virtual Machine Monitor API
46 * @{
47 */
48
49
/**
 * Ring-0 assertion notification callback.
 *
 * Invoked (ring-0 only) when an assertion trips, giving the VMM user a chance
 * to react before/while the assertion is processed.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pvUser  The user argument supplied when the callback was registered.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0ASSERTIONNOTIFICATION,(PVMCPUCC pVCpu, void *pvUser));
/** Pointer to a FNVMMR0ASSERTIONNOTIFICATION(). */
typedef FNVMMR0ASSERTIONNOTIFICATION *PFNVMMR0ASSERTIONNOTIFICATION;
60
/**
 * Rendezvous callback.
 *
 * Executed on one or more EMTs by VMMR3EmtRendezvous(), according to the
 * VMMEMTRENDEZVOUS_FLAGS_TYPE_XXX flag used.
 *
 * @returns VBox strict status code - EM scheduling.  Do not return
 *          informational status code other than the ones used by EM for
 *          scheduling.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The user argument passed to VMMR3EmtRendezvous().
 */
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMMEMTRENDEZVOUS,(PVM pVM, PVMCPU pVCpu, void *pvUser));
/** Pointer to a rendezvous callback function. */
typedef FNVMMEMTRENDEZVOUS *PFNVMMEMTRENDEZVOUS;
75
/**
 * Method table that the VMM uses to call back the user of the VMM.
 *
 * All optional members shall be set to NULL when not implemented; the two
 * magic members bracket the structure for validation.
 */
typedef struct VMM2USERMETHODS
{
    /** Magic value (VMM2USERMETHODS_MAGIC). */
    uint32_t    u32Magic;
    /** Structure version (VMM2USERMETHODS_VERSION). */
    uint32_t    u32Version;

    /**
     * Save the VM state.
     *
     * @returns VBox status code.
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     *
     * @remarks This member shall be set to NULL if the operation is not
     *          supported.
     */
    DECLR3CALLBACKMEMBER(int, pfnSaveState,(PCVMM2USERMETHODS pThis, PUVM pUVM));
    /** @todo Move pfnVMAtError and pfnCFGMConstructor here? */

    /**
     * EMT initialization notification callback.
     *
     * This is intended for doing per-thread initialization for EMTs (like COM
     * init).
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     * @param   pUVCpu  The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * EMT termination notification callback.
     *
     * This is intended for doing per-thread cleanups for EMTs (like COM).
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     * @param   pUVCpu  The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * PDM thread initialization notification callback.
     *
     * This is intended for doing per-thread initialization (like COM init).
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * PDM thread termination notification callback.
     *
     * This is the counterpart of pfnNotifyPdmtInit, intended for doing
     * per-thread cleanups (like COM uninit).
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * Notification callback that a VM reset will be turned into a power off.
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyResetTurnedIntoPowerOff,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * Generic object query by UUID.
     *
     * @returns Pointer to the queried object on success, NULL if not found.
     *
     * @param   pThis   Pointer to the callback method table.
     * @param   pUVM    The user mode VM handle.
     * @param   pUuid   The UUID of what's being queried.  The UUIDs and the
     *                  usage conventions are defined by the user.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void *, pfnQueryGenericObject,(PCVMM2USERMETHODS pThis, PUVM pUVM, PCRTUUID pUuid));

    /** Magic value (VMM2USERMETHODS_MAGIC) marking the end of the structure. */
    uint32_t    u32EndMagic;
} VMM2USERMETHODS;

/** Magic value of the VMM2USERMETHODS (Franz Kafka). */
#define VMM2USERMETHODS_MAGIC       UINT32_C(0x18830703)
/** The VMM2USERMETHODS structure version. */
#define VMM2USERMETHODS_VERSION     UINT32_C(0x00030000)
182
183
/**
 * Checks whether we've armed the ring-0 long jump machinery.
 *
 * In ring-3 and raw-mode context this is always @c false since the long jump
 * machinery only exists in ring-0.
 *
 * @returns @c true / @c false
 * @param   a_pVCpu     The caller's cross context virtual CPU structure.
 * @thread  EMT
 * @sa      VMMR0IsLongJumpArmed
 */
#ifdef IN_RING0
# define VMMIsLongJumpArmed(a_pVCpu)    VMMR0IsLongJumpArmed(a_pVCpu)
#else
# define VMMIsLongJumpArmed(a_pVCpu)    (false)
#endif
197
198
/* Context-agnostic VMM getters and utilities (available in all contexts). */
VMMDECL(VMCPUID)        VMMGetCpuId(PVMCC pVM);
VMMDECL(PVMCPUCC)       VMMGetCpu(PVMCC pVM);
VMMDECL(PVMCPUCC)       VMMGetCpu0(PVMCC pVM);
VMMDECL(PVMCPUCC)       VMMGetCpuById(PVMCC pVM, VMCPUID idCpu);
VMMR3DECL(PVMCPUCC)     VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
VMM_INT_DECL(uint32_t)  VMMGetSvnRev(void);
VMM_INT_DECL(void)      VMMTrashVolatileXMMRegs(void);
206
207
208/** @defgroup grp_vmm_api_r0 The VMM Host Context Ring 0 API
209 * @{
210 */
211
/**
 * The VMMR0Entry() codes.
 *
 * Operations are grouped by subsystem, each group starting at a round base
 * value so new entries can be appended without renumbering the others.
 */
typedef enum VMMR0OPERATION
{
    /** Run guest code using the available hardware acceleration technology. */
    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
    /** Run guest code using the native execution manager (NEM). */
    VMMR0_DO_NEM_RUN = SUP_VMMR0_DO_NEM_RUN,
    /** Official NOP that we use for profiling. */
    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
    /** Official slow ioctl NOP that we use for profiling. */
    VMMR0_DO_SLOW_NOP,

    /** Ask the GVMM to create a new VM. */
    VMMR0_DO_GVMM_CREATE_VM = 32,
    /** Ask the GVMM to destroy the VM. */
    VMMR0_DO_GVMM_DESTROY_VM,
    /** Call GVMMR0RegisterVCpu(). */
    VMMR0_DO_GVMM_REGISTER_VMCPU,
    /** Call GVMMR0DeregisterVCpu(). */
    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
    /** Call GVMMR0RegisterWorkerThread(). */
    VMMR0_DO_GVMM_REGISTER_WORKER_THREAD,
    /** Call GVMMR0DeregisterWorkerThread(). */
    VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD,
    /** Call GVMMR0SchedHalt(). */
    VMMR0_DO_GVMM_SCHED_HALT,
    /** Call GVMMR0SchedWakeUp(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP,
    /** Call GVMMR0SchedPoke(). */
    VMMR0_DO_GVMM_SCHED_POKE,
    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
    /** Call GVMMR0SchedPoll(). */
    VMMR0_DO_GVMM_SCHED_POLL,
    /** Call GVMMR0QueryStatistics(). */
    VMMR0_DO_GVMM_QUERY_STATISTICS,
    /** Call GVMMR0ResetStatistics(). */
    VMMR0_DO_GVMM_RESET_STATISTICS,

    /** Call VMMR0 Per VM Init. */
    VMMR0_DO_VMMR0_INIT = 64,
    /** Call VMMR0 Per VM EMT Init. */
    VMMR0_DO_VMMR0_INIT_EMT,
    /** Call VMMR0 Per VM Termination. */
    VMMR0_DO_VMMR0_TERM,
    /** Copy logger settings from userland, VMMR0UpdateLoggersReq(). */
    VMMR0_DO_VMMR0_UPDATE_LOGGERS,
    /** Used by the log flusher, VMMR0LogFlusher. */
    VMMR0_DO_VMMR0_LOG_FLUSHER,
    /** Used by EMTs to wait for the log flusher to finish, VMMR0LogWaitFlushed. */
    VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED,

    /** Setup hardware-assisted VM session. */
    VMMR0_DO_HM_SETUP_VM = 128,
    /** Attempt to enable or disable hardware-assisted mode. */
    VMMR0_DO_HM_ENABLE,

    /** Call PGMR0PhysAllocateHandyPages(). */
    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
    /** Call PGMR0PhysFlushHandyPages(). */
    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
    /** Call PGMR0AllocateLargePage(). */
    VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE,
    /** Call PGMR0PhysSetupIommu(). */
    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
    /** Call PGMR0PoolGrow(). */
    VMMR0_DO_PGM_POOL_GROW,
    /** Call PGMR0PhysHandlerInitReqHandler(). */
    VMMR0_DO_PGM_PHYS_HANDLER_INIT,

    /** Call GMMR0InitialReservation(). */
    VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
    /** Call GMMR0UpdateReservation(). */
    VMMR0_DO_GMM_UPDATE_RESERVATION,
    /** Call GMMR0AllocatePages(). */
    VMMR0_DO_GMM_ALLOCATE_PAGES,
    /** Call GMMR0FreePages(). */
    VMMR0_DO_GMM_FREE_PAGES,
    /** Call GMMR0FreeLargePage(). */
    VMMR0_DO_GMM_FREE_LARGE_PAGE,
    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
    /** Call GMMR0QueryMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_MEM_STATS,
    /** Call GMMR0BalloonedPages(). */
    VMMR0_DO_GMM_BALLOONED_PAGES,
    /** Call GMMR0MapUnmapChunk(). */
    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    /** Call GMMR0RegisterSharedModule(). */
    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
    /** Call GMMR0UnregisterSharedModule(). */
    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
    /** Call GMMR0ResetSharedModules(). */
    VMMR0_DO_GMM_RESET_SHARED_MODULES,
    /** Call GMMR0CheckSharedModules(). */
    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
    /** Call GMMR0FindDuplicatePage(). */
    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
    /** Call GMMR0QueryStatistics(). */
    VMMR0_DO_GMM_QUERY_STATISTICS,
    /** Call GMMR0ResetStatistics(). */
    VMMR0_DO_GMM_RESET_STATISTICS,

    /** Call PDMR0DriverCallReqHandler(). */
    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
    /** Call PDMR0DeviceCreateReqHandler(). */
    VMMR0_DO_PDM_DEVICE_CREATE,
    /** Call PDMR0DeviceGenCallReqHandler(). */
    VMMR0_DO_PDM_DEVICE_GEN_CALL,
    /** Old style device compat: Set ring-0 critical section. */
    VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT,
    /** Call PDMR0QueueCreateReqHandler(). */
    VMMR0_DO_PDM_QUEUE_CREATE,

    /** Set a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_SET_VALUE = 400,
    /** Query a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_QUERY_VALUE,

    /** The start of the R0 service operations. */
    VMMR0_DO_SRV_START = 448,
    /** Call IntNetR0Open(). */
    VMMR0_DO_INTNET_OPEN,
    /** Call IntNetR0IfClose(). */
    VMMR0_DO_INTNET_IF_CLOSE,
    /** Call IntNetR0IfGetBufferPtrs(). */
    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
    /** Call IntNetR0IfSetPromiscuousMode(). */
    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
    /** Call IntNetR0IfSetMacAddress(). */
    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
    /** Call IntNetR0IfSetActive(). */
    VMMR0_DO_INTNET_IF_SET_ACTIVE,
    /** Call IntNetR0IfSend(). */
    VMMR0_DO_INTNET_IF_SEND,
    /** Call IntNetR0IfWait(). */
    VMMR0_DO_INTNET_IF_WAIT,
    /** Call IntNetR0IfAbortWait(). */
    VMMR0_DO_INTNET_IF_ABORT_WAIT,

#if 0
    /** Forward call to the PCI driver. */
    VMMR0_DO_PCIRAW_REQ = 512,
#endif

    /** The end of the R0 service operations. */
    VMMR0_DO_SRV_END,

    /** Call NEMR0InitVM() (host specific). */
    VMMR0_DO_NEM_INIT_VM = 576,
    /** Call NEMR0InitVMPart2() (host specific). */
    VMMR0_DO_NEM_INIT_VM_PART_2,
    /** Call NEMR0MapPages() (host specific). */
    VMMR0_DO_NEM_MAP_PAGES,
    /** Call NEMR0UnmapPages() (host specific). */
    VMMR0_DO_NEM_UNMAP_PAGES,
    /** Call NEMR0ExportState() (host specific). */
    VMMR0_DO_NEM_EXPORT_STATE,
    /** Call NEMR0ImportState() (host specific). */
    VMMR0_DO_NEM_IMPORT_STATE,
    /** Call NEMR0QueryCpuTick() (host specific). */
    VMMR0_DO_NEM_QUERY_CPU_TICK,
    /** Call NEMR0ResumeCpuTickOnAll() (host specific). */
    VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL,
    /** Call NEMR0UpdateStatistics() (host specific). */
    VMMR0_DO_NEM_UPDATE_STATISTICS,
    /** Call NEMR0DoExperiment() (host specific, experimental, debug only). */
    VMMR0_DO_NEM_EXPERIMENT,

    /** Grow the I/O port registration tables. */
    VMMR0_DO_IOM_GROW_IO_PORTS = 640,
    /** Grow the I/O port statistics tables. */
    VMMR0_DO_IOM_GROW_IO_PORT_STATS,
    /** Grow the MMIO registration tables. */
    VMMR0_DO_IOM_GROW_MMIO_REGS,
    /** Grow the MMIO statistics tables. */
    VMMR0_DO_IOM_GROW_MMIO_STATS,
    /** Synchronize statistics indices for I/O ports and MMIO regions. */
    VMMR0_DO_IOM_SYNC_STATS_INDICES,

    /** Call DBGFR0TraceCreateReqHandler(). */
    VMMR0_DO_DBGF_TRACER_CREATE = 704,
    /** Call DBGFR0TraceCallReqHandler(). */
    VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER,
    /** Call DBGFR0BpInitReqHandler(). */
    VMMR0_DO_DBGF_BP_INIT,
    /** Call DBGFR0BpChunkAllocReqHandler(). */
    VMMR0_DO_DBGF_BP_CHUNK_ALLOC,
    /** Call DBGFR0BpL2TblChunkAllocReqHandler(). */
    VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC,
    /** Call DBGFR0BpOwnerInitReqHandler(). */
    VMMR0_DO_DBGF_BP_OWNER_INIT,
    /** Call DBGFR0BpPortIoInitReqHandler(). */
    VMMR0_DO_DBGF_BP_PORTIO_INIT,

    /** Grow a timer queue. */
    VMMR0_DO_TM_GROW_TIMER_QUEUE = 768,

    /** Official call we use for testing Ring-0 APIs. */
    VMMR0_DO_TESTS = 2048,

    /** The usual 32-bit type blow up. */
    VMMR0_DO_32BIT_HACK = 0x7fffffff
} VMMR0OPERATION;
418
419
/**
 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef struct GCFGMVALUEREQ
{
    /** The request header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session handle. */
    PSUPDRVSESSION  pSession;
    /** The value.
     * This is input for the set request and output for the query. */
    uint64_t        u64Value;
    /** The variable name.
     * This is fixed sized just to make things simple for the mock-up. */
    char            szName[48];
} GCFGMVALUEREQ;
/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
441
442
/**
 * Request package for VMMR0_DO_VMMR0_UPDATE_LOGGERS.
 *
 * In addition the u64Arg selects the logger sets: @c false for debug, @c true
 * for release.
 */
typedef struct VMMR0UPDATELOGGERSREQ
{
    /** The request header. */
    SUPVMMR0REQHDR  Hdr;
    /** The current logger flags (RTLOGFLAGS). */
    uint64_t        fFlags;
    /** Groups, assuming same group layout as ring-3. */
    uint32_t        cGroups;
    /** CRC32 of the group names. */
    uint32_t        uGroupCrc32;
    /** Per-group settings, variable size.
     * cGroups determines the actual number of entries. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint32_t        afGroups[RT_FLEXIBLE_ARRAY];
} VMMR0UPDATELOGGERSREQ;
/** Pointer to a VMMR0_DO_VMMR0_UPDATE_LOGGERS request. */
typedef VMMR0UPDATELOGGERSREQ *PVMMR0UPDATELOGGERSREQ;
465
466#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
467
/**
 * Structure VMMR0EmtPrepareToBlock uses to pass info to
 * VMMR0EmtResumeAfterBlocking.
 */
typedef struct VMMR0EMTBLOCKCTX
{
    /** Magic value (VMMR0EMTBLOCKCTX_MAGIC). */
    uint32_t    uMagic;
    /** Set if we were in HM context, clear if not. */
    bool        fWasInHmContext;
} VMMR0EMTBLOCKCTX;
/** Pointer to a VMMR0EmtPrepareToBlock context structure. */
typedef VMMR0EMTBLOCKCTX *PVMMR0EMTBLOCKCTX;
/** Magic value for VMMR0EMTBLOCKCTX::uMagic (Paul Desmond). */
#define VMMR0EMTBLOCKCTX_MAGIC          UINT32_C(0x19261125)
/** Magic value for VMMR0EMTBLOCKCTX::uMagic when it's out of context. */
#define VMMR0EMTBLOCKCTX_MAGIC_DEAD     UINT32_C(0x19770530)
485
/* Ring-0 entry points and EMT blocking/notification helpers. */
VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
/* NOTE(review): the last parameter below is an unnamed PSUPDRVSESSION — confirm this is intentional. */
VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
                                  PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
VMMR0_INT_DECL(int)  VMMR0InitPerVMData(PGVM pGVM);
VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM);
VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu);
VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu);
VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu);
VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu);
VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu);
/* EMT blocking: prepare/resume bracket a blocking section, passing state via VMMR0EMTBLOCKCTX. */
VMMR0_INT_DECL(int)  VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
                                            PVMMR0EMTBLOCKCTX pCtx);
VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx);
VMMR0_INT_DECL(int)  VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout);
VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent);
VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent);
VMMR0_INT_DECL(int)  VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser);
VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu);
VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu);

/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
 * @{ */
/** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
#define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED     RT_BIT_32(0)
/** @} */
#endif /* IN_RING0 */
513
/* Declared outside the IN_RING0 guard above — visible in all contexts. */
VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu);
515/** @} */
516
517
518#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
519/** @defgroup grp_vmm_api_r3 The VMM Host Context Ring 3 API
520 * @{
521 */
VMMR3DECL(PCVMMR3VTABLE)     VMMR3GetVTable(void);
/* VM lifecycle: init, ring-0 init, init-completed notification, termination, relocation. */
VMMR3_INT_DECL(int)          VMMR3Init(PVM pVM);
VMMR3_INT_DECL(int)          VMMR3InitR0(PVM pVM);
VMMR3_INT_DECL(int)          VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(int)          VMMR3Term(PVM pVM);
VMMR3_INT_DECL(void)         VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3_INT_DECL(int)          VMMR3UpdateLoggers(PVM pVM);
VMMR3DECL(const char *)      VMMR3GetRZAssertMsg1(PVM pVM);
VMMR3DECL(const char *)      VMMR3GetRZAssertMsg2(PVM pVM);
VMMR3_INT_DECL(int)          VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu);
/* Ring-3 -> ring-0 call gateways (enmOperation/uOperation is a VMMR0OPERATION code). */
VMMR3DECL(int)               VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3_INT_DECL(int)          VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation);
VMMR3DECL(void)              VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
VMMR3_INT_DECL(void)         VMMR3YieldSuspend(PVM pVM);
VMMR3_INT_DECL(void)         VMMR3YieldStop(PVM pVM);
VMMR3_INT_DECL(void)         VMMR3YieldResume(PVM pVM);
VMMR3_INT_DECL(void)         VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector);
VMMR3_INT_DECL(void)         VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu);
VMMR3DECL(int)               VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)               VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)               VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
/** @defgroup grp_VMMR3EmtRendezvous_fFlags     VMMR3EmtRendezvous flags
 *  @{ */
/** Execution type mask. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK            UINT32_C(0x00000007)
/** Invalid execution type. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID         UINT32_C(0)
/** Let the EMTs execute the callback one by one (in no particular order).
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE      UINT32_C(1)
/** Let all the EMTs execute the callback at the same time.
 * Cannot recurse from the callback. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE     UINT32_C(2)
/** Only execute the callback on one EMT (no particular one).
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE            UINT32_C(3)
/** Let the EMTs execute the callback one by one in ascending order.
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING       UINT32_C(4)
/** Let the EMTs execute the callback one by one in descending order.
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING      UINT32_C(5)
/** Stop after the first error.
 * This is not valid for any execution type where more than one EMT is active
 * at a time. */
#define VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR        UINT32_C(0x00000008)
/** Use VMREQFLAGS_PRIORITY when contacting the EMTs. */
#define VMMEMTRENDEZVOUS_FLAGS_PRIORITY             UINT32_C(0x00000010)
/** The valid flags. */
#define VMMEMTRENDEZVOUS_FLAGS_VALID_MASK           UINT32_C(0x0000001f)
/** @} */
VMMR3_INT_DECL(int)          VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(void)         VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold);
VMMR3_INT_DECL(int)          VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead);
VMMR3_INT_DECL(void)         VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, PRTDBGUNWINDSTATE pState);
578/** @} */
579#endif /* IN_RING3 */
580
581
582#if defined(IN_RC) || defined(IN_RING0) || defined(DOXYGEN_RUNNING)
583/** @defgroup grp_vmm_api_rz The VMM Raw-Mode and Ring-0 Context API
584 * @{
585 */
/* Per-vCPU enable/disable/query of ring-3 calls from raw-mode and ring-0 context. */
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPUCC pVCpu);
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPUCC pVCpu);
VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPUCC pVCpu);
589/** @} */
590#endif
591
592
/** Wrapper around AssertReleaseMsgReturn that avoids tripping up in the
 * kernel when we don't have a setjmp in place.
 *
 * In ring-0 it only raises a release assertion when the long jump machinery is
 * armed for the calling vCPU; otherwise it downgrades to AssertLogRelMsg.  In
 * all cases the expression failing causes @a a_rc to be returned. */
#ifdef IN_RING0
# define VMM_ASSERT_RELEASE_MSG_RETURN(a_pVM, a_Expr, a_Msg, a_rc) do { \
        if (RT_LIKELY(a_Expr)) { /* likely */ } \
        else \
        { \
            PVMCPUCC pVCpuAssert = VMMGetCpu(a_pVM); \
            if (pVCpuAssert && VMMR0IsLongJumpArmed(pVCpuAssert)) \
                AssertReleaseMsg(a_Expr, a_Msg); \
            else \
                AssertLogRelMsg(a_Expr, a_Msg); \
            return (a_rc); \
        } \
    } while (0)
#else
# define VMM_ASSERT_RELEASE_MSG_RETURN(a_pVM, a_Expr, a_Msg, a_rc) AssertReleaseMsgReturn(a_Expr, a_Msg, a_rc)
#endif
611
612/** @} */
613
614/** @} */
615RT_C_DECLS_END
616
617#endif /* !VBOX_INCLUDED_vmm_vmm_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette