VirtualBox

source: vbox/trunk/include/VBox/pgm.h@17373

Last change on this file since 17373 was 17373, checked in by vboxsync, 16 years ago

pgm.h: build fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.3 KB
/** @file
 * PGM - Page Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_pgm_h
#define ___VBox_pgm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/sup.h>
#include <VBox/vmapi.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>

__BEGIN_DECLS

/** @defgroup grp_pgm   The Page Monitor / Manager API
 * @{
 */

/** Chunk size for dynamically allocated physical memory. */
#define PGM_DYNAMIC_CHUNK_SIZE          (1*1024*1024)
/** Shift GC physical address by 20 bits to get the offset into the pvHCChunkHC array. */
#define PGM_DYNAMIC_CHUNK_SHIFT         20
/** Dynamic chunk offset mask. */
#define PGM_DYNAMIC_CHUNK_OFFSET_MASK   0xfffff
/** Dynamic chunk base mask. */
#define PGM_DYNAMIC_CHUNK_BASE_MASK     (~(RTGCPHYS)PGM_DYNAMIC_CHUNK_OFFSET_MASK)

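/** @par Example
 * A minimal usage sketch (not taken from the original sources), assuming a
 * hypothetical guest physical address GCPhys: the macros split it into the
 * 1MB chunk it belongs to and the offset within that chunk.
 * @code
 *      RTGCPHYS GCPhys      = 0x00123456;                                         // hypothetical address
 *      RTGCPHYS GCPhysChunk = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;               // 0x00100000, chunk base
 *      uint32_t offChunk    = (uint32_t)(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK); // 0x23456, offset into chunk
 *      uint32_t idxChunk    = (uint32_t)(GCPhys >> PGM_DYNAMIC_CHUNK_SHIFT);      // 0x1, index used for lookups
 * @endcode
 */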

/**
 * FNPGMRELOCATE callback mode.
 */
typedef enum PGMRELOCATECALL
{
    /** The callback is for checking if the suggested address is suitable. */
    PGMRELOCATECALL_SUGGEST = 1,
    /** The callback is for executing the relocation. */
    PGMRELOCATECALL_RELOCATE
} PGMRELOCATECALL;


/**
 * Callback function which will be called when PGM is trying to find
 * a new location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again to perform the relocation.
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The VM handle.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation cannot fail!
 */
typedef DECLCALLBACK(bool) FNPGMRELOCATE(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
/** Pointer to a relocation callback function. */
typedef FNPGMRELOCATE *PFNPGMRELOCATE;

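/** @par Example
 * A minimal FNPGMRELOCATE implementation sketch (not from the original
 * sources); the mapping owner and its MYMAPPING/GCPtrBase bookkeeping are
 * hypothetical. It accepts any suggested address and records the new base
 * when asked to relocate.
 * @code
 *  static DECLCALLBACK(bool) myMappingRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
 *                                              PGMRELOCATECALL enmMode, void *pvUser)
 *  {
 *      MYMAPPING *pThis = (MYMAPPING *)pvUser;     // hypothetical per-mapping state
 *      switch (enmMode)
 *      {
 *          case PGMRELOCATECALL_SUGGEST:
 *              return true;                        // any suggested location is acceptable
 *          case PGMRELOCATECALL_RELOCATE:
 *              pThis->GCPtrBase = GCPtrNew;        // update the cached base address
 *              return true;                        // relocation cannot fail
 *          default:
 *              return false;
 *      }
 *  }
 * @endcode
 */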

/**
 * Physical page access handler type.
 */
typedef enum PGMPHYSHANDLERTYPE
{
    /** MMIO range. Pages are not present, all access is done in interpreter or recompiler. */
    PGMPHYSHANDLERTYPE_MMIO = 1,
    /** Handle all write access to a physical page range. */
    PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
    /** Handle all access to a physical page range. */
    PGMPHYSHANDLERTYPE_PHYSICAL_ALL

} PGMPHYSHANDLERTYPE;

/**
 * \#PF Handler callback for physical access handler ranges in RC.
 *
 * @returns VBox status code (appropriate for RC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 *                      NULL on DMA and other non CPU access.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
typedef DECLCALLBACK(int) FNPGMRCPHYSHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
/** Pointer to PGM access callback. */
typedef FNPGMRCPHYSHANDLER *PFNPGMRCPHYSHANDLER;

/**
 * \#PF Handler callback for physical access handler ranges in R0.
 *
 * @returns VBox status code (appropriate for R0 return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 *                      NULL on DMA and other non CPU access.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
typedef DECLCALLBACK(int) FNPGMR0PHYSHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
/** Pointer to PGM access callback. */
typedef FNPGMR0PHYSHANDLER *PFNPGMR0PHYSHANDLER;

/**
 * Guest Access type
 */
typedef enum PGMACCESSTYPE
{
    /** Read access. */
    PGMACCESSTYPE_READ = 1,
    /** Write access. */
    PGMACCESSTYPE_WRITE
} PGMACCESSTYPE;

/**
 * \#PF Handler callback for physical access handler ranges (MMIO among others) in HC.
 *
 * The handler cannot raise any faults, it's mainly for monitoring write access
 * to certain pages.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
typedef DECLCALLBACK(int) FNPGMR3PHYSHANDLER(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
/** Pointer to PGM access callback. */
typedef FNPGMR3PHYSHANDLER *PFNPGMR3PHYSHANDLER;

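/** @par Example
 * A write-monitoring FNPGMR3PHYSHANDLER sketch (not from the original
 * sources); the MYDEVSTATE structure and its fDirty flag are hypothetical.
 * It only notes that the range was touched and lets the caller perform the
 * actual access by returning VINF_PGM_HANDLER_DO_DEFAULT.
 * @code
 *  static DECLCALLBACK(int) myDevWriteMonitor(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
 *                                             size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
 *  {
 *      MYDEVSTATE *pThis = (MYDEVSTATE *)pvUser;   // hypothetical device/user state
 *      if (enmAccessType == PGMACCESSTYPE_WRITE)
 *          pThis->fDirty = true;                   // remember that the monitored range changed
 *      return VINF_PGM_HANDLER_DO_DEFAULT;         // let the caller carry out the access itself
 *  }
 * @endcode
 */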

/**
 * Virtual access handler type.
 */
typedef enum PGMVIRTHANDLERTYPE
{
    /** Write access handled. */
    PGMVIRTHANDLERTYPE_WRITE = 1,
    /** All access handled. */
    PGMVIRTHANDLERTYPE_ALL,
    /** Hypervisor write access handled.
     * This is used to catch the guest trying to write to LDT, TSS and any other
     * system structure which the brain dead Intel guys let unprivileged code find. */
    PGMVIRTHANDLERTYPE_HYPERVISOR
} PGMVIRTHANDLERTYPE;

/**
 * \#PF Handler callback for virtual access handler ranges, RC.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
typedef DECLCALLBACK(int) FNPGMRCVIRTHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
/** Pointer to PGM access callback. */
typedef FNPGMRCVIRTHANDLER *PFNPGMRCVIRTHANDLER;

/**
 * \#PF Handler callback for virtual access handler ranges, R3.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
typedef DECLCALLBACK(int) FNPGMR3VIRTHANDLER(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
/** Pointer to PGM access callback. */
typedef FNPGMR3VIRTHANDLER *PFNPGMR3VIRTHANDLER;


/**
 * \#PF Handler callback for invalidation of virtual access handler ranges.
 *
 * @param   pVM         VM Handle.
 * @param   GCPtr       The virtual address the guest has changed.
 */
typedef DECLCALLBACK(int) FNPGMR3VIRTINVALIDATE(PVM pVM, RTGCPTR GCPtr);
/** Pointer to PGM invalidation callback. */
typedef FNPGMR3VIRTINVALIDATE *PFNPGMR3VIRTINVALIDATE;

/**
 * Paging mode.
 */
typedef enum PGMMODE
{
    /** The usual invalid value. */
    PGMMODE_INVALID = 0,
    /** Real mode. */
    PGMMODE_REAL,
    /** Protected mode, no paging. */
    PGMMODE_PROTECTED,
    /** 32-bit paging. */
    PGMMODE_32_BIT,
    /** PAE paging. */
    PGMMODE_PAE,
    /** PAE paging with NX enabled. */
    PGMMODE_PAE_NX,
    /** 64-bit AMD paging (long mode). */
    PGMMODE_AMD64,
    /** 64-bit AMD paging (long mode) with NX enabled. */
    PGMMODE_AMD64_NX,
    /** Nested paging mode (shadow only; guest physical to host physical). */
    PGMMODE_NESTED,
    /** Extended paging (Intel) mode. */
    PGMMODE_EPT,
    /** The max number of modes */
    PGMMODE_MAX,
    /** 32bit hackishness. */
    PGMMODE_32BIT_HACK = 0x7fffffff
} PGMMODE;

/** Macro for checking if the guest is using paging.
 * @param   enmMode     PGMMODE_*.
 * @remark  ASSUMES certain order of the PGMMODE_* values.
 */
#define PGMMODE_WITH_PAGING(enmMode)    ((enmMode) >= PGMMODE_32_BIT)

/** Macro for checking if it's one of the long mode modes.
 * @param   enmMode     PGMMODE_*.
 */
#define PGMMODE_IS_LONG_MODE(enmMode)   ((enmMode) == PGMMODE_AMD64_NX || (enmMode) == PGMMODE_AMD64)

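/** @par Example
 * A small sketch (not from the original sources) of how these mode macros
 * might be used, e.g. when deciding how guest page tables need to be walked;
 * pVM is assumed to be a valid VM handle.
 * @code
 *      PGMMODE enmGuestMode = PGMGetGuestMode(pVM);
 *      bool    fPaging      = PGMMODE_WITH_PAGING(enmGuestMode);   // false for real/protected mode
 *      bool    fLongMode    = PGMMODE_IS_LONG_MODE(enmGuestMode);  // true for AMD64 and AMD64_NX
 *      if (fPaging && !fLongMode)
 *      {
 *          // 32-bit or PAE guest paging.
 *      }
 * @endcode
 */
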
/**
 * Is the ROM mapped (true) or is the shadow RAM mapped (false).
 *
 * @returns boolean.
 * @param   enmProt     The PGMROMPROT value, must be valid.
 */
#define PGMROMPROT_IS_ROM(enmProt) \
    (   (enmProt) == PGMROMPROT_READ_ROM_WRITE_IGNORE \
     || (enmProt) == PGMROMPROT_READ_ROM_WRITE_RAM )

VMMDECL(int)        PGMRegisterStringFormatTypes(void);
VMMDECL(void)       PGMDeregisterStringFormatTypes(void);
VMMDECL(RTHCPHYS)   PGMGetHyperCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode);
VMMDECL(RTHCPHYS)   PGMGetEPTCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetHyper32BitCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetHyperPaeCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetHyperAmd64CR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetInterHCCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetInterRCCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetInter32BitCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetInterPaeCR3(PVM pVM);
VMMDECL(RTHCPHYS)   PGMGetInterAmd64CR3(PVM pVM);
VMMDECL(int)        PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
VMMDECL(int)        PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage);
VMMDECL(int)        PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess);
VMMDECL(int)        PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess);
VMMDECL(int)        PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
VMMDECL(int)        PGMMap(PVM pVM, RTGCPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags);
VMMDECL(int)        PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags);
VMMDECL(int)        PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
#ifndef IN_RING0
VMMDECL(bool)       PGMMapHasConflicts(PVM pVM);
VMMDECL(int)        PGMMapResolveConflicts(PVM pVM);
#endif
#ifdef VBOX_STRICT
VMMDECL(void)       PGMMapCheck(PVM pVM);
#endif
VMMDECL(int)        PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
VMMDECL(int)        PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
VMMDECL(int)        PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
VMMDECL(int)        PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
VMMDECL(bool)       PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr);
VMMDECL(int)        PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
VMMDECL(int)        PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
VMMDECL(X86PDPE)    PGMGstGetPaePDPtr(PVM pVM, unsigned iPdPt);

VMMDECL(int)        PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage);
VMMDECL(int)        PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal);
VMMDECL(int)        PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
VMMDECL(int)        PGMUpdateCR3(PVM pVM, uint64_t cr3);
VMMDECL(int)        PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer);
VMMDECL(PGMMODE)    PGMGetGuestMode(PVM pVM);
VMMDECL(PGMMODE)    PGMGetShadowMode(PVM pVM);
VMMDECL(PGMMODE)    PGMGetHostMode(PVM pVM);
VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode);
VMMDECL(int)        PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                                 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                                 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                                 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                                 R3PTRTYPE(const char *) pszDesc);
VMMDECL(int)        PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast);
VMMDECL(int)        PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys);
VMMDECL(int)        PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
                                                      R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                                      R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                                      RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                                      R3PTRTYPE(const char *) pszDesc);
VMMDECL(int)        PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit);
VMMDECL(int)        PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2);
VMMDECL(int)        PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
VMMDECL(int)        PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap);
VMMDECL(int)        PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys);
VMMDECL(int)        PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
VMMDECL(bool)       PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys);
VMMDECL(bool)       PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr);
VMMDECL(bool)       PGMPhysIsA20Enabled(PVM pVM);
VMMDECL(bool)       PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys);
VMMDECL(bool)       PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys);
VMMDECL(int)        PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys);
VMMDECL(int)        PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
VMMDECL(int)        PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys);
VMMDECL(void)       PGMPhysInvalidatePageGCMapTLB(PVM pVM);
VMMDECL(void)       PGMPhysInvalidatePageR0MapTLB(PVM pVM);
VMMDECL(void)       PGMPhysInvalidatePageR3MapTLB(PVM pVM);

/**
 * Page mapping lock.
 *
 * @remarks This doesn't work in structures shared between
 *          ring-3, ring-0 and/or GC.
 */
typedef struct PGMPAGEMAPLOCK
{
    /** @todo see PGMPhysIsPageMappingLockValid for possibly incorrect assumptions */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    /** Just a dummy for the time being. */
    uint32_t    u32Dummy;
#else
    /** Pointer to the PGMPAGE. */
    void       *pvPage;
    /** Pointer to the PGMCHUNKR3MAP. */
    void       *pvMap;
#endif
} PGMPAGEMAPLOCK;
/** Pointer to a page mapping lock. */
typedef PGMPAGEMAPLOCK *PPGMPAGEMAPLOCK;

VMMDECL(int)        PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
VMMDECL(int)        PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock);
VMMDECL(int)        PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock);
VMMDECL(int)        PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock);
VMMDECL(void)       PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);

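/** @par Example
 * A sketch (not from the original sources) of the map/use/release pattern
 * for the page mapping lock; GCPhys is assumed to be a valid guest physical
 * address and pVM a valid VM handle.
 * @code
 *      PGMPAGEMAPLOCK  Lock;
 *      void           *pv;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // pv maps GCPhys in the current context; this sketch assumes the
 *          // mapping is only used within the containing page and only while
 *          // the lock is held.
 *          uint32_t u32 = *(uint32_t *)pv;
 *          NOREF(u32);
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);  // always release the lock
 *      }
 * @endcode
 */
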
/**
 * Checks if the lock structure is valid
 *
 * @param   pVM     The VM handle.
 * @param   pLock   The lock structure initialized by the mapping function.
 */
DECLINLINE(bool) PGMPhysIsPageMappingLockValid(PVM pVM, PPGMPAGEMAPLOCK pLock)
{
    /** @todo -> complete/change this */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    return !!(pLock->u32Dummy);
#else
    return !!(pLock->pvPage);
#endif
}

VMMDECL(int)        PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr);
VMMDECL(RTR3PTR)    PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
VMMDECL(int)        PGMPhysGCPtr2R3Ptr(PVM pVM, RTGCPTR GCPtr, PRTR3PTR pR3Ptr);
VMMDECL(int)        PGMPhysGCPtr2R3PtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint64_t cr3, unsigned fFlags, PRTR3PTR pR3Ptr);
VMMDECL(void)       PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead);
VMMDECL(void)       PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite);
VMMDECL(int)        PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb);
#ifndef IN_RC /* Only ring 0 & 3. */
VMMDECL(int)        PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb);
VMMDECL(int)        PGMPhysSimpleReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
VMMDECL(int)        PGMPhysSimpleWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
VMMDECL(int)        PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
VMMDECL(int)        PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
VMMDECL(int)        PGMPhysSimpleDirtyWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
#endif /* !IN_RC */
VMMDECL(int)        PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
#ifdef VBOX_STRICT
VMMDECL(unsigned)   PGMAssertHandlerAndFlagsInSync(PVM pVM);
VMMDECL(unsigned)   PGMAssertNoMappingConflicts(PVM pVM);
VMMDECL(unsigned)   PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4);
#endif /* VBOX_STRICT */

#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
VMMDECL(int)        PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
VMMDECL(int)        PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv);
# ifdef IN_RC
VMMDECL(int)        PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
VMMDECL(int)        PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
VMMDECL(int)        PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
#  ifdef VBOX_STRICT
VMMDECL(void)       PGMDynCheckLocks(PVM pVM);
#  endif
# endif
VMMDECL(void)       PGMDynMapStartAutoSet(PVMCPU pVCpu);
VMMDECL(void)       PGMDynMapReleaseAutoSet(PVMCPU pVCpu);
VMMDECL(void)       PGMDynMapFlushAutoSet(PVMCPU pVCpu);
VMMDECL(void)       PGMDynMapMigrateAutoSet(PVMCPU pVCpu);
VMMDECL(uint32_t)   PGMDynMapPushAutoSubset(PVMCPU pVCpu);
VMMDECL(void)       PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
#endif


#ifdef IN_RC
/** @defgroup grp_pgm_gc  The PGM Guest Context API
 * @ingroup grp_pgm
 * @{
 */
/** @} */
#endif /* IN_RC */


#ifdef IN_RING0
/** @defgroup grp_pgm_r0  The PGM Host Context Ring-0 API
 * @ingroup grp_pgm
 * @{
 */
VMMR0DECL(int)      PGMR0PhysAllocateHandyPages(PVM pVM);
VMMR0DECL(int)      PGMR0Trap0eHandlerNestedPaging(PVM pVM, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
VMMR0DECL(int)      PGMR0DynMapInit(void);
VMMR0DECL(void)     PGMR0DynMapTerm(void);
VMMR0DECL(int)      PGMR0DynMapInitVM(PVM pVM);
VMMR0DECL(void)     PGMR0DynMapTermVM(PVM pVM);
VMMR0DECL(int)      PGMR0DynMapAssertIntegrity(void);
# endif
/** @} */
#endif /* IN_RING0 */



#ifdef IN_RING3
/** @defgroup grp_pgm_r3  The PGM Host Context Ring-3 API
 * @ingroup grp_pgm
 * @{
 */
VMMR3DECL(int)      PGMR3Init(PVM pVM);
VMMR3DECL(int)      PGMR3InitCPU(PVM pVM);
VMMR3DECL(int)      PGMR3InitDynMap(PVM pVM);
VMMR3DECL(int)      PGMR3InitFinalize(PVM pVM);
VMMR3DECL(void)     PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3DECL(void)     PGMR3Reset(PVM pVM);
VMMR3DECL(int)      PGMR3Term(PVM pVM);
VMMR3DECL(int)      PGMR3TermCPU(PVM pVM);
VMMR3DECL(int)      PGMR3LockCall(PVM pVM);
VMMR3DECL(int)      PGMR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);

#ifndef VBOX_WITH_NEW_PHYS_CODE
VMMR3DECL(int)      PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS GCPhys);
#endif /* !VBOX_WITH_NEW_PHYS_CODE */
VMMR3DECL(int)      PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc);
VMMR3DECL(int)      PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                          R3PTRTYPE(const char *) pszDesc);
VMMR3DECL(int)      PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb);
VMMR3DECL(int)      PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc);
VMMR3DECL(int)      PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion);
VMMR3DECL(int)      PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
VMMR3DECL(int)      PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys);
VMMR3DECL(bool)     PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys);
VMMR3DECL(int)      PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);
VMMR3DECL(int)      PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr);

/** @group PGMR3PhysRomRegister flags.
 * @{ */
/** Indicates that ROM shadowing should be enabled. */
#define PGMPHYS_ROM_FLAG_SHADOWED           RT_BIT_32(0)
/** Indicates that what pvBinary points to won't go away
 * and can be used for strictness checks. */
#define PGMPHYS_ROM_FLAG_PERMANENT_BINARY   RT_BIT_32(1)
/** @} */

VMMR3DECL(int)      PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
                                         const void *pvBinary, uint32_t fFlags, const char *pszDesc);
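
/** @par Example
 * A sketch (not from the original sources) of registering a shadowed ROM
 * image; the device instance pDevIns, the image pointer pvRomImage and the
 * chosen address/size are hypothetical placeholders.
 * @code
 *      int rc = PGMR3PhysRomRegister(pVM, pDevIns,
 *                                    UINT32_C(0x000f0000),             // guest physical base
 *                                    _64K,                             // size of the image
 *                                    pvRomImage,                       // backing image, must stay valid...
 *                                    PGMPHYS_ROM_FLAG_SHADOWED
 *                                  | PGMPHYS_ROM_FLAG_PERMANENT_BINARY,// ...which this flag asserts
 *                                    "Example ROM");
 *      AssertRCReturn(rc, rc);
 * @endcode
 */
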
VMMR3DECL(int)      PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt);
VMMR3DECL(int)      PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc);
#ifndef VBOX_WITH_NEW_PHYS_CODE
VMMR3DECL(int)      PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc);
VMMR3DECL(int)      PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask);
#endif /* !VBOX_WITH_NEW_PHYS_CODE */
VMMDECL(void)       PGMR3PhysSetA20(PVM pVM, bool fEnable);
VMMR3DECL(int)      PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
VMMR3DECL(int)      PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr);
VMMR3DECL(int)      PGMR3FinalizeMappings(PVM pVM);
VMMR3DECL(int)      PGMR3MappingsSize(PVM pVM, uint32_t *pcb);
VMMR3DECL(int)      PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
VMMR3DECL(int)      PGMR3MappingsUnfix(PVM pVM);
VMMR3DECL(int)      PGMR3MappingsDisable(PVM pVM);
VMMR3DECL(int)      PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages);
VMMR3DECL(int)      PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);

VMMR3DECL(int)      PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                                 PFNPGMR3PHYSHANDLER pfnHandlerR3, void *pvUserR3,
                                                 const char *pszModR0, const char *pszHandlerR0, RTR0PTR pvUserR0,
                                                 const char *pszModRC, const char *pszHandlerRC, RTRCPTR pvUserRC, const char *pszDesc);
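
/** @par Example
 * A sketch (not from the original sources) of registering a ring-3 write
 * handler over a hypothetical 8KB region; myDevWriteMonitor and pThis refer
 * to the kind of handler sketched after the FNPGMR3VIRTHANDLER/FNPGMR3PHYSHANDLER
 * typedefs above. Passing NULL/NIL for the ring-0 and raw-mode handlers is an
 * assumption of this sketch, not something this header documents.
 * @code
 *      int rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                            GCPhysStart, GCPhysStart + _8K - 1,   // inclusive last byte
 *                                            myDevWriteMonitor, pThis,             // ring-3 handler + user arg
 *                                            NULL, NULL, NIL_RTR0PTR,              // no ring-0 handler (assumed ok)
 *                                            NULL, NULL, NIL_RTRCPTR,              // no raw-mode handler (assumed ok)
 *                                            "Example write monitor");
 *      AssertRCReturn(rc, rc);
 * @endcode
 */
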
VMMDECL(int)        PGMR3HandlerVirtualRegisterEx(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
                                                  R3PTRTYPE(PFNPGMR3VIRTINVALIDATE) pfnInvalidateR3,
                                                  R3PTRTYPE(PFNPGMR3VIRTHANDLER) pfnHandlerR3,
                                                  RCPTRTYPE(PFNPGMRCVIRTHANDLER) pfnHandlerRC,
                                                  R3PTRTYPE(const char *) pszDesc);
VMMR3DECL(int)      PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
                                                PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
                                                PFNPGMR3VIRTHANDLER pfnHandlerR3,
                                                const char *pszHandlerRC, const char *pszModRC, const char *pszDesc);
VMMDECL(int)        PGMHandlerVirtualChangeInvalidateCallback(PVM pVM, RTGCPTR GCPtr, R3PTRTYPE(PFNPGMR3VIRTINVALIDATE) pfnInvalidateR3);
VMMDECL(int)        PGMHandlerVirtualDeregister(PVM pVM, RTGCPTR GCPtr);
VMMR3DECL(int)      PGMR3PoolGrow(PVM pVM);
#ifdef ___VBox_dbgf_h /** @todo fix this! */
VMMR3DECL(int)      PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
#endif
VMMR3DECL(int)      PGMR3DumpHierarchyGC(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPHYS PhysSearch);

VMMR3DECL(int)      PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv);
VMMR3DECL(uint8_t)  PGMR3PhysReadU8(PVM pVM, RTGCPHYS GCPhys);
VMMR3DECL(uint16_t) PGMR3PhysReadU16(PVM pVM, RTGCPHYS GCPhys);
VMMR3DECL(uint32_t) PGMR3PhysReadU32(PVM pVM, RTGCPHYS GCPhys);
VMMR3DECL(uint64_t) PGMR3PhysReadU64(PVM pVM, RTGCPHYS GCPhys);
VMMR3DECL(void)     PGMR3PhysWriteU8(PVM pVM, RTGCPHYS GCPhys, uint8_t Value);
VMMR3DECL(void)     PGMR3PhysWriteU16(PVM pVM, RTGCPHYS GCPhys, uint16_t Value);
VMMR3DECL(void)     PGMR3PhysWriteU32(PVM pVM, RTGCPHYS GCPhys, uint32_t Value);
VMMR3DECL(void)     PGMR3PhysWriteU64(PVM pVM, RTGCPHYS GCPhys, uint64_t Value);
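
/** @par Example
 * A sketch (not from the original sources) of the simple physical
 * read/modify/write helpers, poking a hypothetical dword at GCPhys:
 * @code
 *      uint32_t u32 = PGMR3PhysReadU32(pVM, GCPhys);   // read guest physical dword
 *      u32 |= RT_BIT_32(0);                            // set bit 0
 *      PGMR3PhysWriteU32(pVM, GCPhys, u32);            // write it back
 * @endcode
 */
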
VMMR3DECL(int)      PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk);
VMMR3DECL(void)     PGMR3PhysChunkInvalidateTLB(PVM pVM);
VMMR3DECL(int)      PGMR3PhysAllocateHandyPages(PVM pVM);

VMMR3DECL(int)      PGMR3CheckIntegrity(PVM pVM);

VMMR3DECL(int)      PGMR3DbgR3Ptr2GCPhys(PVM pVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys);
VMMR3DECL(int)      PGMR3DbgR3Ptr2HCPhys(PVM pVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys);
VMMR3DECL(int)      PGMR3DbgHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys);
VMMR3DECL(int)      PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb, uint32_t fFlags, size_t *pcbRead);
VMMR3DECL(int)      PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten);
VMMR3DECL(int)      PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, uint32_t fFlags, size_t *pcbRead);
VMMR3DECL(int)      PGMR3DbgWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten);
VMMR3DECL(int)      PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange, const uint8_t *pabNeedle, size_t cbNeedle, PRTGCPHYS pGCPhysHit);
VMMR3DECL(int)      PGMR3DbgScanVirtual(PVM pVM, RTGCPTR GCPtr, RTGCPTR cbRange, const uint8_t *pabNeedle, size_t cbNeedle, PRTGCUINTPTR pGCPhysHit);
/** @} */
#endif /* IN_RING3 */

__END_DECLS

/** @} */
#endif
