source: vbox/trunk/include/VBox/mm.h@ 6981

Last change on this file since 6981 was 6927, checked in by vboxsync, 17 years ago

Converted MM_RAM_FLAGS_VIRTUAL_HANDLER, MM_RAM_FLAGS_VIRTUAL_WRITE
and MM_RAM_FLAGS_VIRTUAL_ALL into a two-bit state variable in PGMPAGE.
I've checked this thrice because, like last time, bugs may have odd
side effects and hide for a while before showing up. Hope I got this
right (unlike for phys).

Fixed a regression from the MM_RAM_FLAGS_PHYSICAL change in the physical read/write code.

/** @file
 * MM - The Memory Manager.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_mm_h
#define ___VBox_mm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/x86.h>
#include <VBox/sup.h>


__BEGIN_DECLS

/** @defgroup grp_mm The Memory Manager API
 * @{
 */

/** @name RAM Page Flags
 * Since internal ranges have a byte granularity it's possible for a
 * page to be flagged for several uses. The access virtualization in PGM
 * will choose the most restrictive one and use EM to emulate access to
 * the less restricted areas of the page.
 *
 * Only bits 0-11 are used, since they are fitted into the offset part of a physical memory address.
 * @{
 */
/** Reserved - Not RAM, ROM nor MMIO2.
 * If this bit is cleared the memory is assumed to be some kind of RAM.
 * Normal MMIO may set it, but that depends on whether the RAM range was
 * created specifically for the MMIO or not.
 *
 * @remarks The current implementation will always reserve backing
 * memory for reserved ranges to simplify things.
 */
#define MM_RAM_FLAGS_RESERVED RT_BIT(0)
/** ROM - Read Only Memory.
 * The page has an HC physical address which contains the BIOS code. All write
 * access is trapped and ignored.
 *
 * HACK: Writable shadow ROM is indicated by both ROM and MMIO2 being
 * set. (We're out of bits.)
 */
#define MM_RAM_FLAGS_ROM RT_BIT(1)
/** MMIO - Memory Mapped I/O.
 * All access is trapped and emulated. No physical backing is required, but
 * it may be present for various reasons.
 */
#define MM_RAM_FLAGS_MMIO RT_BIT(2)
/** MMIO2 - Memory Mapped I/O, variation 2.
 * The virtualization is performed using real memory, and only a few
 * accesses are caught, e.g. for keeping track of dirty pages.
 * @remark Involved in the shadow ROM hack.
 */
#define MM_RAM_FLAGS_MMIO2 RT_BIT(3)

#ifndef VBOX_WITH_NEW_PHYS_CODE
/** Physical backing memory is allocated dynamically. Not set implies a one-time static allocation. */
#define MM_RAM_FLAGS_DYNAMIC_ALLOC RT_BIT(11)
#endif /* !VBOX_WITH_NEW_PHYS_CODE */

/** The shift used to get the reference count. */
#define MM_RAM_FLAGS_CREFS_SHIFT 62
/** The mask applied to the reference count after using MM_RAM_FLAGS_CREFS_SHIFT to shift it down. */
#define MM_RAM_FLAGS_CREFS_MASK 0x3
/** The (shifted) cRefs value used to indicate that the idx is the head of a
 * physical cross reference extent list. */
#define MM_RAM_FLAGS_CREFS_PHYSEXT MM_RAM_FLAGS_CREFS_MASK
/** The shift used to get the page pool idx. (Apply MM_RAM_FLAGS_IDX_MASK to the result when shifting down.) */
#define MM_RAM_FLAGS_IDX_SHIFT 48
/** The mask applied to the page pool idx after using MM_RAM_FLAGS_IDX_SHIFT to shift it down. */
#define MM_RAM_FLAGS_IDX_MASK 0x3fff
/** The idx value used when we're out of extents or there are simply too many mappings of this page. */
#define MM_RAM_FLAGS_IDX_OVERFLOWED MM_RAM_FLAGS_IDX_MASK

/** Mask for masking off any references to the page. */
#define MM_RAM_FLAGS_NO_REFS_MASK UINT64_C(0x0000ffffffffffff)
/** @} */
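
/** @par Example
 *  An illustrative sketch (not part of the API) of how the tracking fields
 *  above unpack from a 64-bit page word; u64Page is a hypothetical value used
 *  only for demonstration.
 *  @code
 *      uint64_t u64Page    = UINT64_C(0x0003000123456000) | MM_RAM_FLAGS_ROM;     // hypothetical page word
 *      unsigned cRefs      = (unsigned)((u64Page >> MM_RAM_FLAGS_CREFS_SHIFT) & MM_RAM_FLAGS_CREFS_MASK);
 *      unsigned iPage      = (unsigned)((u64Page >> MM_RAM_FLAGS_IDX_SHIFT)   & MM_RAM_FLAGS_IDX_MASK);
 *      uint64_t u64NoRefs  = u64Page & MM_RAM_FLAGS_NO_REFS_MASK;                 // reference tracking stripped
 *      bool     fShadowRom =    (u64Page & (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2))
 *                            == (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2);          // the shadow ROM hack above
 *  @endcode
 */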

#ifndef VBOX_WITH_NEW_PHYS_CODE
/** @name MMR3PhysRegisterEx registration type
 * @{
 */
typedef enum
{
    /** Normal physical region (flags specify exact page type) */
    MM_PHYS_TYPE_NORMAL = 0,
    /** Allocate part of a dynamically allocated physical region */
    MM_PHYS_TYPE_DYNALLOC_CHUNK,

    MM_PHYS_TYPE_32BIT_HACK = 0x7fffffff
} MMPHYSREG;
/** @} */
#endif

/**
 * Memory Allocation Tags.
 * For use with MMHyperAlloc(), MMR3HeapAlloc(), MMR3HeapAllocEx(),
 * MMR3HeapAllocZ() and MMR3HeapAllocZEx().
 *
 * @remark Don't forget to update the dump command in MMHeap.cpp!
 */
typedef enum MMTAG
{
    MM_TAG_INVALID = 0,

    MM_TAG_CFGM,
    MM_TAG_CFGM_BYTES,
    MM_TAG_CFGM_STRING,
    MM_TAG_CFGM_USER,

    MM_TAG_CSAM,
    MM_TAG_CSAM_PATCH,

    MM_TAG_DBGF,
    MM_TAG_DBGF_INFO,
    MM_TAG_DBGF_LINE,
    MM_TAG_DBGF_LINE_DUP,
    MM_TAG_DBGF_STACK,
    MM_TAG_DBGF_SYMBOL,
    MM_TAG_DBGF_SYMBOL_DUP,
    MM_TAG_DBGF_MODULE,

    MM_TAG_EM,

    MM_TAG_IOM,
    MM_TAG_IOM_STATS,

    MM_TAG_MM,
    MM_TAG_MM_LOOKUP_GUEST,
    MM_TAG_MM_LOOKUP_PHYS,
    MM_TAG_MM_LOOKUP_VIRT,
    MM_TAG_MM_PAGE,

    MM_TAG_PATM,
    MM_TAG_PATM_PATCH,

    MM_TAG_PDM,
    MM_TAG_PDM_ASYNC_COMPLETION,
    MM_TAG_PDM_DEVICE,
    MM_TAG_PDM_DEVICE_USER,
    MM_TAG_PDM_DRIVER,
    MM_TAG_PDM_DRIVER_USER,
    MM_TAG_PDM_USB,
    MM_TAG_PDM_USB_USER,
    MM_TAG_PDM_LUN,
    MM_TAG_PDM_QUEUE,
    MM_TAG_PDM_THREAD,

    MM_TAG_PGM,
    MM_TAG_PGM_CHUNK_MAPPING,
    MM_TAG_PGM_HANDLERS,
    MM_TAG_PGM_PHYS,
    MM_TAG_PGM_POOL,

    MM_TAG_REM,

    MM_TAG_SELM,

    MM_TAG_SSM,

    MM_TAG_STAM,

    MM_TAG_TM,

    MM_TAG_TRPM,

    MM_TAG_VM,
    MM_TAG_VM_REQ,

    MM_TAG_VMM,

    MM_TAG_HWACCM,

    MM_TAG_32BIT_HACK = 0x7fffffff
} MMTAG;

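/** @par Example
 *  A minimal sketch of how a tag accompanies an allocation so the memory can be
 *  accounted per subsystem; ring-3 context is assumed and PMYSTATE is a
 *  hypothetical type used only for illustration (see the heap API further down).
 *  @code
 *      PMYSTATE pState = (PMYSTATE)MMR3HeapAllocZ(pVM, MM_TAG_DBGF_INFO, sizeof(*pState)); // PMYSTATE: hypothetical
 *      if (!pState)
 *          return VERR_NO_MEMORY;
 *  @endcode
 */
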



/** @defgroup grp_mm_hyper Hypervisor Memory Management
 * @ingroup grp_mm
 * @{ */

/* Pointer conversions between the hypervisor memory mappings in the different
   contexts: raw-mode guest (GC), ring-0 (R0), ring-3 (R3) and the current
   context (CC). */
MMDECL(RTR3PTR) MMHyperR0ToR3(PVM pVM, RTR0PTR R0Ptr);
MMDECL(RTGCPTR) MMHyperR0ToGC(PVM pVM, RTR0PTR R0Ptr);
#ifndef IN_RING0
MMDECL(void *) MMHyperR0ToCC(PVM pVM, RTR0PTR R0Ptr);
#endif
MMDECL(RTR0PTR) MMHyperR3ToR0(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTGCPTR) MMHyperR3ToGC(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTR3PTR) MMHyperGCToR3(PVM pVM, RTGCPTR GCPtr);
MMDECL(RTR0PTR) MMHyperGCToR0(PVM pVM, RTGCPTR GCPtr);

#ifndef IN_RING3
MMDECL(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr);
#else
DECLINLINE(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr)
{
    NOREF(pVM);
    return R3Ptr;
}
#endif


#ifndef IN_GC
MMDECL(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr);
#else
DECLINLINE(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr)
{
    NOREF(pVM);
    return GCPtr;
}
#endif

#ifndef IN_RING3
MMDECL(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv);
#else
DECLINLINE(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif

#ifndef IN_RING0
MMDECL(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv);
#else
DECLINLINE(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif

#ifndef IN_GC
MMDECL(RTGCPTR) MMHyperCCToGC(PVM pVM, void *pv);
#else
DECLINLINE(RTGCPTR) MMHyperCCToGC(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif


#ifdef IN_GC
MMDECL(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr);
#else
DECLINLINE(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RTHCPTR)Ptr;
}
#endif

#ifndef IN_GC
MMDECL(RTGCPTR) MMHyper2GC(PVM pVM, uintptr_t Ptr);
#else
DECLINLINE(RTGCPTR) MMHyper2GC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RTGCPTR)Ptr;
}
#endif

MMDECL(RTGCPTR) MMHyperHC2GC(PVM pVM, RTHCPTR HCPtr);
MMDECL(RTHCPTR) MMHyperGC2HC(PVM pVM, RTGCPTR GCPtr);
MMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
MMDECL(int) MMHyperFree(PVM pVM, void *pv);
#ifdef DEBUG
MMDECL(void) MMHyperHeapDump(PVM pVM);
#endif
MMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM);
MMDECL(size_t) MMHyperHeapGetSize(PVM pVM);
MMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb);
MMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr);

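/** @par Example
 *  A minimal sketch of allocating from the hypervisor heap and translating the
 *  result for the raw-mode context; MYSTRUCT, the 32-byte alignment and the use
 *  of MM_TAG_MM are placeholder assumptions, not prescribed values.
 *  @code
 *      void *pv;
 *      int rc = MMHyperAlloc(pVM, sizeof(MYSTRUCT), 32, MM_TAG_MM, &pv);   // MYSTRUCT: hypothetical
 *      if (RT_SUCCESS(rc))
 *      {
 *          RTGCPTR GCPtr = MMHyperCCToGC(pVM, pv);
 *          Assert(MMHyperIsInsideArea(pVM, GCPtr));
 *          // ... use pv / GCPtr ...
 *          MMHyperFree(pVM, pv);
 *      }
 *  @endcode
 */
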

MMDECL(RTHCPHYS) MMPage2Phys(PVM pVM, void *pvPage);
MMDECL(void *) MMPagePhys2Page(PVM pVM, RTHCPHYS HCPhysPage);
MMDECL(int) MMPagePhys2PageEx(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
MMDECL(int) MMPagePhys2PageTry(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
MMDECL(void *) MMPhysGCPhys2HCVirt(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);


/** @def MMHYPER_GC_ASSERT_GCPTR
 * Asserts that an address is either NULL or inside the hypervisor memory area.
 * This assertion only works while IN_GC; it's a NOP everywhere else.
 * @thread The Emulation Thread.
 */
#ifdef IN_GC
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) Assert(MMHyperIsInsideArea((pVM), (GCPtr)) || !(GCPtr))
#else
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) do { } while (0)
#endif
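
/** @par Example
 *  An illustrative sketch of validating a GC pointer argument on entry to a
 *  function that may run in the GC context; mmSomeWorker is a hypothetical name.
 *  @code
 *      static int mmSomeWorker(PVM pVM, RTGCPTR GCPtrBuf)
 *      {
 *          MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtrBuf);     // no-op unless compiled for IN_GC
 *          // ...
 *          return VINF_SUCCESS;
 *      }
 *  @endcode
 */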

/** @} */


#ifdef IN_RING3
/** @defgroup grp_mm_r3 The MM Host Context Ring-3 API
 * @ingroup grp_mm
 * @{
 */

MMR3DECL(int) MMR3InitUVM(PUVM pUVM);
MMR3DECL(int) MMR3Init(PVM pVM);
MMR3DECL(int) MMR3InitPaging(PVM pVM);
MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM);
MMR3DECL(int) MMR3Term(PVM pVM);
MMR3DECL(void) MMR3TermUVM(PUVM pUVM);
MMR3DECL(void) MMR3Reset(PVM pVM);
MMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages);
MMR3DECL(int) MMR3IncreaseFixedReservation(PVM pVM, uint32_t cAddFixedPages);
MMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages);

MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv);
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);

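/** @par Example
 *  A minimal sketch of reading and writing guest virtual memory from ring-3
 *  using the two helpers above; GCPtrGuest is a hypothetical guest address.
 *  @code
 *      uint8_t abBuf[16];
 *      int rc = MMR3ReadGCVirt(pVM, abBuf, GCPtrGuest, sizeof(abBuf));
 *      if (RT_SUCCESS(rc))
 *          rc = MMR3WriteGCVirt(pVM, GCPtrGuest, abBuf, sizeof(abBuf));
 *  @endcode
 */
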

/** @defgroup grp_mm_r3_hyper Hypervisor Memory Manager (HC R3 Portion)
 * @ingroup grp_mm_r3
 * @{ */
MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC);
MMR3DECL(int) MMR3HyperHCVirt2HCPhysEx(PVM pVM, void *pvHC, PRTHCPHYS pHCPhys);
MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys);
MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv);
MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
/** @} */


/** @defgroup grp_mm_phys Guest Physical Memory Manager
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(int) MMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, const char *pszDesc);
#ifndef VBOX_WITH_NEW_PHYS_CODE
MMR3DECL(int) MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc);
#endif
MMR3DECL(int) MMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, unsigned cb);
MMR3DECL(int) MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary, bool fShadow, const char *pszDesc);
MMR3DECL(int) MMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
MMR3DECL(int) MMR3PhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, const char *pszDesc);
MMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM);
/** @} */
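
/** @par Example
 *  An illustrative sketch of registering a guest physical RAM range; pvRam is
 *  page-aligned host memory backing the range (obtained elsewhere), and the
 *  guest address, size, flags and description are hypothetical values.
 *  @code
 *      int rc = MMR3PhysRegister(pVM, pvRam, 0, 4 * _1M, 0, "Example RAM");    // GCPhys=0, 4MB, no flags
 *      AssertRCReturn(rc, rc);
 *  @endcode
 */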


/** @defgroup grp_mm_page Physical Page Pool
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(void *) MMR3PageAlloc(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM);
MMR3DECL(void) MMR3PageFree(PVM pVM, void *pvPage);
MMR3DECL(void *) MMR3PageAllocLow(PVM pVM);
MMR3DECL(void) MMR3PageFreeLow(PVM pVM, void *pvPage);
MMR3DECL(void) MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage);
MMR3DECL(void *) MMR3PageDummyHCPtr(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM);
/** @} */
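
/** @par Example
 *  A minimal sketch of taking a page from the pool, looking up its host
 *  physical address with MMPage2Phys() (declared earlier in this header) and
 *  returning the page again.
 *  @code
 *      void *pvPage = MMR3PageAlloc(pVM);
 *      if (pvPage)
 *      {
 *          RTHCPHYS HCPhys = MMPage2Phys(pVM, pvPage);
 *          NOREF(HCPhys);                              // ... use the page ...
 *          MMR3PageFree(pVM, pvPage);
 *      }
 *  @endcode
 */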


/** @defgroup grp_mm_heap Heap Manager
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(void *) MMR3HeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(void *) MMR3HeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocZU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocZExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(void *) MMR3HeapRealloc(void *pv, size_t cbNewSize);
MMR3DECL(char *) MMR3HeapStrDup(PVM pVM, MMTAG enmTag, const char *psz);
MMR3DECL(char *) MMR3HeapStrDupU(PUVM pUVM, MMTAG enmTag, const char *psz);
MMR3DECL(void) MMR3HeapFree(void *pv);
/** @} */
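
/** @par Example
 *  A minimal sketch of string duplication and reallocation on the tagged
 *  ring-3 heap; each block is released individually with MMR3HeapFree(), and
 *  the tag values are only examples.
 *  @code
 *      char *psz = MMR3HeapStrDup(pVM, MM_TAG_CFGM_STRING, "example");
 *      void *pv  = MMR3HeapAlloc(pVM, MM_TAG_CFGM_BYTES, 64);
 *      pv = MMR3HeapRealloc(pv, 128);      // note: no tag parameter
 *      MMR3HeapFree(pv);
 *      MMR3HeapFree(psz);
 *  @endcode
 */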

/** @} */
#endif /* IN_RING3 */



#ifdef IN_GC
/** @defgroup grp_mm_gc The MM Guest Context API
 * @ingroup grp_mm
 * @{
 */

MMGCDECL(void) MMGCRamRegisterTrapHandler(PVM pVM);
MMGCDECL(void) MMGCRamDeregisterTrapHandler(PVM pVM);
MMGCDECL(int) MMGCRamReadNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWriteNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamRead(PVM pVM, void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWrite(PVM pVM, void *pDst, void *pSrc, size_t cb);

/** @} */
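
/** @par Example
 *  An illustrative sketch, assuming the handler registration is meant to
 *  bracket a batch of accesses made with the *NoTrapHandler variants (this
 *  pairing is inferred from the names, not documented here); pvGuestSrc is a
 *  hypothetical guest address.
 *  @code
 *      uint32_t u32 = 0;
 *      MMGCRamRegisterTrapHandler(pVM);
 *      int rc = MMGCRamReadNoTrapHandler(&u32, pvGuestSrc, sizeof(u32));
 *      MMGCRamDeregisterTrapHandler(pVM);
 *  @endcode
 */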
#endif /* IN_GC */

/** @} */
__END_DECLS


#endif
