VirtualBox

source: vbox/trunk/include/VBox/mm.h@ 10353

Last change on this file since 10353 was 9212, checked in by vboxsync, 17 years ago

Major changes for sizeof(RTGCPTR) == sizeof(uint64_t).
Introduced RCPTRTYPE for pointers valid in raw mode only (RTGCPTR32).

Disabled by default. Enable by adding VBOX_WITH_64_BITS_GUESTS to your LocalConfig.kmk.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
/** @file
 * MM - The Memory Manager.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_mm_h
#define ___VBox_mm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/x86.h>
#include <VBox/sup.h>


__BEGIN_DECLS

/** @defgroup grp_mm The Memory Manager API
 * @{
 */

/** @name RAM Page Flags
 * Since internal ranges have byte granularity, it is possible for a
 * page to be flagged for several uses. The access virtualization in PGM
 * will choose the most restricted one and use EM to emulate access to
 * the less restricted areas of the page.
 *
 * Bits 0-11 only, since they are fitted into the offset part of a physical memory address.
 * @{
 */
#if 1
/** Reserved - Not RAM, ROM nor MMIO2.
 * If this bit is cleared the memory is assumed to be some kind of RAM.
 * Normal MMIO may set it but that depends on whether the RAM range was
 * created specially for the MMIO or not.
 *
 * @remarks The current implementation will always reserve backing
 *          memory for reserved ranges to simplify things.
 */
#define MM_RAM_FLAGS_RESERVED RT_BIT(0)
/** ROM - Read Only Memory.
 * The page has an HC physical address which contains the BIOS code. All write
 * access is trapped and ignored.
 *
 * HACK: Writable shadow ROM is indicated by both ROM and MMIO2 being
 *       set. (We're out of bits.)
 */
#define MM_RAM_FLAGS_ROM RT_BIT(1)
/** MMIO - Memory Mapped I/O.
 * All access is trapped and emulated. No physical backing is required, but
 * it may be present for various reasons.
 */
#define MM_RAM_FLAGS_MMIO RT_BIT(2)
/** MMIO2 - Memory Mapped I/O, variation 2.
 * The virtualization is performed using real memory, catching only a few
 * accesses, e.g. for keeping track of dirty pages.
 * @remark Involved in the shadow ROM hack.
 */
#define MM_RAM_FLAGS_MMIO2 RT_BIT(3)
#endif

#ifndef VBOX_WITH_NEW_PHYS_CODE
/** Physical backing memory is allocated dynamically. Not set implies a one-time static allocation. */
#define MM_RAM_FLAGS_DYNAMIC_ALLOC RT_BIT(11)
#endif /* !VBOX_WITH_NEW_PHYS_CODE */

/** The shift used to get the reference count. */
#define MM_RAM_FLAGS_CREFS_SHIFT 62
/** The mask applied to the reference count (cRefs) after using MM_RAM_FLAGS_CREFS_SHIFT to shift it down. */
#define MM_RAM_FLAGS_CREFS_MASK 0x3
/** The (shifted) cRefs value used to indicate that the idx is the head of a
 * physical cross reference extent list. */
#define MM_RAM_FLAGS_CREFS_PHYSEXT MM_RAM_FLAGS_CREFS_MASK
/** The shift used to get the page pool idx. (Apply MM_RAM_FLAGS_IDX_MASK to the result when shifting down.) */
#define MM_RAM_FLAGS_IDX_SHIFT 48
/** The mask applied to the page pool idx after using MM_RAM_FLAGS_IDX_SHIFT to shift it down. */
#define MM_RAM_FLAGS_IDX_MASK 0x3fff
/** The idx value when we're out of extents or there are simply too many mappings of this page. */
#define MM_RAM_FLAGS_IDX_OVERFLOWED MM_RAM_FLAGS_IDX_MASK

/** Mask for masking off any references to the page. */
#define MM_RAM_FLAGS_NO_REFS_MASK UINT64_C(0x0000ffffffffffff)
/** @} */
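
/* Illustrative sketch only: how the shift/mask macros above decompose a 64-bit
 * page flags word into the page pool index and the (shifted) reference count.
 * The uFlags value and the helper function are hypothetical. */
#if 0 /* example */
static void mmExampleDecodeFlags(uint64_t uFlags)
{
    uint32_t const idx   = (uint32_t)((uFlags >> MM_RAM_FLAGS_IDX_SHIFT)   & MM_RAM_FLAGS_IDX_MASK);
    uint32_t const cRefs = (uint32_t)((uFlags >> MM_RAM_FLAGS_CREFS_SHIFT) & MM_RAM_FLAGS_CREFS_MASK);
    bool const     fMmio = (uFlags & MM_RAM_FLAGS_MMIO) != 0;
    uint64_t const uNoRefs = uFlags & MM_RAM_FLAGS_NO_REFS_MASK; /* flags with all references stripped */
    NOREF(idx); NOREF(cRefs); NOREF(fMmio); NOREF(uNoRefs);
}
#endif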

#ifndef VBOX_WITH_NEW_PHYS_CODE
/** @name MMR3PhysRegisterEx registration type
 * @{
 */
typedef enum
{
    /** Normal physical region (flags specify exact page type) */
    MM_PHYS_TYPE_NORMAL = 0,
    /** Allocate part of a dynamically allocated physical region */
    MM_PHYS_TYPE_DYNALLOC_CHUNK,

    MM_PHYS_TYPE_32BIT_HACK = 0x7fffffff
} MMPHYSREG;
/** @} */
#endif

/**
 * Memory Allocation Tags.
 * For use with MMHyperAlloc(), MMR3HeapAlloc(), MMR3HeapAllocEx(),
 * MMR3HeapAllocZ() and MMR3HeapAllocZEx().
 *
 * @remark Don't forget to update the dump command in MMHeap.cpp!
 */
typedef enum MMTAG
{
    MM_TAG_INVALID = 0,

    MM_TAG_CFGM,
    MM_TAG_CFGM_BYTES,
    MM_TAG_CFGM_STRING,
    MM_TAG_CFGM_USER,

    MM_TAG_CSAM,
    MM_TAG_CSAM_PATCH,

    MM_TAG_DBGF,
    MM_TAG_DBGF_INFO,
    MM_TAG_DBGF_LINE,
    MM_TAG_DBGF_LINE_DUP,
    MM_TAG_DBGF_MODULE,
    MM_TAG_DBGF_OS,
    MM_TAG_DBGF_STACK,
    MM_TAG_DBGF_SYMBOL,
    MM_TAG_DBGF_SYMBOL_DUP,

    MM_TAG_EM,

    MM_TAG_IOM,
    MM_TAG_IOM_STATS,

    MM_TAG_MM,
    MM_TAG_MM_LOOKUP_GUEST,
    MM_TAG_MM_LOOKUP_PHYS,
    MM_TAG_MM_LOOKUP_VIRT,
    MM_TAG_MM_PAGE,

    MM_TAG_PATM,
    MM_TAG_PATM_PATCH,

    MM_TAG_PDM,
    MM_TAG_PDM_ASYNC_COMPLETION,
    MM_TAG_PDM_DEVICE,
    MM_TAG_PDM_DEVICE_USER,
    MM_TAG_PDM_DRIVER,
    MM_TAG_PDM_DRIVER_USER,
    MM_TAG_PDM_USB,
    MM_TAG_PDM_USB_USER,
    MM_TAG_PDM_LUN,
    MM_TAG_PDM_QUEUE,
    MM_TAG_PDM_THREAD,

    MM_TAG_PGM,
    MM_TAG_PGM_CHUNK_MAPPING,
    MM_TAG_PGM_HANDLERS,
    MM_TAG_PGM_PHYS,
    MM_TAG_PGM_POOL,

    MM_TAG_REM,

    MM_TAG_SELM,

    MM_TAG_SSM,

    MM_TAG_STAM,

    MM_TAG_TM,

    MM_TAG_TRPM,

    MM_TAG_VM,
    MM_TAG_VM_REQ,

    MM_TAG_VMM,

    MM_TAG_HWACCM,

    MM_TAG_32BIT_HACK = 0x7fffffff
} MMTAG;




/** @defgroup grp_mm_hyper Hypervisor Memory Management
 * @ingroup grp_mm
 * @{ */

MMDECL(RTR3PTR) MMHyperR0ToR3(PVM pVM, RTR0PTR R0Ptr);
MMDECL(RTGCPTR) MMHyperR0ToGC(PVM pVM, RTR0PTR R0Ptr);
#ifndef IN_RING0
MMDECL(void *) MMHyperR0ToCC(PVM pVM, RTR0PTR R0Ptr);
#endif
MMDECL(RTR0PTR) MMHyperR3ToR0(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTGCPTR) MMHyperR3ToGC(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTR3PTR) MMHyperGCToR3(PVM pVM, RTGCPTR GCPtr);
MMDECL(RTR0PTR) MMHyperGCToR0(PVM pVM, RTGCPTR GCPtr);

#ifndef IN_RING3
MMDECL(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr);
#else
DECLINLINE(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr)
{
    NOREF(pVM);
    return R3Ptr;
}
#endif


#ifndef IN_GC
MMDECL(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr);
#else
DECLINLINE(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr)
{
    NOREF(pVM);
    return (void *)GCPtr;
}
#endif

#ifndef IN_RING3
MMDECL(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv);
#else
DECLINLINE(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif

#ifndef IN_RING0
MMDECL(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv);
#else
DECLINLINE(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif

#ifndef IN_GC
MMDECL(RCPTRTYPE(void *)) MMHyperCCToGC(PVM pVM, void *pv);
#else
DECLINLINE(RCPTRTYPE(void *)) MMHyperCCToGC(PVM pVM, void *pv)
{
    NOREF(pVM);
    return (RCPTRTYPE(void *))pv;
}
#endif


#ifdef IN_GC
MMDECL(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr);
#else
DECLINLINE(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RTHCPTR)Ptr;
}
#endif

#ifndef IN_GC
MMDECL(RCPTRTYPE(void *)) MMHyper2GC(PVM pVM, uintptr_t Ptr);
#else
DECLINLINE(RCPTRTYPE(void *)) MMHyper2GC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RCPTRTYPE(void *))Ptr;
}
#endif

MMDECL(RCPTRTYPE(void *)) MMHyperHC2GC(PVM pVM, RTHCPTR HCPtr);
MMDECL(RTHCPTR) MMHyperGC2HC(PVM pVM, RCPTRTYPE(void *) GCPtr);
MMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
MMDECL(int) MMHyperFree(PVM pVM, void *pv);
MMDECL(void) MMHyperHeapCheck(PVM pVM);
#ifdef DEBUG
MMDECL(void) MMHyperHeapDump(PVM pVM);
#endif
MMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM);
MMDECL(size_t) MMHyperHeapGetSize(PVM pVM);
MMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb);
MMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr);
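
/* Illustrative sketch only: a tagged hypervisor heap allocation, its raw-mode
 * address obtained via MMHyperCCToGC(), and the matching free. MYSTRUCT, the
 * surrounding function and the 0 (assumed default) alignment are hypothetical;
 * only the declared signatures above are taken as given. */
#if 0 /* example */
typedef struct MYSTRUCT { uint32_t u32Magic; } MYSTRUCT;

static int mmExampleHyperAlloc(PVM pVM)
{
    MYSTRUCT *pStruct;
    int rc = MMHyperAlloc(pVM, sizeof(*pStruct), 0 /* assumed default alignment */, MM_TAG_MM, (void **)&pStruct);
    if (RT_SUCCESS(rc))
    {
        pStruct->u32Magic = UINT32_C(0x19580922);
        /* The same allocation as seen from the raw-mode (GC) context: */
        RCPTRTYPE(MYSTRUCT *) GCPtrStruct = (RCPTRTYPE(MYSTRUCT *))MMHyperCCToGC(pVM, pStruct);
        NOREF(GCPtrStruct);
        rc = MMHyperFree(pVM, pStruct);
    }
    return rc;
}
#endif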


MMDECL(RTHCPHYS) MMPage2Phys(PVM pVM, void *pvPage);
MMDECL(void *) MMPagePhys2Page(PVM pVM, RTHCPHYS HCPhysPage);
MMDECL(int) MMPagePhys2PageEx(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
MMDECL(int) MMPagePhys2PageTry(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
MMDECL(void *) MMPhysGCPhys2HCVirt(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);


/** @def MMHYPER_GC_ASSERT_GCPTR
 * Asserts that an address is either NULL or inside the hypervisor memory area.
 * This assertion only works while IN_GC; it's a NOP everywhere else.
 * @thread The Emulation Thread.
 */
#ifdef IN_GC
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) Assert(MMHyperIsInsideArea((pVM), (GCPtr)) || !(GCPtr))
#else
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) do { } while (0)
#endif
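
/* Illustrative sketch only: validating a hypervisor pointer argument with the
 * macro above. The function and its GCPtrNode parameter are hypothetical. */
#if 0 /* example */
static void mmExampleValidate(PVM pVM, RTGCPTR GCPtrNode)
{
    /* Raw-mode builds assert that the pointer is NULL or inside the hypervisor
       area; ring-0 and ring-3 builds compile this away. */
    MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtrNode);
    /* ... use GCPtrNode ... */
    NOREF(pVM); NOREF(GCPtrNode);
}
#endif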

/** @} */


#ifdef IN_RING3
/** @defgroup grp_mm_r3 The MM Host Context Ring-3 API
 * @ingroup grp_mm
 * @{
 */

MMR3DECL(int) MMR3InitUVM(PUVM pUVM);
MMR3DECL(int) MMR3Init(PVM pVM);
MMR3DECL(int) MMR3InitPaging(PVM pVM);
MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM);
MMR3DECL(int) MMR3Term(PVM pVM);
MMR3DECL(void) MMR3TermUVM(PUVM pUVM);
MMR3DECL(void) MMR3Reset(PVM pVM);
MMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages);
MMR3DECL(int) MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc);
MMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages);

MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv);
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);


/** @defgroup grp_mm_r3_hyper Hypervisor Memory Manager (HC R3 Portion)
 * @ingroup grp_mm_r3
 * @{ */
MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC);
MMR3DECL(int) MMR3HyperHCVirt2HCPhysEx(PVM pVM, void *pvHC, PRTHCPHYS pHCPhys);
MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys);
MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv);
MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
/** @} */


/** @defgroup grp_mm_phys Guest Physical Memory Manager
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(int) MMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, const char *pszDesc);
#ifndef VBOX_WITH_NEW_PHYS_CODE
MMR3DECL(int) MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc);
#endif
MMR3DECL(int) MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary, bool fShadow, const char *pszDesc);
MMR3DECL(int) MMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
MMR3DECL(int) MMR3PhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, const char *pszDesc);
MMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM);
/** @} */
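
/* Illustrative sketch only: registering a pre-allocated guest RAM range and a
 * small non-shadowed ROM image during VM construction. The sizes, addresses,
 * buffers and the surrounding function are hypothetical. */
#if 0 /* example */
static int mmExampleRegisterPhys(PVM pVM, PPDMDEVINS pDevIns, void *pvRam, const void *pvRom)
{
    /* 128 MB of ordinary RAM starting at guest physical address 0. */
    int rc = MMR3PhysRegister(pVM, pvRam, 0, 128 * _1M, 0 /* fFlags */, "Base RAM");
    if (RT_SUCCESS(rc))
        /* A 64 KB ROM image at the top of the first 4 GB, no writable shadow. */
        rc = MMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xffff0000), _64K, pvRom,
                                 false /* fShadow */, "Example ROM");
    return rc;
}
#endif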


/** @defgroup grp_mm_page Physical Page Pool
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(void *) MMR3PageAlloc(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM);
MMR3DECL(void) MMR3PageFree(PVM pVM, void *pvPage);
MMR3DECL(void *) MMR3PageAllocLow(PVM pVM);
MMR3DECL(void) MMR3PageFreeLow(PVM pVM, void *pvPage);
MMR3DECL(void) MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage);
MMR3DECL(void *) MMR3PageDummyHCPtr(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM);
/** @} */
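
/* Illustrative sketch only: taking a page from the physical page pool, looking
 * up its host physical address with MMPage2Phys(), and returning it to the pool.
 * The surrounding function is hypothetical. */
#if 0 /* example */
static int mmExamplePagePool(PVM pVM)
{
    void *pvPage = MMR3PageAlloc(pVM);
    if (!pvPage)
        return VERR_NO_MEMORY;
    RTHCPHYS HCPhysPage = MMPage2Phys(pVM, pvPage); /* host physical address of the pool page */
    NOREF(HCPhysPage);
    MMR3PageFree(pVM, pvPage);
    return VINF_SUCCESS;
}
#endif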


/** @defgroup grp_mm_heap Heap Manager
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(void *) MMR3HeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(void *) MMR3HeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocZU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocZExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(void *) MMR3HeapRealloc(void *pv, size_t cbNewSize);
MMR3DECL(char *) MMR3HeapStrDup(PVM pVM, MMTAG enmTag, const char *psz);
MMR3DECL(char *) MMR3HeapStrDupU(PUVM pUVM, MMTAG enmTag, const char *psz);
MMR3DECL(void) MMR3HeapFree(void *pv);
/** @} */
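
/* Illustrative sketch only: a zeroed, tagged heap allocation and a tagged string
 * duplication, both released with MMR3HeapFree(). The buffer size, string and
 * surrounding function are hypothetical; the tag ties the memory to its owner
 * for the heap dump command. */
#if 0 /* example */
static int mmExampleHeap(PVM pVM)
{
    uint8_t *pab = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_VM, 256);
    char *pszCopy = MMR3HeapStrDup(pVM, MM_TAG_VM, "example");
    if (!pab || !pszCopy)
    {
        MMR3HeapFree(pszCopy);
        MMR3HeapFree(pab);
        return VERR_NO_MEMORY;
    }
    /* ... use pab and pszCopy ... */
    MMR3HeapFree(pszCopy);
    MMR3HeapFree(pab);
    return VINF_SUCCESS;
}
#endif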

/** @} */
#endif /* IN_RING3 */



#ifdef IN_GC
/** @defgroup grp_mm_gc The MM Guest Context API
 * @ingroup grp_mm
 * @{
 */

MMGCDECL(void) MMGCRamRegisterTrapHandler(PVM pVM);
MMGCDECL(void) MMGCRamDeregisterTrapHandler(PVM pVM);
MMGCDECL(int) MMGCRamReadNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWriteNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamRead(PVM pVM, void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWrite(PVM pVM, void *pDst, void *pSrc, size_t cb);

/** @} */
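
/* Illustrative sketch only: reading a dword of guest memory from raw-mode code.
 * This assumes the usual pattern of installing the MM trap handler around the
 * NoTrapHandler access routine; the pvGuestSrc address and the surrounding
 * function are hypothetical. */
#if 0 /* example */
static int mmExampleGCRead(PVM pVM, void *pvGuestSrc)
{
    uint32_t u32 = 0;
    MMGCRamRegisterTrapHandler(pVM);
    int rc = MMGCRamReadNoTrapHandler(&u32, pvGuestSrc, sizeof(u32));
    MMGCRamDeregisterTrapHandler(pVM);
    NOREF(u32);
    return rc; /* VINF_SUCCESS on success, a failure status if the access faulted. */
}
#endif
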
#endif /* IN_GC */

/** @} */
__END_DECLS


#endif