VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp@ 5528

Last change on this file since 5528 was 4819, checked in by vboxsync, 17 years ago

Don't assert on invalid handles as it upsets the dprintf2 logging in vboxdrv.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev
File size: 28.7 KB
Line 
1/* $Revision: 4819 $ */
2/** @file
3 * innotek Portable Runtime - Ring-0 Memory Objects, Common Code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
23#include <iprt/memobj.h>
24#include <iprt/alloc.h>
25#include <iprt/process.h>
26#include <iprt/assert.h>
27#include <iprt/err.h>
28#include <iprt/log.h>
29#include <iprt/param.h>
30#include "internal/memobj.h"
31
32
33/**
34 * Internal function for allocating a new memory object.
35 *
36 * @returns The allocated and initialized handle.
37 * @param cbSelf The size of the memory object handle. 0 mean default size.
38 * @param enmType The memory object type.
39 * @param pv The memory object mapping.
40 * @param cb The size of the memory object.
41 */
42PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
43{
44 PRTR0MEMOBJINTERNAL pNew;
45
46 /* validate the size */
47 if (!cbSelf)
48 cbSelf = sizeof(*pNew);
49 Assert(cbSelf >= sizeof(*pNew));
50 Assert(cbSelf == (uint32_t)cbSelf);
51
52 /*
53 * Allocate and initialize the object.
54 */
55 pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
56 if (pNew)
57 {
58 pNew->u32Magic = RTR0MEMOBJ_MAGIC;
59 pNew->cbSelf = (uint32_t)cbSelf;
60 pNew->enmType = enmType;
61 pNew->cb = cb;
62 pNew->pv = pv;
63 }
64 return pNew;
65}
66
67
68/**
69 * Deletes an incomplete memory object.
70 *
71 * This is for cleaning up after failures during object creation.
72 *
73 * @param pMem The incomplete memory object to delete.
74 */
75void rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
76{
77 if (pMem)
78 {
79 pMem->u32Magic++;
80 pMem->enmType = RTR0MEMOBJTYPE_END;
81 RTMemFree(pMem);
82 }
83}
84
85
86/**
87 * Links a mapping object to a primary object.
88 *
89 * @returns IPRT status code.
90 * @retval VINF_SUCCESS on success.
91 * @retval VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
92 * @param pParent The parent (primary) memory object.
93 * @param pChild The child (mapping) memory object.
94 */
95static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
96{
97 uint32_t i;
98
99 /* sanity */
100 Assert(rtR0MemObjIsMapping(pChild));
101 Assert(!rtR0MemObjIsMapping(pParent));
102
103 /* expand the array? */
104 i = pParent->uRel.Parent.cMappings;
105 if (i >= pParent->uRel.Parent.cMappingsAllocated)
106 {
107 void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
108 (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
109 if (!pv)
110 return VERR_NO_MEMORY;
111 pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
112 pParent->uRel.Parent.cMappingsAllocated = i + 32;
113 Assert(i == pParent->uRel.Parent.cMappings);
114 }
115
116 /* do the linking. */
117 pParent->uRel.Parent.papMappings[i] = pChild;
118 pParent->uRel.Parent.cMappings++;
119 pChild->uRel.Child.pParent = pParent;
120
121 return VINF_SUCCESS;
122}
123
124
125/**
126 * Checks if this is mapping or not.
127 *
128 * @returns true if it's a mapping, otherwise false.
129 * @param MemObj The ring-0 memory object handle.
130 */
131RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
132{
133 /* Validate the object handle. */
134 PRTR0MEMOBJINTERNAL pMem;
135 AssertPtrReturn(MemObj, false);
136 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
137 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
138 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
139
140 /* hand it on to the inlined worker. */
141 return rtR0MemObjIsMapping(pMem);
142}
143
144
145/**
146 * Gets the address of a ring-0 memory object.
147 *
148 * @returns The address of the memory object.
149 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
150 * @param MemObj The ring-0 memory object handle.
151 */
152RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
153{
154 /* Validate the object handle. */
155 PRTR0MEMOBJINTERNAL pMem;
156 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
157 return NULL;
158 AssertPtrReturn(MemObj, NULL);
159 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
160 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
161 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);
162
163 /* return the mapping address. */
164 return pMem->pv;
165}
166
167
168/**
169 * Gets the ring-3 address of a ring-0 memory object.
170 *
171 * This only applies to ring-0 memory object with ring-3 mappings of some kind, i.e.
172 * locked user memory, reserved user address space and user mappings. This API should
173 * not be used on any other objects.
174 *
175 * @returns The address of the memory object.
176 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
177 * Strict builds will assert in both cases.
178 * @param MemObj The ring-0 memory object handle.
179 */
180RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
181{
182 PRTR0MEMOBJINTERNAL pMem;
183
184 /* Validate the object handle. */
185 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
186 return NIL_RTR3PTR;
187 AssertPtrReturn(MemObj, NIL_RTR3PTR);
188 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
189 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
190 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
191 if (RT_UNLIKELY( ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
192 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
193 && ( pMem->enmType != RTR0MEMOBJTYPE_LOCK
194 || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
195 && ( pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
196 || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
197 && ( pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
198 || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
199 return NIL_RTR3PTR;
200
201 /* return the mapping address. */
202 return (RTR3PTR)pMem->pv;
203}
204
205
206/**
207 * Gets the size of a ring-0 memory object.
208 *
209 * @returns The address of the memory object.
210 * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
211 * @param MemObj The ring-0 memory object handle.
212 */
213RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
214{
215 PRTR0MEMOBJINTERNAL pMem;
216
217 /* Validate the object handle. */
218 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
219 return 0;
220 AssertPtrReturn(MemObj, 0);
221 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
222 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
223 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
224
225 /* return the size. */
226 return pMem->cb;
227}
228
229
230/**
231 * Get the physical address of an page in the memory object.
232 *
233 * @returns The physical address.
234 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
235 * @returns NIL_RTHCPHYS if the iPage is out of range.
236 * @returns NIL_RTHCPHYS if the object handle isn't valid.
237 * @param MemObj The ring-0 memory object handle.
238 * @param iPage The page number within the object.
239 */
240RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
241{
242 /* Validate the object handle. */
243 PRTR0MEMOBJINTERNAL pMem;
244 size_t cPages;
245 AssertPtrReturn(MemObj, NIL_RTHCPHYS);
246 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
247 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
248 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
249 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
250 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
251 cPages = (pMem->cb >> PAGE_SHIFT);
252 if (iPage >= cPages)
253 {
254 /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
255 if (iPage == cPages)
256 return NIL_RTHCPHYS;
257 AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
258 }
259
260 /*
261 * We know the address of physically contiguous allocations and mappings.
262 */
263 if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
264 return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
265 if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
266 return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
267
268 /*
269 * Do the job.
270 */
271 return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
272}
273
274
275/**
276 * Frees a ring-0 memory object.
277 *
278 * @returns IPRT status code.
279 * @retval VERR_INVALID_HANDLE if
280 * @param MemObj The ring-0 memory object to be freed. NULL is accepted.
281 * @param fFreeMappings Whether or not to free mappings of the object.
282 */
283RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
284{
285 /*
286 * Validate the object handle.
287 */
288 PRTR0MEMOBJINTERNAL pMem;
289 int rc;
290
291 if (MemObj == NIL_RTR0MEMOBJ)
292 return VINF_SUCCESS;
293 AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
294 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
295 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
296 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
297
298 /*
299 * Deal with mapings according to fFreeMappings.
300 */
301 if ( !rtR0MemObjIsMapping(pMem)
302 && pMem->uRel.Parent.cMappings > 0)
303 {
304 /* fail if not requested to free mappings. */
305 if (!fFreeMappings)
306 return VERR_MEMORY_BUSY;
307
308 while (pMem->uRel.Parent.cMappings > 0)
309 {
310 PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
311 pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;
312
313 /* sanity checks. */
314 AssertPtr(pChild);
315 AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
316 AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
317 AssertFatal(rtR0MemObjIsMapping(pChild));
318
319 /* free the mapping. */
320 rc = rtR0MemObjNativeFree(pChild);
321 if (RT_FAILURE(rc))
322 {
323 Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Vrc\n", pChild, pChild->pv, pChild->cb, rc));
324 pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
325 return rc;
326 }
327 }
328 }
329
330 /*
331 * Free this object.
332 */
333 rc = rtR0MemObjNativeFree(pMem);
334 if (RT_SUCCESS(rc))
335 {
336 /*
337 * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
338 */
339 if (rtR0MemObjIsMapping(pMem))
340 {
341 PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
342 uint32_t i;
343
344 /* sanity checks */
345 AssertPtr(pParent);
346 AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
347 AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
348 AssertFatal(!rtR0MemObjIsMapping(pParent));
349 AssertFatal(pParent->uRel.Parent.cMappings > 0);
350 AssertPtr(pParent->uRel.Parent.papMappings);
351
352 /* locate and remove from the array of mappings. */
353 i = pParent->uRel.Parent.cMappings;
354 while (i-- > 0)
355 {
356 if (pParent->uRel.Parent.papMappings[i] == pMem)
357 {
358 pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
359 break;
360 }
361 }
362 Assert(i != UINT32_MAX);
363 }
364 else
365 Assert(pMem->uRel.Parent.cMappings == 0);
366
367 /*
368 * Finally, destroy the handle.
369 */
370 pMem->u32Magic++;
371 pMem->enmType = RTR0MEMOBJTYPE_END;
372 if (!rtR0MemObjIsMapping(pMem))
373 RTMemFree(pMem->uRel.Parent.papMappings);
374 RTMemFree(pMem);
375 }
376 else
377 Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Vrc\n",
378 pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
379 return rc;
380}
381
382
383
384/**
385 * Allocates page aligned virtual kernel memory.
386 *
387 * The memory is taken from a non paged (= fixed physical memory backing) pool.
388 *
389 * @returns IPRT status code.
390 * @param pMemObj Where to store the ring-0 memory object handle.
391 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
392 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
393 */
394RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
395{
396 /* sanity checks. */
397 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
398 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
399 *pMemObj = NIL_RTR0MEMOBJ;
400 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
401 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
402
403 /* do the allocation. */
404 return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
405}
406
407
408/**
409 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
410 *
411 * The physical memory backing the allocation is fixed.
412 *
413 * @returns IPRT status code.
414 * @param pMemObj Where to store the ring-0 memory object handle.
415 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
416 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
417 */
418RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
419{
420 /* sanity checks. */
421 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
422 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
423 *pMemObj = NIL_RTR0MEMOBJ;
424 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
425 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
426
427 /* do the allocation. */
428 return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
429}
430
431
432/**
433 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
434 *
435 * The physical memory backing the allocation is fixed.
436 *
437 * @returns IPRT status code.
438 * @param pMemObj Where to store the ring-0 memory object handle.
439 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
440 * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
441 */
442RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
443{
444 /* sanity checks. */
445 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
446 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
447 *pMemObj = NIL_RTR0MEMOBJ;
448 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
449 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
450
451 /* do the allocation. */
452 return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
453}
454
455
456/**
457 * Locks a range of user virtual memory.
458 *
459 * @returns IPRT status code.
460 * @param pMemObj Where to store the ring-0 memory object handle.
461 * @param R3Ptr User virtual address. This is rounded down to a page boundrary.
462 * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
463 * @param R0Process The process to lock pages in. NIL_R0PROCESS is an alias for the current one.
464 *
465 * @remark RTR0MemGetAddressR3() and RTR0MemGetAddress() will return the rounded down address.
466 */
467RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
468{
469 /* sanity checks. */
470 const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
471 RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
472 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
473 *pMemObj = NIL_RTR0MEMOBJ;
474 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
475 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
476 if (R0Process == NIL_RTR0PROCESS)
477 R0Process = RTR0ProcHandleSelf();
478
479 /* do the locking. */
480 return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, R0Process);
481}
482
483
484/**
485 * Locks a range of kernel virtual memory.
486 *
487 * @returns IPRT status code.
488 * @param pMemObj Where to store the ring-0 memory object handle.
489 * @param pv Kernel virtual address. This is rounded down to a page boundrary.
490 * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
491 *
492 * @remark RTR0MemGetAddress() will return the rounded down address.
493 */
494RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
495{
496 /* sanity checks. */
497 const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
498 void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
499 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
500 *pMemObj = NIL_RTR0MEMOBJ;
501 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
502 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
503 AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
504
505 /* do the allocation. */
506 return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
507}
508
509
510/**
511 * Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
512 *
513 * @returns IPRT status code.
514 * @param pMemObj Where to store the ring-0 memory object handle.
515 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
516 * @param PhysHighest The highest permittable address (inclusive).
517 * Pass NIL_RTHCPHYS if any address is acceptable.
518 */
519RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
520{
521 /* sanity checks. */
522 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
523 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
524 *pMemObj = NIL_RTR0MEMOBJ;
525 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
526 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
527 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
528
529 /* do the allocation. */
530 return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
531}
532
533
534/**
535 * Allocates non-contiguous page aligned physical memory without (necessarily) any kernel mapping.
536 *
537 * @returns IPRT status code.
538 * @param pMemObj Where to store the ring-0 memory object handle.
539 * @param cb Number of bytes to allocate. This is rounded up to nearest page.
540 * @param PhysHighest The highest permittable address (inclusive).
541 * Pass NIL_RTHCPHYS if any address is acceptable.
542 */
543RTR0DECL(int) RTR0MemObjAllocPhysNC(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
544{
545 /* sanity checks. */
546 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
547 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
548 *pMemObj = NIL_RTR0MEMOBJ;
549 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
550 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
551 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
552
553 /* do the allocation. */
554 return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest);
555}
556
557
558/**
559 * Creates a page aligned, contiguous, physical memory object.
560 *
561 * No physical memory is allocated, we trust you do know what you're doing.
562 *
563 * @returns IPRT status code.
564 * @param pMemObj Where to store the ring-0 memory object handle.
565 * @param Phys The physical address to start at. This is rounded down to the
566 * nearest page boundrary.
567 * @param cb The size of the object in bytes. This is rounded up to nearest page boundrary.
568 */
569RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
570{
571 /* sanity checks. */
572 const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
573 const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
574 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
575 *pMemObj = NIL_RTR0MEMOBJ;
576 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
577 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
578 AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
579
580 /* do the allocation. */
581 return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
582}
583
584
585/**
586 * Reserves kernel virtual address space.
587 *
588 * @returns IPRT status code.
589 * @param pMemObj Where to store the ring-0 memory object handle.
590 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
591 * @param cb The number of bytes to reserve. This is rounded up to nearest page.
592 * @param uAlignment The alignment of the reserved memory.
593 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
594 */
595RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
596{
597 /* sanity checks. */
598 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
599 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
600 *pMemObj = NIL_RTR0MEMOBJ;
601 if (uAlignment == 0)
602 uAlignment = PAGE_SIZE;
603 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
604 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
605 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
606 if (pvFixed != (void *)-1)
607 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
608
609 /* do the reservation. */
610 return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
611}
612
613
614/**
615 * Reserves user virtual address space in the current process.
616 *
617 * @returns IPRT status code.
618 * @param pMemObj Where to store the ring-0 memory object handle.
619 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
620 * @param cb The number of bytes to reserve. This is rounded up to nearest PAGE_SIZE.
621 * @param uAlignment The alignment of the reserved memory.
622 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
623 * @param R0Process The process to reserve the memory in. NIL_R0PROCESS is an alias for the current one.
624 */
625RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
626{
627 /* sanity checks. */
628 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
629 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
630 *pMemObj = NIL_RTR0MEMOBJ;
631 if (uAlignment == 0)
632 uAlignment = PAGE_SIZE;
633 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
634 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
635 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
636 if (R3PtrFixed != (RTR3PTR)-1)
637 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
638 if (R0Process == NIL_RTR0PROCESS)
639 R0Process = RTR0ProcHandleSelf();
640
641 /* do the reservation. */
642 return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process);
643}
644
645
646/**
647 * Maps a memory object into kernel virtual address space.
648 *
649 * @returns IPRT status code.
650 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
651 * @param MemObjToMap The object to be map.
652 * @param pvFixed Requested address. (void *)-1 means any address. This must match the alignment.
653 * @param uAlignment The alignment of the reserved memory.
654 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
655 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
656 */
657RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
658{
659 /* sanity checks. */
660 PRTR0MEMOBJINTERNAL pMemToMap;
661 PRTR0MEMOBJINTERNAL pNew;
662 int rc;
663 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
664 *pMemObj = NIL_RTR0MEMOBJ;
665 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
666 pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
667 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
668 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
669 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
670 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
671 if (uAlignment == 0)
672 uAlignment = PAGE_SIZE;
673 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
674 if (pvFixed != (void *)-1)
675 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
676 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
677 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
678
679
680 /* do the mapping. */
681 rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt);
682 if (RT_SUCCESS(rc))
683 {
684 /* link it. */
685 rc = rtR0MemObjLink(pMemToMap, pNew);
686 if (RT_SUCCESS(rc))
687 *pMemObj = pNew;
688 else
689 {
690 /* damn, out of memory. bail out. */
691 int rc2 = rtR0MemObjNativeFree(pNew);
692 AssertRC(rc2);
693 pNew->u32Magic++;
694 pNew->enmType = RTR0MEMOBJTYPE_END;
695 RTMemFree(pNew);
696 }
697 }
698
699 return rc;
700}
701
702
703/**
704 * Maps a memory object into user virtual address space in the current process.
705 *
706 * @returns IPRT status code.
707 * @param pMemObj Where to store the ring-0 memory object handle of the mapping object.
708 * @param MemObjToMap The object to be map.
709 * @param R3PtrFixed Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
710 * @param uAlignment The alignment of the reserved memory.
711 * Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
712 * @param fProt Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
713 * @param R0Process The process to map the memory into. NIL_R0PROCESS is an alias for the current one.
714 */
715RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
716{
717 /* sanity checks. */
718 PRTR0MEMOBJINTERNAL pMemToMap;
719 PRTR0MEMOBJINTERNAL pNew;
720 int rc;
721 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
722 pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
723 *pMemObj = NIL_RTR0MEMOBJ;
724 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
725 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
726 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
727 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
728 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
729 if (uAlignment == 0)
730 uAlignment = PAGE_SIZE;
731 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
732 if (R3PtrFixed != (RTR3PTR)-1)
733 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
734 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
735 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
736 if (R0Process == NIL_RTR0PROCESS)
737 R0Process = RTR0ProcHandleSelf();
738
739 /* do the mapping. */
740 rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process);
741 if (RT_SUCCESS(rc))
742 {
743 /* link it. */
744 rc = rtR0MemObjLink(pMemToMap, pNew);
745 if (RT_SUCCESS(rc))
746 *pMemObj = pNew;
747 else
748 {
749 /* damn, out of memory. bail out. */
750 int rc2 = rtR0MemObjNativeFree(pNew);
751 AssertRC(rc2);
752 pNew->u32Magic++;
753 pNew->enmType = RTR0MEMOBJTYPE_END;
754 RTMemFree(pNew);
755 }
756 }
757
758 return rc;
759}
760
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette