VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp @ 19614

Last change on this file since 19614 was 15718, checked in by vboxsync, 16 years ago

RTR0MemObjLockUser: Replaced the warnings with more useful information.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev
File size: 31.7 KB
/* $Revision: 15718 $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT ///@todo RTLOGGROUP_MEM
#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/process.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means the default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 */
PRTR0MEMOBJINTERNAL rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));
    Assert(cbSelf == (uint32_t)cbSelf);

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = (uint32_t)cbSelf;
        pNew->enmType  = enmType;
        pNew->cb       = cb;
        pNew->pv       = pv;
    }
    return pNew;
}
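

/**
 * For illustration, a hedged sketch of how a platform specific backend can use
 * rtR0MemObjNew(): passing a larger cbSelf reserves room for native fields
 * after the common part. The MYMEMOBJNATIVE structure below is hypothetical
 * and not taken from any of the real r0drv backends.
 *
 * @code
 *  typedef struct MYMEMOBJNATIVE
 *  {
 *      RTR0MEMOBJINTERNAL  Core;      // common part, must come first.
 *      void               *pvNative;  // whatever the platform needs to track.
 *  } MYMEMOBJNATIVE;
 *
 *  MYMEMOBJNATIVE *pMemNative = (MYMEMOBJNATIVE *)rtR0MemObjNew(sizeof(MYMEMOBJNATIVE),
 *                                                               RTR0MEMOBJTYPE_PAGE, NULL, cb);
 *  if (!pMemNative)
 *      return VERR_NO_MEMORY;
 * @endcode
 */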


/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
void rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    uint32_t i;

    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


/**
 * Checks if this is a mapping or not.
 *
 * @returns true if it's a mapping, otherwise false.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}


/**
 * Gets the address of a ring-0 memory object.
 *
 * @returns The address of the memory object.
 * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NULL;
    AssertPtrReturn(MemObj, NULL);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);

    /* return the mapping address. */
    return pMem->pv;
}


/**
 * Gets the ring-3 address of a ring-0 memory object.
 *
 * This only applies to ring-0 memory objects with ring-3 mappings of some kind, i.e.
 * locked user memory, reserved user address space and user mappings. This API should
 * not be used on any other objects.
 *
 * @returns The address of the memory object.
 * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
 *          Strict builds will assert in both cases.
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NIL_RTR3PTR;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    if (RT_UNLIKELY(    (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
                         || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
                         || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    &&  (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
                         || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
        return NIL_RTR3PTR;

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}


/**
 * Gets the size of a ring-0 memory object.
 *
 * @returns The size of the memory object.
 * @returns 0 if the handle is invalid (asserts in strict builds).
 * @param   MemObj  The ring-0 memory object handle.
 */
RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return 0;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);

    /* return the size. */
    return pMem->cb;
}


/**
 * Gets the physical address of a page in the memory object.
 *
 * @returns The physical address.
 * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
 * @returns NIL_RTHCPHYS if the iPage is out of range.
 * @returns NIL_RTHCPHYS if the object handle isn't valid.
 * @param   MemObj  The ring-0 memory object handle.
 * @param   iPage   The page number within the object.
 */
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    size_t cPages;
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}
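

/**
 * For illustration, a usage sketch relying on the loop convention permitted
 * above: the first out-of-range page returns NIL_RTHCPHYS exactly once, so an
 * object with physical backing can be walked without querying its size first.
 * The hMemObj handle is assumed to be valid.
 *
 * @code
 *  size_t   iPage = 0;
 *  RTHCPHYS HCPhys;
 *  while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
 *  {
 *      // ... use HCPhys for page iPage ...
 *      iPage++;
 *  }
 * @endcode
 */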


/**
 * Frees a ring-0 memory object.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the handle isn't valid.
 * @retval  VERR_MEMORY_BUSY if the object has mappings and fFreeMappings is false.
 * @param   MemObj          The ring-0 memory object to be freed. NIL_RTR0MEMOBJ is accepted.
 * @param   fFreeMappings   Whether or not to free mappings of the object.
 */
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (    !rtR0MemObjIsMapping(pMem)
        &&  pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
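

/**
 * For illustration, a hedged sketch of the fFreeMappings flag: freeing a
 * primary object that still has mappings fails with VERR_MEMORY_BUSY unless
 * the caller explicitly asks for the mappings to be torn down as well.
 *
 * @code
 *  int rc = RTR0MemObjFree(hMemObj, false);    // fails if mappings still exist...
 *  if (rc == VERR_MEMORY_BUSY)
 *      rc = RTR0MemObjFree(hMemObj, true);     // ...so free the mappings too.
 * @endcode
 */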


/**
 * Allocates page aligned virtual kernel memory.
 *
 * The memory is taken from a non-paged (= fixed physical memory backing) pool.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to the nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocPage(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
}
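

/**
 * For illustration, a minimal allocate / use / free sketch, assuming a ring-0
 * context with IPRT initialized; the size is arbitrary.
 *
 * @code
 *  RTR0MEMOBJ hMemObj;
 *  int rc = RTR0MemObjAllocPage(&hMemObj, 3 * PAGE_SIZE, false); // fExecutable=false.
 *  if (RT_SUCCESS(rc))
 *  {
 *      uint8_t *pb = (uint8_t *)RTR0MemObjAddress(hMemObj);
 *      size_t   cb = RTR0MemObjSize(hMemObj);
 *      // ... use pb[0] thru pb[cb - 1] ...
 *      rc = RTR0MemObjFree(hMemObj, false); // fFreeMappings=false.
 *  }
 * @endcode
 */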


/**
 * Allocates page aligned virtual kernel memory with physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to the nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocLow(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
}


/**
 * Allocates page aligned virtual kernel memory with contiguous physical backing below 4GB.
 *
 * The physical memory backing the allocation is fixed.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to the nearest page.
 * @param   fExecutable Flag indicating whether it should be permitted to execute code in the memory object.
 */
RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
}
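

/**
 * For illustration, a hedged sketch of a physically contiguous allocation,
 * e.g. for a (hypothetical) device that wants a single DMA address:
 *
 * @code
 *  RTR0MEMOBJ hMemObj;
 *  int rc = RTR0MemObjAllocCont(&hMemObj, _4K, false); // fExecutable=false.
 *  if (RT_SUCCESS(rc))
 *  {
 *      void    *pv     = RTR0MemObjAddress(hMemObj);
 *      RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0); // contiguous, so the first page suffices.
 *      // ... fill pv, hand HCPhys to the device ...
 *      rc = RTR0MemObjFree(hMemObj, false);
 *  }
 * @endcode
 */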


/**
 * Locks a range of user virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   R3Ptr       User virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to the nearest page boundary.
 * @param   R0Process   The process to lock pages in. NIL_RTR0PROCESS is an alias for the current one.
 *
 * @remarks RTR0MemObjAddressR3() and RTR0MemObjAddress() will return the rounded
 *          down address.
 *
 * @remarks Linux: This API requires that the memory being locked is in a memory
 *          mapping that is not required in any forked off child process. This
 *          is not intended as a permanent restriction; feel free to help out
 *          lifting it.
 */
RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, R0Process);
}
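

/**
 * For illustration, a hedged sketch of locking down a ring-3 buffer and
 * collecting the physical addresses of its pages; R3Ptr and cb are assumed to
 * come from the ring-3 caller, and the rounding described above applies.
 *
 * @code
 *  RTR0MEMOBJ hMemObj;
 *  int rc = RTR0MemObjLockUser(&hMemObj, R3Ptr, cb, NIL_RTR0PROCESS); // current process.
 *  if (RT_SUCCESS(rc))
 *  {
 *      size_t iPage;
 *      size_t cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT; // page rounded size.
 *      for (iPage = 0; iPage < cPages; iPage++)
 *      {
 *          RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
 *          // ... use HCPhys ...
 *      }
 *      rc = RTR0MemObjFree(hMemObj, false); // unlocks the pages again.
 *  }
 * @endcode
 */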


/**
 * Locks a range of kernel virtual memory.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pv          Kernel virtual address. This is rounded down to a page boundary.
 * @param   cb          Number of bytes to lock. This is rounded up to the nearest page boundary.
 *
 * @remark  RTR0MemObjAddress() will return the rounded down address.
 */
RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
}


/**
 * Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to the nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhys(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest);
}


/**
 * Allocates non-contiguous page aligned physical memory without (necessarily) any kernel mapping.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   cb          Number of bytes to allocate. This is rounded up to the nearest page.
 * @param   PhysHighest The highest permissible address (inclusive).
 *                      Pass NIL_RTHCPHYS if any address is acceptable.
 */
RTR0DECL(int) RTR0MemObjAllocPhysNC(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest);
}


/**
 * Creates a page aligned, contiguous, physical memory object.
 *
 * No physical memory is allocated; we trust you know what you're doing.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   Phys        The physical address to start at. This is rounded down to the
 *                      nearest page boundary.
 * @param   cb          The size of the object in bytes. This is rounded up to the nearest page boundary.
 */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    /* do the allocation. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned);
}


/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to the nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 */
RTR0DECL(int) RTR0MemObjReserveKernel(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
}
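

/**
 * For illustration, a hedged sketch of reserving kernel address space; the
 * 2MB size and alignment are arbitrary. Nothing is backed or mapped here,
 * the range is merely set aside for later fixed-address mappings.
 *
 * @code
 *  RTR0MEMOBJ hMemObjRes;
 *  int rc = RTR0MemObjReserveKernel(&hMemObjRes, (void *)-1, _2M, _2M); // anywhere, 2MB aligned.
 *  if (RT_SUCCESS(rc))
 *  {
 *      void *pv = RTR0MemObjAddress(hMemObjRes); // start of the reserved range.
 *      // ... map objects at fixed addresses within [pv, pv + _2M) ...
 *  }
 * @endcode
 */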


/**
 * Reserves user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle.
 * @param   R3PtrFixed  Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
 * @param   cb          The number of bytes to reserve. This is rounded up to the nearest page.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   R0Process   The process to reserve the memory in. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjReserveUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process);
}


/**
 * Maps a memory object into kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 */
RTR0DECL(int) RTR0MemObjMapKernel(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    return RTR0MemObjMapKernelEx(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0);
}
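

/**
 * For illustration, a hedged sketch combining RTR0MemObjEnterPhys() and
 * RTR0MemObjMapKernel() to access a device register page; PhysMmioStart is a
 * placeholder for a physical address obtained elsewhere.
 *
 * @code
 *  RTR0MEMOBJ hMemObjPhys, hMemObjMap;
 *  int rc = RTR0MemObjEnterPhys(&hMemObjPhys, PhysMmioStart, _4K);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = RTR0MemObjMapKernel(&hMemObjMap, hMemObjPhys, (void *)-1, 0,
 *                               RTMEM_PROT_READ | RTMEM_PROT_WRITE); // uAlignment=0 is an alias for PAGE_SIZE.
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint32_t volatile *pu32Regs = (uint32_t volatile *)RTR0MemObjAddress(hMemObjMap);
 *          // ... access the registers ...
 *          RTR0MemObjFree(hMemObjMap, false); // drop the mapping first.
 *      }
 *      RTR0MemObjFree(hMemObjPhys, false);
 *  }
 * @endcode
 */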


/**
 * Maps a memory object into kernel virtual address space.
 *
 * The ability to map subsections of the object into kernel space is currently
 * not implemented on all platforms. Most if not all platforms support mapping
 * the whole object into kernel space.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_SUPPORTED if it's not possible to map a subsection of a
 *          memory object on this platform. When you hit this, try implementing it.
 *
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   pvFixed     Requested address. (void *)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   offSub      Where in the object to start mapping. If non-zero
 *                      the value must be page aligned and cbSub must be
 *                      non-zero as well.
 * @param   cbSub       The size of the part of the object to be mapped. If
 *                      zero the entire object is mapped. The value must be
 *                      page aligned.
 */
RTR0DECL(int) RTR0MemObjMapKernelEx(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                    unsigned fProt, size_t offSub, size_t cbSub)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}


/**
 * Maps a memory object into user virtual address space in the current process.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Where to store the ring-0 memory object handle of the mapping object.
 * @param   MemObjToMap The object to be mapped.
 * @param   R3PtrFixed  Requested address. (RTR3PTR)-1 means any address. This must match the alignment.
 * @param   uAlignment  The alignment of the reserved memory.
 *                      Supported values are 0 (alias for PAGE_SIZE), PAGE_SIZE, _2M and _4M.
 * @param   fProt       Combination of RTMEM_PROT_* flags (except RTMEM_PROT_NONE).
 * @param   R0Process   The process to map the memory into. NIL_RTR0PROCESS is an alias for the current one.
 */
RTR0DECL(int) RTR0MemObjMapUser(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();

    /* do the mapping. */
    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
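

/**
 * For illustration, a hedged sketch of sharing a ring-0 allocation with the
 * calling process; note that the mapping object must be freed (or
 * fFreeMappings passed to RTR0MemObjFree) before the backing object can go
 * away.
 *
 * @code
 *  RTR0MEMOBJ hMemObj, hMemObjMap;
 *  int rc = RTR0MemObjAllocPage(&hMemObj, _4K, false);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = RTR0MemObjMapUser(&hMemObjMap, hMemObj, (RTR3PTR)-1, 0,
 *                             RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
 *      if (RT_SUCCESS(rc))
 *      {
 *          RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMemObjMap); // hand this to ring-3.
 *          // ...
 *          RTR0MemObjFree(hMemObjMap, false);
 *      }
 *      RTR0MemObjFree(hMemObj, false);
 *  }
 * @endcode
 */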