VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@75133

Last change on this file since 75133 was 75133, checked in by vboxsync, 6 years ago

IPRT: Shut up some warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.6 KB
/* $Id: memobj-r0drv-os2.cpp 75133 2018-10-28 17:49:49Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, OS/2.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************
*   Header Files                                                                                                      *
*********************************************************************************************************************/
#include "the-os2-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"



/*********************************************************************************************************************
*   Structures and Typedefs                                                                                           *
*********************************************************************************************************************/
/**
 * The OS/2 version of the memory object structure.
 */
typedef struct RTR0MEMOBJOS2
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Lock for the ring-3 / ring-0 pinned objects.
     * This member might not be allocated for some object types. */
    KernVMLock_t        Lock;
    /** Array of physical pages.
     * This array can be 0 in length for some object types. */
    KernPageList_t      aPages[1];
} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;

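/* A minimal sketch of how the trailing aPages array is sized (this is what
   the allocation and locking functions below do via rtR0MemObjNew):

        const ULONG cPages = cb >> PAGE_SHIFT;
        PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                               RTR0MEMOBJTYPE_PAGE, NULL, cb);

   Object types that do not need a page list only allocate up to and including
   the Lock member, i.e. RT_UOFFSETOF(RTR0MEMOBJOS2, Lock). */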

/*********************************************************************************************************************
*   Internal Functions                                                                                                *
*********************************************************************************************************************/
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);


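/**
 * Frees the native memory object, releasing whatever KernVMAlloc allocation
 * or KernVMLock lock backs it.
 *
 * @returns IPRT status code.
 * @param   pMem    The memory object to free.
 */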
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
    int rc;

    switch (pMemOs2->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_PHYS:
            if (!pMemOs2->Core.pv)
                break;

        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
                break;

            RT_FALL_THRU();
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            rc = KernVMFree(pMemOs2->Core.pv);
            AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rc = KernVMUnlock(&pMemOs2->Lock);
            AssertMsg(!rc, ("rc=%d\n", rc));
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


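/**
 * Allocates page aligned, fixed kernel memory via KernVMAlloc (VMDHA_FIXED)
 * and builds a per-page list with KernLinToPageList.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   cb              Number of bytes to allocate.
 * @param   fExecutable     Not used by this backend (NOREF).
 */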
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                           RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


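/**
 * Allocates fixed kernel memory for the "low" (below 4GB) object type.
 * Identical to rtR0MemObjNativeAllocPage apart from the object type and the
 * VERR_NO_MEMORY -> VERR_NO_LOW_MEMORY translation on failure.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   cb              Number of bytes to allocate.
 * @param   fExecutable     Not used by this backend (NOREF).
 */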
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                           RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    rc = RTErrConvertFromOS2(rc);
    return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
}


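/**
 * Allocates physically contiguous, fixed kernel memory via KernVMAlloc
 * (VMDHA_FIXED | VMDHA_CONTIG) and records the physical base address.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   cb              Number of bytes to allocate.
 * @param   fExecutable     Not used by this backend (NOREF).
 */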
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Cont.Phys = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


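/**
 * Allocates contiguous physical memory, adding VMDHA_16M to stay below 16MB
 * whenever the caller restricts PhysHighest to less than 4GB.  Only PAGE_SIZE
 * alignment is supported.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   cb              Number of bytes to allocate.
 * @param   PhysHighest     The highest acceptable physical address.
 * @param   uAlignment      The requested alignment, must be PAGE_SIZE.
 */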
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    /** @todo alignment */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


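/**
 * Allocates non-contiguous physical memory; simply defers to the contiguous
 * allocator above, which satisfies the request as well.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   cb              Number of bytes to allocate.
 * @param   PhysHighest     The highest acceptable physical address.
 */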
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}


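/**
 * Creates a memory object for a pre-existing physical address range without
 * allocating or mapping anything.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the memory object on success.
 * @param   Phys            The physical base address of the range.
 * @param   cb              The size of the range in bytes.
 * @param   uCachePolicy    Must be RTMEM_CACHE_POLICY_DONT_CARE.
 */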
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* there is no allocation here, right? it needs to be mapped somewhere first. */
    pMemOs2->Core.u.Phys.fAllocated = false;
    pMemOs2->Core.u.Phys.PhysBase = Phys;
    pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemOs2->Core;
    return VINF_SUCCESS;
}


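/**
 * Locks (pins) a range of user (ring-3) memory belonging to the calling
 * process using KernVMLock and records the resulting page list.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the memory object on success.
 * @param   R3Ptr       The user address to lock.
 * @param   cb          Number of bytes to lock.
 * @param   fAccess     RTMEM_PROT_XXX flags; write access adds VMDHL_WRITE.
 * @param   R0Process   The process owning the memory, must be the caller.
 */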
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                           RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


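/**
 * Locks (pins) a range of kernel memory using KernVMLock and records the
 * resulting page list.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the memory object on success.
 * @param   pv          The kernel address to lock.
 * @param   cb          Number of bytes to lock.
 * @param   fAccess     RTMEM_PROT_XXX flags; write access adds VMDHL_WRITE.
 */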
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                           RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


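/**
 * Reserving kernel address space is not implemented on OS/2.
 *
 * @returns VERR_NOT_SUPPORTED.
 */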
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    RT_NOREF(ppMem, pvFixed, cb, uAlignment);
    return VERR_NOT_SUPPORTED;
}


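/**
 * Reserving user address space is not implemented on OS/2.
 *
 * @returns VERR_NOT_SUPPORTED.
 */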
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
    return VERR_NOT_SUPPORTED;
}


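/**
 * Maps an existing memory object into kernel space.  For objects that already
 * have a ring-0 address this just wraps that address in a dummy mapping
 * object; unmapped RTR0MEMOBJTYPE_PHYS objects first get a mapping created
 * via KernVMAlloc(VMDHA_PHYS).
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the mapping object on success.
 * @param   pMemToMap   The object to map.
 * @param   pvFixed     Must be (void *)-1 (no fixed address support).
 * @param   uAlignment  Must not exceed PAGE_SIZE.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   offSub      Must be 0 (sub-range mapping not supported).
 * @param   cbSub       Must be 0 (sub-range mapping not supported).
 */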
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;


/** @todo finish the implementation. */

    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = (ULONG)pMemToMapOs2->Core.u.Phys.PhysBase;
                AssertReturn(ulPhys == pMemToMapOs2->Core.u.Phys.PhysBase, VERR_OUT_OF_RANGE);
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_3;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there aren't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


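/**
 * Maps an existing memory object into the calling process, using
 * RTR0Os2DHVMGlobalToProcess to make the ring-0 mapping visible in ring-3.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the mapping object on success.
 * @param   pMemToMap   The object to map.
 * @param   R3PtrFixed  Must be (RTR3PTR)-1 (no fixed address support).
 * @param   uAlignment  Must not exceed PAGE_SIZE.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   R0Process   The target process, must be the caller.
 */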
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}


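/**
 * Changing the protection of a memory object is not implemented on OS/2.
 *
 * @returns VERR_NOT_SUPPORTED.
 */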
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


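/**
 * Returns the physical address of a page within the memory object, taken
 * either from the recorded page list or computed from the contiguous /
 * physical base address.
 *
 * @returns The page's physical address, NIL_RTHCPHYS if not applicable.
 * @param   pMem    The memory object.
 * @param   iPage   The page index.
 */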
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;

    switch (pMemOs2->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return pMemOs2->aPages[iPage].Addr;

        case RTR0MEMOBJTYPE_CONT:
            return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            return NIL_RTHCPHYS;
    }
}


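/*
 * Illustration of the fix-up performed by rtR0MemObjFixPageList below:
 * KernLinToPageList may return a single entry for a physically contiguous
 * run, e.g. { Addr=0x00230000, Size=0x3000 }.  The function expands it in
 * place, working from the end of the array backwards, into one PAGE_SIZE
 * entry per page:
 *      { 0x00230000, 0x1000 }, { 0x00231000, 0x1000 }, { 0x00232000, 0x1000 }
 * so that rtR0MemObjNativeGetPagePhysAddr can index individual pages.
 */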
/**
 * Expands the page list so we can index pages directly.
 *
 * @param   paPages     The page list array to fix.
 * @param   cPages      The number of pages that's supposed to go into the list.
 * @param   cPagesRet   The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            if (Page.Size > PAGE_SIZE)
            {
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}
