VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@ 77120

Last change on this file since 77120 was 77120, checked in by vboxsync, 6 years ago

IPRT: Some license header cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 20.7 KB
Line 
1/* $Id: memobj-r0drv-os2.cpp 77120 2019-02-01 15:08:46Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, OS/2.
4 */
5
6/*
7 * Contributed by knut st. osmundsen.
8 *
9 * Copyright (C) 2007-2019 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * The contents of this file may alternatively be used under the terms
20 * of the Common Development and Distribution License Version 1.0
21 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
22 * VirtualBox OSE distribution, in which case the provisions of the
23 * CDDL are applicable instead of those of the GPL.
24 *
25 * You may elect to license modified versions of this file under the
26 * terms and conditions of either the GPL or the CDDL or both.
27 *
28 * --------------------------------------------------------------------
29 *
30 * This code is based on:
31 *
32 * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
33 *
34 * Permission is hereby granted, free of charge, to any person
35 * obtaining a copy of this software and associated documentation
36 * files (the "Software"), to deal in the Software without
37 * restriction, including without limitation the rights to use,
38 * copy, modify, merge, publish, distribute, sublicense, and/or sell
39 * copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following
41 * conditions:
42 *
43 * The above copyright notice and this permission notice shall be
44 * included in all copies or substantial portions of the Software.
45 *
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
47 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
48 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
49 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
50 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
51 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
52 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
53 * OTHER DEALINGS IN THE SOFTWARE.
54 */
55
56
57/*********************************************************************************************************************************
58* Header Files *
59*********************************************************************************************************************************/
60#include "the-os2-kernel.h"
61
62#include <iprt/memobj.h>
63#include <iprt/mem.h>
64#include <iprt/err.h>
65#include <iprt/assert.h>
66#include <iprt/log.h>
67#include <iprt/param.h>
68#include <iprt/process.h>
69#include "internal/memobj.h"
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75/**
76 * The OS/2 version of the memory object structure.
77 */
78typedef struct RTR0MEMOBJDARWIN
79{
80 /** The core structure. */
81 RTR0MEMOBJINTERNAL Core;
82 /** Lock for the ring-3 / ring-0 pinned objectes.
83 * This member might not be allocated for some object types. */
84 KernVMLock_t Lock;
85 /** Array of physical pages.
86 * This array can be 0 in length for some object types. */
87 KernPageList_t aPages[1];
88} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
89
90
91/*********************************************************************************************************************************
92* Internal Functions *
93*********************************************************************************************************************************/
94static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);
95
96
97DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
98{
99 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
100 int rc;
101
102 switch (pMemOs2->Core.enmType)
103 {
104 case RTR0MEMOBJTYPE_PHYS_NC:
105 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
106 return VERR_INTERNAL_ERROR;
107
108 case RTR0MEMOBJTYPE_PHYS:
109 if (!pMemOs2->Core.pv)
110 break;
111
112 case RTR0MEMOBJTYPE_MAPPING:
113 if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
114 break;
115
116 RT_FALL_THRU();
117 case RTR0MEMOBJTYPE_PAGE:
118 case RTR0MEMOBJTYPE_LOW:
119 case RTR0MEMOBJTYPE_CONT:
120 rc = KernVMFree(pMemOs2->Core.pv);
121 AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
122 break;
123
124 case RTR0MEMOBJTYPE_LOCK:
125 rc = KernVMUnlock(&pMemOs2->Lock);
126 AssertMsg(!rc, ("rc=%d\n", rc));
127 break;
128
129 case RTR0MEMOBJTYPE_RES_VIRT:
130 default:
131 AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
132 return VERR_INTERNAL_ERROR;
133 }
134
135 return VINF_SUCCESS;
136}
137
138
139DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
140{
141 NOREF(fExecutable);
142
143 /* create the object. */
144 const ULONG cPages = cb >> PAGE_SHIFT;
145 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
146 RTR0MEMOBJTYPE_PAGE, NULL, cb);
147 if (!pMemOs2)
148 return VERR_NO_MEMORY;
149
150 /* do the allocation. */
151 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
152 if (!rc)
153 {
154 ULONG cPagesRet = cPages;
155 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
156 if (!rc)
157 {
158 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
159 *ppMem = &pMemOs2->Core;
160 return VINF_SUCCESS;
161 }
162 KernVMFree(pMemOs2->Core.pv);
163 }
164 rtR0MemObjDelete(&pMemOs2->Core);
165 return RTErrConvertFromOS2(rc);
166}
167
168
169DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
170{
171 NOREF(fExecutable);
172
173 /* create the object. */
174 const ULONG cPages = cb >> PAGE_SHIFT;
175 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
176 RTR0MEMOBJTYPE_LOW, NULL, cb);
177 if (!pMemOs2)
178 return VERR_NO_MEMORY;
179
180 /* do the allocation. */
181 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
182 if (!rc)
183 {
184 ULONG cPagesRet = cPages;
185 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
186 if (!rc)
187 {
188 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
189 *ppMem = &pMemOs2->Core;
190 return VINF_SUCCESS;
191 }
192 KernVMFree(pMemOs2->Core.pv);
193 }
194 rtR0MemObjDelete(&pMemOs2->Core);
195 rc = RTErrConvertFromOS2(rc);
196 return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
197}
198
199
200DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
201{
202 NOREF(fExecutable);
203
204 /* create the object. */
205 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
206 if (!pMemOs2)
207 return VERR_NO_MEMORY;
208
209 /* do the allocation. */
210 ULONG ulPhys = ~0UL;
211 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
212 if (!rc)
213 {
214 Assert(ulPhys != ~0UL);
215 pMemOs2->Core.u.Cont.Phys = ulPhys;
216 *ppMem = &pMemOs2->Core;
217 return VINF_SUCCESS;
218 }
219 rtR0MemObjDelete(&pMemOs2->Core);
220 return RTErrConvertFromOS2(rc);
221}
222
223
224DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
225{
226 AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
227
228 /** @todo alignment */
229 if (uAlignment != PAGE_SIZE)
230 return VERR_NOT_SUPPORTED;
231
232 /* create the object. */
233 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
234 if (!pMemOs2)
235 return VERR_NO_MEMORY;
236
237 /* do the allocation. */
238 ULONG ulPhys = ~0UL;
239 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
240 if (!rc)
241 {
242 Assert(ulPhys != ~0UL);
243 pMemOs2->Core.u.Phys.fAllocated = true;
244 pMemOs2->Core.u.Phys.PhysBase = ulPhys;
245 *ppMem = &pMemOs2->Core;
246 return VINF_SUCCESS;
247 }
248 rtR0MemObjDelete(&pMemOs2->Core);
249 return RTErrConvertFromOS2(rc);
250}
251
252
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    /* No dedicated non-contiguous allocator here; delegate to the contiguous
       one, which satisfies the same constraints (contiguous is a superset). */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}
258
259
260DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
261{
262 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
263
264 /* create the object. */
265 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
266 if (!pMemOs2)
267 return VERR_NO_MEMORY;
268
269 /* there is no allocation here, right? it needs to be mapped somewhere first. */
270 pMemOs2->Core.u.Phys.fAllocated = false;
271 pMemOs2->Core.u.Phys.PhysBase = Phys;
272 pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
273 *ppMem = &pMemOs2->Core;
274 return VINF_SUCCESS;
275}
276
277
278DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
279 RTR0PROCESS R0Process)
280{
281 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
282
283 /* create the object. */
284 const ULONG cPages = cb >> PAGE_SHIFT;
285 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
286 RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
287 if (!pMemOs2)
288 return VERR_NO_MEMORY;
289
290 /* lock it. */
291 ULONG cPagesRet = cPages;
292 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
293 (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
294 if (!rc)
295 {
296 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
297 Assert(cb == pMemOs2->Core.cb);
298 Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
299 pMemOs2->Core.u.Lock.R0Process = R0Process;
300 *ppMem = &pMemOs2->Core;
301 return VINF_SUCCESS;
302 }
303 rtR0MemObjDelete(&pMemOs2->Core);
304 return RTErrConvertFromOS2(rc);
305}
306
307
308DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
309{
310 /* create the object. */
311 const ULONG cPages = cb >> PAGE_SHIFT;
312 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
313 RTR0MEMOBJTYPE_LOCK, pv, cb);
314 if (!pMemOs2)
315 return VERR_NO_MEMORY;
316
317 /* lock it. */
318 ULONG cPagesRet = cPages;
319 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
320 pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
321 if (!rc)
322 {
323 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
324 pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
325 *ppMem = &pMemOs2->Core;
326 return VINF_SUCCESS;
327 }
328 rtR0MemObjDelete(&pMemOs2->Core);
329 return RTErrConvertFromOS2(rc);
330}
331
332
333DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
334{
335 RT_NOREF(ppMem, pvFixed, cb, uAlignment);
336 return VERR_NOT_SUPPORTED;
337}
338
339
340DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
341 RTR0PROCESS R0Process)
342{
343 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
344 return VERR_NOT_SUPPORTED;
345}
346
347
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Sub-range mappings and fixed placement are not supported by this backend. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;


/** @todo finish the implementation. */

    /*
     * Resolve (or create) the ring-0 address of the object to map.
     */
    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                /* KernVMAlloc takes the physical base as a ULONG, so the range
                   must fit in 32 bits; the AssertReturn guards truncation. */
                ULONG ulPhys = (ULONG)pMemToMapOs2->Core.u.Phys.PhysBase;
                AssertReturn(ulPhys == pMemToMapOs2->Core.u.Phys.PhysBase, VERR_OUT_OF_RANGE);
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                /* Cache the new ring-0 mapping on the PHYS object; it is freed
                   by rtR0MemObjNativeFree when that object is destroyed. */
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_3;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only kernel locks have a usable ring-0 address here. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there isn't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        /* NIL marks this as a ring-0 (kernel) mapping. */
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
428
429
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* Only the current process and floating placement are supported. */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Resolve the ring-0 address of the object to map.
     */
    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* PHYS objects cannot currently be mapped into ring-3; the
               disabled code below was an attempt at it. */
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only kernel locks have a usable ring-0 address here. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        /* NOTE(review): VMDHGP_4MB for uAlignment == PAGE_SIZE looks odd —
           confirm against the DevHlp_VMGlobalToProcess flag documentation. */
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        /* Record the owning process so the mapping is freed on destruction. */
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
518
519
520DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
521{
522 NOREF(pMem);
523 NOREF(offSub);
524 NOREF(cbSub);
525 NOREF(fProt);
526 return VERR_NOT_SUPPORTED;
527}
528
529
530DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
531{
532 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
533
534 switch (pMemOs2->Core.enmType)
535 {
536 case RTR0MEMOBJTYPE_PAGE:
537 case RTR0MEMOBJTYPE_LOW:
538 case RTR0MEMOBJTYPE_LOCK:
539 case RTR0MEMOBJTYPE_PHYS_NC:
540 return pMemOs2->aPages[iPage].Addr;
541
542 case RTR0MEMOBJTYPE_CONT:
543 return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
544
545 case RTR0MEMOBJTYPE_PHYS:
546 return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
547
548 case RTR0MEMOBJTYPE_RES_VIRT:
549 case RTR0MEMOBJTYPE_MAPPING:
550 default:
551 return NIL_RTHCPHYS;
552 }
553}
554
555
/**
 * Expands the page list so we can index pages directly.
 *
 * The kernel returns a compressed list where a run of physically contiguous
 * pages is described by a single entry with Size > PAGE_SIZE.  This routine
 * expands the list in place, back to front, so that entry i describes exactly
 * page i (every entry ends up with Size == PAGE_SIZE).
 *
 * @param   paPages     The page list array to fix.
 * @param   cPages      The number of pages that's supposed to go into the list.
 * @param   cPagesRet   The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        /* Walk both cursors from the end so expanded output never overwrites
           input entries that are still to be read (iIn <= iOut throughout). */
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            KernPageList_t Page = paPages[iIn];
            /* Entries are expected to be page aligned and page-size granular. */
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            /* Split a multi-page run into single-page entries, emitting the
               highest-addressed page first since we are filling backwards. */
            if (Page.Size > PAGE_SIZE)
            {
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            /* The first (lowest-addressed) page of the run. */
            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}
597
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette