VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@ 95248

Last change on this file since 95248 was 94800, checked in by vboxsync, 3 years ago

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 53.5 KB
1/* $Id: PGMR0.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Ring-0.
4 */
5
6/*
7 * Copyright (C) 2007-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/rawpci.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/gmm.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/pdmdev.h>
30#include <VBox/vmm/vmcc.h>
31#include <VBox/vmm/gvm.h>
32#include "PGMInline.h"
33#include <VBox/log.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/mem.h>
37#include <iprt/memobj.h>
38#include <iprt/process.h>
39#include <iprt/rand.h>
40#include <iprt/string.h>
41#include <iprt/time.h>
42
43
44/*
45 * Instantiate the ring-0 header/code templates.
46 */
47/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
48#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
49#include "PGMR0Bth.h"
50#undef PGM_BTH_NAME
51
52#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
53#include "PGMR0Bth.h"
54#undef PGM_BTH_NAME
55
56#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
57#include "PGMR0Bth.h"
58#undef PGM_BTH_NAME
59
60#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
61#include "PGMR0Bth.h"
62#undef PGM_BTH_NAME
63
64
65/**
66 * Initializes the per-VM data for the PGM.
67 *
68 * This is called from under the GVMM lock, so it should only initialize the
69 * data so that PGMR0CleanupVM and others will work smoothly.
70 *
71 * @returns VBox status code.
72 * @param pGVM Pointer to the global VM structure.
73 * @param hMemObj Handle to the memory object backing pGVM.
74 */
75VMMR0_INT_DECL(int) PGMR0InitPerVMData(PGVM pGVM, RTR0MEMOBJ hMemObj)
76{
77 AssertCompile(sizeof(pGVM->pgm.s) <= sizeof(pGVM->pgm.padding));
78 AssertCompile(sizeof(pGVM->pgmr0.s) <= sizeof(pGVM->pgmr0.padding));
79
80 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
81 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
82 {
83 pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
84 pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
85 }
86 pGVM->pgmr0.s.hPhysHandlerMemObj = NIL_RTR0MEMOBJ;
87 pGVM->pgmr0.s.hPhysHandlerMapObj = NIL_RTR0MEMOBJ;
88
89 /*
90 * Initialize the handler type table with return to ring-3 callbacks so we
91 * don't have to do anything special for ring-3 only registrations.
92 *
93 * Note! The random bits of the hType value are mainly there to prevent trouble
94 * with zero-initialized handles w/o needing to sacrifice handle zero.
95 */
96 for (size_t i = 0; i < RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes); i++)
97 {
98 pGVM->pgmr0.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
99 pGVM->pgmr0.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
100 pGVM->pgmr0.s.aPhysHandlerTypes[i].pfnHandler = pgmR0HandlerPhysicalHandlerToRing3;
101 pGVM->pgmr0.s.aPhysHandlerTypes[i].pfnPfHandler = pgmR0HandlerPhysicalPfHandlerToRing3;
102
103 pGVM->pgm.s.aPhysHandlerTypes[i].hType = pGVM->pgmr0.s.aPhysHandlerTypes[i].hType;
104 pGVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
105 }
106
107 /*
108 * Get the physical address of the ZERO and MMIO-dummy pages.
109 */
110 AssertReturn(((uintptr_t)&pGVM->pgm.s.abZeroPg[0] & HOST_PAGE_OFFSET_MASK) == 0, VERR_INTERNAL_ERROR_2);
111 pGVM->pgm.s.HCPhysZeroPg = RTR0MemObjGetPagePhysAddr(hMemObj, RT_UOFFSETOF_DYN(GVM, pgm.s.abZeroPg) >> HOST_PAGE_SHIFT);
112 AssertReturn(pGVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
113
114 AssertReturn(((uintptr_t)&pGVM->pgm.s.abMmioPg[0] & HOST_PAGE_OFFSET_MASK) == 0, VERR_INTERNAL_ERROR_2);
115 pGVM->pgm.s.HCPhysMmioPg = RTR0MemObjGetPagePhysAddr(hMemObj, RT_UOFFSETOF_DYN(GVM, pgm.s.abMmioPg) >> HOST_PAGE_SHIFT);
116 AssertReturn(pGVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
117
118 pGVM->pgm.s.HCPhysInvMmioPg = pGVM->pgm.s.HCPhysMmioPg;
119
120 return RTCritSectInit(&pGVM->pgmr0.s.PoolGrowCritSect);
121}
122
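/*
 * A minimal sketch (not from the original file; the helper name is made up for
 * illustration) of how the randomized hType values initialized above map back
 * to their table index, assuming only that PGMPHYSHANDLERTYPE_IDX_MASK covers
 * the index bits as used by PGMR0HandlerPhysicalTypeSetUpContext() further down.
 */
#if 0 /* illustration only */
static uint32_t pgmR0ExampleHandlerTypeToIndex(PGMPHYSHANDLERTYPE hType)
{
    /* The low bits select the aPhysHandlerTypes[] entry; the random upper bits
       only guard against zero-initialized handles looking like valid ones. */
    return (uint32_t)(hType & PGMPHYSHANDLERTYPE_IDX_MASK);
}
#endif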
123
124/**
125 * Initialize the per-VM PGM for ring-0.
126 *
127 * @returns VBox status code.
128 * @param pGVM Pointer to the global VM structure.
129 */
130VMMR0_INT_DECL(int) PGMR0InitVM(PGVM pGVM)
131{
132 /*
133 * Set up the ring-0 context for our access handlers.
134 */
135 int rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/,
136 pgmPhysRomWriteHandler, pgmPhysRomWritePfHandler,
137 "ROM write protection", pGVM->pgm.s.hRomPhysHandlerType);
138 AssertLogRelRCReturn(rc, rc);
139
140 /*
141 * Register the physical access handler doing dirty MMIO2 tracing.
142 */
143 rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
144 pgmPhysMmio2WriteHandler, pgmPhysMmio2WritePfHandler,
145 "MMIO2 dirty page tracing", pGVM->pgm.s.hMmio2DirtyPhysHandlerType);
146 AssertLogRelRCReturn(rc, rc);
147
148 /*
149 * The page pool.
150 */
151 return pgmR0PoolInitVM(pGVM);
152}
153
154
155/**
156 * Called at the end of the ring-0 initialization to seal access handler types.
157 *
158 * @returns VBox status code.
159 * @param pGVM Pointer to the global VM structure.
160 */
161VMMR0_INT_DECL(void) PGMR0DoneInitVM(PGVM pGVM)
162{
163 /*
164 * Seal all the access handler types. Does both ring-3 and ring-0.
165 *
166 * Note! Since this is a void function and we don't have any ring-0 state
167 * machinery for marking the VM as bogus, this code will just
168 * override corrupted values as best it can.
169 */
170 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes));
171 for (size_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes); i++)
172 {
173 PPGMPHYSHANDLERTYPEINTR0 const pTypeR0 = &pGVM->pgmr0.s.aPhysHandlerTypes[i];
174 PPGMPHYSHANDLERTYPEINTR3 const pTypeR3 = &pGVM->pgm.s.aPhysHandlerTypes[i];
175 PGMPHYSHANDLERKIND const enmKindR3 = pTypeR3->enmKind;
176 PGMPHYSHANDLERKIND const enmKindR0 = pTypeR0->enmKind;
177 AssertLogRelMsgStmt(pTypeR0->hType == pTypeR3->hType,
178 ("i=%u %#RX64 vs %#RX64 %s\n", i, pTypeR0->hType, pTypeR3->hType, pTypeR0->pszDesc),
179 pTypeR3->hType = pTypeR0->hType);
180 switch (enmKindR3)
181 {
182 case PGMPHYSHANDLERKIND_ALL:
183 case PGMPHYSHANDLERKIND_MMIO:
184 if ( enmKindR0 == enmKindR3
185 || enmKindR0 == PGMPHYSHANDLERKIND_INVALID)
186 {
187 pTypeR3->fRing0Enabled = enmKindR0 == enmKindR3;
188 pTypeR0->uState = PGM_PAGE_HNDL_PHYS_STATE_ALL;
189 pTypeR3->uState = PGM_PAGE_HNDL_PHYS_STATE_ALL;
190 continue;
191 }
192 break;
193
194 case PGMPHYSHANDLERKIND_WRITE:
195 if ( enmKindR0 == enmKindR3
196 || enmKindR0 == PGMPHYSHANDLERKIND_INVALID)
197 {
198 pTypeR3->fRing0Enabled = enmKindR0 == enmKindR3;
199 pTypeR0->uState = PGM_PAGE_HNDL_PHYS_STATE_WRITE;
200 pTypeR3->uState = PGM_PAGE_HNDL_PHYS_STATE_WRITE;
201 continue;
202 }
203 break;
204
205 default:
206 AssertLogRelMsgFailed(("i=%u enmKindR3=%d\n", i, enmKindR3));
207 RT_FALL_THROUGH();
208 case PGMPHYSHANDLERKIND_INVALID:
209 AssertLogRelMsg(enmKindR0 == PGMPHYSHANDLERKIND_INVALID,
210 ("i=%u enmKind=%d %s\n", i, enmKindR0, pTypeR0->pszDesc));
211 AssertLogRelMsg(pTypeR0->pfnHandler == pgmR0HandlerPhysicalHandlerToRing3,
212 ("i=%u pfnHandler=%p %s\n", i, pTypeR0->pfnHandler, pTypeR0->pszDesc));
213 AssertLogRelMsg(pTypeR0->pfnPfHandler == pgmR0HandlerPhysicalPfHandlerToRing3,
214 ("i=%u pfnPfHandler=%p %s\n", i, pTypeR0->pfnPfHandler, pTypeR0->pszDesc));
215
216 /* Unused or bad ring-3 entry; make it and the ring-0 one harmless. */
217 pTypeR3->enmKind = PGMPHYSHANDLERKIND_END;
218 pTypeR3->fRing0DevInsIdx = false;
219 pTypeR3->fKeepPgmLock = false;
220 pTypeR3->uState = 0;
221 break;
222 }
223 pTypeR3->fRing0Enabled = false;
224
225 /* Make sure the entry is harmless and goes to ring-3. */
226 pTypeR0->enmKind = PGMPHYSHANDLERKIND_END;
227 pTypeR0->pfnHandler = pgmR0HandlerPhysicalHandlerToRing3;
228 pTypeR0->pfnPfHandler = pgmR0HandlerPhysicalPfHandlerToRing3;
229 pTypeR0->fRing0DevInsIdx = false;
230 pTypeR0->fKeepPgmLock = false;
231 pTypeR0->uState = 0;
232 pTypeR0->pszDesc = "invalid";
233 }
234}
235
236
237/**
238 * Cleans up any loose ends before the GVM structure is destroyed.
239 */
240VMMR0_INT_DECL(void) PGMR0CleanupVM(PGVM pGVM)
241{
242 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
243 {
244 if (pGVM->pgmr0.s.ahPoolMapObjs[i] != NIL_RTR0MEMOBJ)
245 {
246 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMapObjs[i], true /*fFreeMappings*/);
247 AssertRC(rc);
248 pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
249 }
250
251 if (pGVM->pgmr0.s.ahPoolMemObjs[i] != NIL_RTR0MEMOBJ)
252 {
253 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMemObjs[i], true /*fFreeMappings*/);
254 AssertRC(rc);
255 pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
256 }
257 }
258
259 if (pGVM->pgmr0.s.hPhysHandlerMapObj != NIL_RTR0MEMOBJ)
260 {
261 int rc = RTR0MemObjFree(pGVM->pgmr0.s.hPhysHandlerMapObj, true /*fFreeMappings*/);
262 AssertRC(rc);
263 pGVM->pgmr0.s.hPhysHandlerMapObj = NIL_RTR0MEMOBJ;
264 }
265
266 if (pGVM->pgmr0.s.hPhysHandlerMemObj != NIL_RTR0MEMOBJ)
267 {
268 int rc = RTR0MemObjFree(pGVM->pgmr0.s.hPhysHandlerMemObj, true /*fFreeMappings*/);
269 AssertRC(rc);
270 pGVM->pgmr0.s.hPhysHandlerMemObj = NIL_RTR0MEMOBJ;
271 }
272
273 if (RTCritSectIsInitialized(&pGVM->pgmr0.s.PoolGrowCritSect))
274 RTCritSectDelete(&pGVM->pgmr0.s.PoolGrowCritSect);
275}
276
277
278/**
279 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
280 *
281 * @returns The following VBox status codes.
282 * @retval VINF_SUCCESS on success. FF cleared.
283 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
284 *
285 * @param pGVM The global (ring-0) VM structure.
286 * @param idCpu The ID of the calling EMT.
287 * @param fRing3 Set if the caller is ring-3. Determines whether to
288 * return VINF_EM_NO_MEMORY or not.
289 *
290 * @thread EMT(idCpu)
291 *
292 * @remarks Must be called from within the PGM critical section. The caller
293 * must clear the new pages.
294 */
295int pgmR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu, bool fRing3)
296{
297 /*
298 * Validate inputs.
299 */
300 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
301 Assert(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf());
302 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
303
304 /*
305 * Check for error injection.
306 */
307 if (RT_LIKELY(!pGVM->pgm.s.fErrInjHandyPages))
308 { /* likely */ }
309 else
310 return VERR_NO_MEMORY;
311
312 /*
313 * Try to allocate a full set of handy pages.
314 */
315 uint32_t const iFirst = pGVM->pgm.s.cHandyPages;
316 AssertMsgReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), ("%#x\n", iFirst), VERR_PGM_HANDY_PAGE_IPE);
317
318 uint32_t const cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
319 if (!cPages)
320 return VINF_SUCCESS;
321
322 int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
323 if (RT_SUCCESS(rc))
324 {
325 uint32_t const cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages); /** @todo allow allocating less... */
326 pGVM->pgm.s.cHandyPages = cHandyPages;
327 VM_FF_CLEAR(pGVM, VM_FF_PGM_NEED_HANDY_PAGES);
328 VM_FF_CLEAR(pGVM, VM_FF_PGM_NO_MEMORY);
329
330#ifdef VBOX_STRICT
331 for (uint32_t i = 0; i < cHandyPages; i++)
332 {
333 Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
334 Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
335 Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
336 Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
337 Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
338 }
339#endif
340
341 /*
342 * Clear the pages.
343 */
344 for (uint32_t iPage = iFirst; iPage < cHandyPages; iPage++)
345 {
346 PGMMPAGEDESC pPage = &pGVM->pgm.s.aHandyPages[iPage];
347 if (!pPage->fZeroed)
348 {
349 void *pv = NULL;
350#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
351 rc = SUPR0HCPhysToVirt(pPage->HCPhysGCPhys, &pv);
352#else
353 rc = GMMR0PageIdToVirt(pGVM, pPage->idPage, &pv);
354#endif
355 AssertMsgRCReturn(rc, ("idPage=%#x HCPhys=%RHp rc=%Rrc\n", pPage->idPage, pPage->HCPhysGCPhys, rc), rc);
356
357 RT_BZERO(pv, GUEST_PAGE_SIZE);
358 pPage->fZeroed = true;
359 }
360#ifdef VBOX_STRICT
361 else
362 {
363 void *pv = NULL;
364# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
365 rc = SUPR0HCPhysToVirt(pPage->HCPhysGCPhys, &pv);
366# else
367 rc = GMMR0PageIdToVirt(pGVM, pPage->idPage, &pv);
368# endif
369 AssertMsgRCReturn(rc, ("idPage=%#x HCPhys=%RHp rc=%Rrc\n", pPage->idPage, pPage->HCPhysGCPhys, rc), rc);
370 AssertReturn(ASMMemIsZero(pv, GUEST_PAGE_SIZE), VERR_PGM_HANDY_PAGE_IPE);
371 }
372#endif
373 Log3(("PGMR0PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
374 }
375 }
376 else
377 {
378 /*
379 * We should never get here unless there is a genuine shortage of
380 * memory (or some internal error). Flag the error so the VM can be
381 * suspended ASAP and the user informed. If we're totally out of
382 * handy pages we will return failure.
383 */
384 /* Report the failure. */
385 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc cHandyPages=%#x\n"
386 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
387 rc, pGVM->pgm.s.cHandyPages,
388 pGVM->pgm.s.cAllPages, pGVM->pgm.s.cPrivatePages, pGVM->pgm.s.cSharedPages, pGVM->pgm.s.cZeroPages));
389
390 GMMMEMSTATSREQ Stats = { { SUPVMMR0REQHDR_MAGIC, sizeof(Stats) }, 0, 0, 0, 0, 0 };
391 if (RT_SUCCESS(GMMR0QueryMemoryStatsReq(pGVM, idCpu, &Stats)))
392 LogRel(("GMM: Statistics:\n"
393 " Allocated pages: %RX64\n"
394 " Free pages: %RX64\n"
395 " Shared pages: %RX64\n"
396 " Maximum pages: %RX64\n"
397 " Ballooned pages: %RX64\n",
398 Stats.cAllocPages, Stats.cFreePages, Stats.cSharedPages, Stats.cMaxPages, Stats.cBalloonedPages));
399
400 if ( rc != VERR_NO_MEMORY
401 && rc != VERR_NO_PHYS_MEMORY
402 && rc != VERR_LOCK_FAILED)
403 for (uint32_t iPage = 0; iPage < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); iPage++)
404 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
405 iPage, pGVM->pgm.s.aHandyPages[iPage].HCPhysGCPhys, pGVM->pgm.s.aHandyPages[iPage].idPage,
406 pGVM->pgm.s.aHandyPages[iPage].idSharedPage));
407
408 /* Set the FFs and adjust rc. */
409 VM_FF_SET(pGVM, VM_FF_PGM_NEED_HANDY_PAGES);
410 VM_FF_SET(pGVM, VM_FF_PGM_NO_MEMORY);
411 if (!fRing3)
412 if ( rc == VERR_NO_MEMORY
413 || rc == VERR_NO_PHYS_MEMORY
414 || rc == VERR_LOCK_FAILED
415 || rc == VERR_MAP_FAILED)
416 rc = VINF_EM_NO_MEMORY;
417 }
418
419 LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
420 return rc;
421}
422
423
424/**
425 * Worker function for PGMR3PhysAllocateHandyPages / VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES.
426 *
427 * @returns The following VBox status codes.
428 * @retval VINF_SUCCESS on success. FF cleared.
429 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
430 *
431 * @param pGVM The global (ring-0) VM structure.
432 * @param idCpu The ID of the calling EMT.
433 *
434 * @thread EMT(idCpu)
435 *
436 * @remarks Must be called from within the PGM critical section. The caller
437 * must clear the new pages.
438 */
439VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
440{
441 /*
442 * Validate inputs.
443 */
444 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
445 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
446
447 /*
448 * Enter the PGM lock and call the worker.
449 */
450 int rc = PGM_LOCK(pGVM);
451 if (RT_SUCCESS(rc))
452 {
453 rc = pgmR0PhysAllocateHandyPages(pGVM, idCpu, true /*fRing3*/);
454 PGM_UNLOCK(pGVM);
455 }
456 return rc;
457}
458
459
460/**
461 * Flushes any changes pending in the handy page array.
462 *
463 * It is very important that this gets done when page sharing is enabled.
464 *
465 * @returns The following VBox status codes.
466 * @retval VINF_SUCCESS on success. FF cleared.
467 *
468 * @param pGVM The global (ring-0) VM structure.
469 * @param idCpu The ID of the calling EMT.
470 *
471 * @thread EMT(idCpu)
472 *
473 * @remarks Must be called from within the PGM critical section.
474 */
475VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu)
476{
477 /*
478 * Validate inputs.
479 */
480 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
481 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
482 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
483
484 /*
485 * Try to allocate a full set of handy pages.
486 */
487 uint32_t iFirst = pGVM->pgm.s.cHandyPages;
488 AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
489 uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
490 if (!cPages)
491 return VINF_SUCCESS;
492 int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, 0, &pGVM->pgm.s.aHandyPages[iFirst]);
493
494 LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
495 return rc;
496}
497
498
499/**
500 * Allocate a large page at @a GCPhys.
501 *
502 * @returns The following VBox status codes.
503 * @retval VINF_SUCCESS on success.
504 * @retval VINF_EM_NO_MEMORY if we're out of memory.
505 *
506 * @param pGVM The global (ring-0) VM structure.
507 * @param idCpu The ID of the calling EMT.
508 * @param GCPhys The guest physical address of the page.
509 *
510 * @thread EMT(idCpu)
511 *
512 * @remarks Must be called from within the PGM critical section. The caller
513 * must clear the new pages.
514 */
515int pgmR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
516{
517 STAM_PROFILE_START(&pGVM->pgm.s.Stats.StatLargePageAlloc2, a);
518 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
519
520 /*
521 * Allocate a large page.
522 */
523 RTHCPHYS HCPhys = NIL_GMMPAGEDESC_PHYS;
524 uint32_t idPage = NIL_GMM_PAGEID;
525
526 if (true) /** @todo pre-allocate 2-3 pages on the allocation thread. */
527 {
528 uint64_t const nsAllocStart = RTTimeNanoTS();
529 if (nsAllocStart < pGVM->pgm.s.nsLargePageRetry)
530 {
531 LogFlowFunc(("returns VERR_TRY_AGAIN - %RU64 ns left of hold off period\n", pGVM->pgm.s.nsLargePageRetry - nsAllocStart));
532 return VERR_TRY_AGAIN;
533 }
534
535 int const rc = GMMR0AllocateLargePage(pGVM, idCpu, _2M, &idPage, &HCPhys);
536
537 uint64_t const nsAllocEnd = RTTimeNanoTS();
538 uint64_t const cNsElapsed = nsAllocEnd - nsAllocStart;
539 STAM_REL_PROFILE_ADD_PERIOD(&pGVM->pgm.s.StatLargePageAlloc, cNsElapsed);
540 if (cNsElapsed < RT_NS_100MS)
541 pGVM->pgm.s.cLargePageLongAllocRepeats = 0;
542 else
543 {
544 /* If a large page allocation takes more than 100ms, back off for a
545 while so the host OS can reshuffle memory and make some more large
546 pages available. However, if it took over a second, just disable it. */
547 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageOverflow);
548 pGVM->pgm.s.cLargePageLongAllocRepeats++;
549 if (cNsElapsed > RT_NS_1SEC)
550 {
551 LogRel(("PGMR0PhysAllocateLargePage: Disabling large pages after %'RU64 ns allocation time.\n", cNsElapsed));
552 PGMSetLargePageUsage(pGVM, false);
553 }
554 else
555 {
556 Log(("PGMR0PhysAllocateLargePage: Suspending large page allocations for %u sec after %'RU64 ns allocation time.\n",
557 30 * pGVM->pgm.s.cLargePageLongAllocRepeats, cNsElapsed));
558 pGVM->pgm.s.nsLargePageRetry = nsAllocEnd + RT_NS_30SEC * pGVM->pgm.s.cLargePageLongAllocRepeats;
559 }
560 }
561
562 if (RT_FAILURE(rc))
563 {
564 Log(("PGMR0PhysAllocateLargePage: Failed: %Rrc\n", rc));
565 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageAllocFailed);
566 if (rc == VERR_NOT_SUPPORTED)
567 {
568 LogRel(("PGM: Disabling large pages because of VERR_NOT_SUPPORTED status.\n"));
569 PGMSetLargePageUsage(pGVM, false);
570 }
571 return rc;
572 }
573 }
574
575 STAM_PROFILE_STOP_START(&pGVM->pgm.s.Stats.StatLargePageAlloc2, &pGVM->pgm.s.Stats.StatLargePageSetup, a);
576
577 /*
578 * Enter the pages into PGM.
579 */
580 bool fFlushTLBs = false;
581 VBOXSTRICTRC rc = VINF_SUCCESS;
582 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
583 while (cLeft-- > 0)
584 {
585 PPGMPAGE const pPage = pgmPhysGetPage(pGVM, GCPhys);
586 AssertReturn(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM && PGM_PAGE_IS_ZERO(pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
587
588 /* Make sure there are no zero mappings. */
589 uint16_t const u16Tracking = PGM_PAGE_GET_TRACKING(pPage);
590 if (u16Tracking == 0)
591 Assert(PGM_PAGE_GET_PTE_INDEX(pPage) == 0);
592 else
593 {
594 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageZeroEvict);
595 VBOXSTRICTRC rc3 = pgmPoolTrackUpdateGCPhys(pGVM, GCPhys, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
596 Log(("PGMR0PhysAllocateLargePage: GCPhys=%RGp: tracking=%#x rc3=%Rrc\n", GCPhys, u16Tracking, VBOXSTRICTRC_VAL(rc3)));
597 if (rc3 != VINF_SUCCESS && rc == VINF_SUCCESS)
598 rc = rc3; /** @todo not perfect... */
599 PGM_PAGE_SET_PTE_INDEX(pGVM, pPage, 0);
600 PGM_PAGE_SET_TRACKING(pGVM, pPage, 0);
601 }
602
603 /* Setup the new page. */
604 PGM_PAGE_SET_HCPHYS(pGVM, pPage, HCPhys);
605 PGM_PAGE_SET_STATE(pGVM, pPage, PGM_PAGE_STATE_ALLOCATED);
606 PGM_PAGE_SET_PDE_TYPE(pGVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
607 PGM_PAGE_SET_PAGEID(pGVM, pPage, idPage);
608 Log3(("PGMR0PhysAllocateLargePage: GCPhys=%RGp: idPage=%#x HCPhys=%RGp (old tracking=%#x)\n",
609 GCPhys, idPage, HCPhys, u16Tracking));
610
611 /* advance */
612 idPage++;
613 HCPhys += GUEST_PAGE_SIZE;
614 GCPhys += GUEST_PAGE_SIZE;
615 }
616
617 STAM_COUNTER_ADD(&pGVM->pgm.s.Stats.StatRZPageReplaceZero, _2M / GUEST_PAGE_SIZE);
618 pGVM->pgm.s.cZeroPages -= _2M / GUEST_PAGE_SIZE;
619 pGVM->pgm.s.cPrivatePages += _2M / GUEST_PAGE_SIZE;
620
621 /*
622 * Flush all TLBs.
623 */
624 if (!fFlushTLBs)
625 { /* likely as we shouldn't normally map zero pages */ }
626 else
627 {
628 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageTlbFlush);
629 PGM_INVL_ALL_VCPU_TLBS(pGVM);
630 }
631 /** @todo this is a little expensive (~3000 ticks) since we'll have to
632 * invalidate everything. Add a version to the TLB? */
633 pgmPhysInvalidatePageMapTLB(pGVM);
634 IEMTlbInvalidateAllPhysicalAllCpus(pGVM, idCpu);
635
636 STAM_PROFILE_STOP(&pGVM->pgm.s.Stats.StatLargePageSetup, a);
637#if 0 /** @todo returning info statuses here might not be a great idea... */
638 LogFlow(("PGMR0PhysAllocateLargePage: returns %Rrc\n", VBOXSTRICTRC_VAL(rc) ));
639 return VBOXSTRICTRC_TODO(rc);
640#else
641 LogFlow(("PGMR0PhysAllocateLargePage: returns VINF_SUCCESS (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rc) ));
642 return VINF_SUCCESS;
643#endif
644}
645
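/*
 * Worked example of the back-off above (derived from the code, not part of the
 * original file): a first allocation taking, say, 250 ms sets
 * nsLargePageRetry = nsAllocEnd + 1 * RT_NS_30SEC; a second consecutive slow
 * allocation pushes it to nsAllocEnd + 2 * RT_NS_30SEC (60 s), and so on.
 * Any allocation finishing in under 100 ms resets cLargePageLongAllocRepeats,
 * while one taking more than a second disables large pages for the VM entirely.
 */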
646
647/**
648 * Allocate a large page at @a GCPhys.
649 *
650 * @returns The following VBox status codes.
651 * @retval VINF_SUCCESS on success.
652 * @retval VINF_EM_NO_MEMORY if we're out of memory.
653 *
654 * @param pGVM The global (ring-0) VM structure.
655 * @param idCpu The ID of the calling EMT.
656 * @param GCPhys The guest physical address of the page.
657 *
658 * @thread EMT(idCpu)
659 *
660 * @remarks Must be called from within the PGM critical section. The caller
661 * must clear the new pages.
662 */
663VMMR0_INT_DECL(int) PGMR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
664{
665 /*
666 * Validate inputs.
667 */
668 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
669 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
670
671 int rc = PGM_LOCK(pGVM);
672 AssertRCReturn(rc, rc);
673
674 /* The caller might have done this already, but since we're ring-3 callable we
675 need to make sure everything is fine before starting the allocation here. */
676 for (unsigned i = 0; i < _2M / GUEST_PAGE_SIZE; i++)
677 {
678 PPGMPAGE pPage;
679 rc = pgmPhysGetPageEx(pGVM, GCPhys + i * GUEST_PAGE_SIZE, &pPage);
680 AssertRCReturnStmt(rc, PGM_UNLOCK(pGVM), rc);
681 AssertReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, PGM_UNLOCK(pGVM), VERR_PGM_PHYS_NOT_RAM);
682 AssertReturnStmt(PGM_PAGE_IS_ZERO(pPage), PGM_UNLOCK(pGVM), VERR_PGM_UNEXPECTED_PAGE_STATE);
683 }
684
685 /*
686 * Call common code.
687 */
688 rc = pgmR0PhysAllocateLargePage(pGVM, idCpu, GCPhys);
689
690 PGM_UNLOCK(pGVM);
691 return rc;
692}
693
694
695/**
696 * Locate a MMIO2 range.
697 *
698 * @returns Pointer to the MMIO2 range.
699 * @param pGVM The global (ring-0) VM structure.
700 * @param pDevIns The device instance owning the region.
701 * @param hMmio2 Handle to look up.
702 */
703DECLINLINE(PPGMREGMMIO2RANGE) pgmR0PhysMmio2Find(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
704{
705 /*
706 * We use the lookup table here as list walking is tedious in ring-0 when using
707 * ring-3 pointers and this probably will require some kind of refactoring anyway.
708 */
709 if (hMmio2 <= RT_ELEMENTS(pGVM->pgm.s.apMmio2RangesR0) && hMmio2 != 0)
710 {
711 PPGMREGMMIO2RANGE pCur = pGVM->pgm.s.apMmio2RangesR0[hMmio2 - 1];
712 if (pCur && pCur->pDevInsR3 == pDevIns->pDevInsForR3)
713 {
714 Assert(pCur->idMmio2 == hMmio2);
715 return pCur;
716 }
717 Assert(!pCur);
718 }
719 return NULL;
720}
721
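/*
 * Sketch of the handle convention used above (spelled out here for clarity,
 * not from the original file): hMmio2 is a 1-based index into
 * pGVM->pgm.s.apMmio2RangesR0[], so handle 0 stays invalid and entry
 * hMmio2 - 1 must both exist and belong to the requesting device instance
 * for the lookup to succeed.
 */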
722
723/**
724 * Worker for PDMDEVHLPR0::pfnMmio2SetUpContext.
725 *
726 * @returns VBox status code.
727 * @param pGVM The global (ring-0) VM structure.
728 * @param pDevIns The device instance.
729 * @param hMmio2 The MMIO2 region to map into ring-0 address space.
730 * @param offSub The offset into the region.
731 * @param cbSub The size of the mapping, zero meaning all the rest.
732 * @param ppvMapping Where to return the ring-0 mapping address.
733 */
734VMMR0_INT_DECL(int) PGMR0PhysMMIO2MapKernel(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
735 size_t offSub, size_t cbSub, void **ppvMapping)
736{
737 AssertReturn(!(offSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
738 AssertReturn(!(cbSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
739
740 /*
741 * Translate hRegion into a range pointer.
742 */
743 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR0PhysMmio2Find(pGVM, pDevIns, hMmio2);
744 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
745#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
746 uint8_t * const pvR0 = (uint8_t *)pFirstRegMmio->pvR0;
747#else
748 RTR3PTR const pvR3 = pFirstRegMmio->pvR3;
749#endif
750 RTGCPHYS const cbReal = pFirstRegMmio->cbReal;
751 pFirstRegMmio = NULL;
752 ASMCompilerBarrier();
753
754 AssertReturn(offSub < cbReal, VERR_OUT_OF_RANGE);
755 if (cbSub == 0)
756 cbSub = cbReal - offSub;
757 else
758 AssertReturn(cbSub < cbReal && cbSub + offSub <= cbReal, VERR_OUT_OF_RANGE);
759
760 /*
761 * Do the mapping.
762 */
763#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
764 AssertPtr(pvR0);
765 *ppvMapping = pvR0 + offSub;
766 return VINF_SUCCESS;
767#else
768 return SUPR0PageMapKernel(pGVM->pSession, pvR3, (uint32_t)offSub, (uint32_t)cbSub, 0 /*fFlags*/, ppvMapping);
769#endif
770}
771
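/*
 * A hedged usage sketch for the mapping helper above (illustration only; the
 * variable names and the handle value are hypothetical). offSub and cbSub must
 * be host-page aligned, and cbSub == 0 maps everything from offSub to the end
 * of the region.
 */
#if 0
    void *pvMmio2 = NULL;
    int   rc = PGMR0PhysMMIO2MapKernel(pGVM, pDevIns, hMmio2 /* handle from ring-3 registration */,
                                       0 /*offSub*/, 0 /*cbSub: whole region*/, &pvMmio2);
    if (RT_SUCCESS(rc))
    {
        /* pvMmio2 now points at the ring-0 mapping of the MMIO2 backing pages. */
    }
#endif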
772
773/**
774 * This is called during PGMR3Init to init the physical access handler allocator
775 * and tree.
776 *
777 * @returns VBox status code.
778 * @param pGVM Pointer to the global VM structure.
779 * @param cEntries Desired number of physical access handlers to reserve
780 * space for (will be adjusted).
781 * @thread EMT(0)
782 */
783VMMR0_INT_DECL(int) PGMR0PhysHandlerInitReqHandler(PGVM pGVM, uint32_t cEntries)
784{
785 /*
786 * Validate the input and state.
787 */
788 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
789 AssertRCReturn(rc, rc);
790 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); /** @todo ring-0 safe state check. */
791
792 AssertReturn(pGVM->pgmr0.s.PhysHandlerAllocator.m_paNodes == NULL, VERR_WRONG_ORDER);
793 AssertReturn(pGVM->pgm.s.PhysHandlerAllocator.m_paNodes == NULL, VERR_WRONG_ORDER);
794
795 AssertLogRelMsgReturn(cEntries <= _64K, ("%#x\n", cEntries), VERR_OUT_OF_RANGE);
796
797 /*
798 * Calculate the table size and allocate it.
799 */
800 uint32_t cbTreeAndBitmap = 0;
801 uint32_t const cbTotalAligned = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
802 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
803 rc = RTR0MemObjAllocPage(&hMemObj, cbTotalAligned, false);
804 if (RT_SUCCESS(rc))
805 {
806 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
807 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
808 if (RT_SUCCESS(rc))
809 {
810 uint8_t *pb = (uint8_t *)RTR0MemObjAddress(hMemObj);
811 if (!RTR0MemObjWasZeroInitialized(hMemObj))
812 RT_BZERO(pb, cbTotalAligned);
813
814 pGVM->pgmr0.s.PhysHandlerAllocator.initSlabAllocator(cEntries, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
815 (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
816 pGVM->pgmr0.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
817 pGVM->pgmr0.s.pPhysHandlerTree->initWithAllocator(&pGVM->pgmr0.s.PhysHandlerAllocator);
818 pGVM->pgmr0.s.hPhysHandlerMemObj = hMemObj;
819 pGVM->pgmr0.s.hPhysHandlerMapObj = hMapObj;
820
821 AssertCompile(sizeof(pGVM->pgm.s.PhysHandlerAllocator) == sizeof(pGVM->pgmr0.s.PhysHandlerAllocator));
822 RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj);
823 pGVM->pgm.s.pPhysHandlerTree = R3Ptr;
824 pGVM->pgm.s.PhysHandlerAllocator.m_paNodes = R3Ptr + cbTreeAndBitmap;
825 pGVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc = R3Ptr + sizeof(PGMPHYSHANDLERTREE);
826 pGVM->pgm.s.PhysHandlerAllocator.m_cNodes = cEntries;
827 pGVM->pgm.s.PhysHandlerAllocator.m_cErrors = 0;
828 pGVM->pgm.s.PhysHandlerAllocator.m_idxAllocHint = 0;
829 pGVM->pgm.s.PhysHandlerAllocator.m_uPadding = 0;
830 return VINF_SUCCESS;
831 }
832
833 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
834 }
835 return rc;
836}
837
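/*
 * Rough layout of the single allocation set up above (ordering inferred from
 * the assignments; the exact sizes come from pgmHandlerPhysicalCalcTableSizes()):
 *
 *   pb + 0                            PGMPHYSHANDLERTREE (pPhysHandlerTree)
 *   pb + sizeof(PGMPHYSHANDLERTREE)   allocation bitmap (m_pbmAlloc)
 *   pb + cbTreeAndBitmap              cEntries x PGMPHYSHANDLER nodes (m_paNodes)
 *
 * The same block is mapped into ring-3, so the ring-3 allocator fields are the
 * R3 mapping address plus the same offsets.
 */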
838
839/**
840 * Updates a physical access handler type with ring-0 callback functions.
841 *
842 * The handler type must first have been registered in ring-3.
843 *
844 * @returns VBox status code.
845 * @param pGVM The global (ring-0) VM structure.
846 * @param enmKind The kind of access handler.
847 * @param fFlags PGMPHYSHANDLER_F_XXX
848 * @param pfnHandler Pointer to the ring-0 handler callback.
849 * @param pfnPfHandler Pointer to the ring-0 \#PF handler
850 * callback. Can be NULL (not recommended though).
851 * @param pszDesc The type description.
852 * @param hType The handle to do ring-0 callback registrations for.
853 * @thread EMT(0)
854 */
855VMMR0_INT_DECL(int) PGMR0HandlerPhysicalTypeSetUpContext(PGVM pGVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
856 PFNPGMPHYSHANDLER pfnHandler, PFNPGMRZPHYSPFHANDLER pfnPfHandler,
857 const char *pszDesc, PGMPHYSHANDLERTYPE hType)
858{
859 /*
860 * Validate input.
861 */
862 AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
863 AssertPtrNullReturn(pfnPfHandler, VERR_INVALID_POINTER);
864
865 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
866 AssertReturn( enmKind == PGMPHYSHANDLERKIND_WRITE
867 || enmKind == PGMPHYSHANDLERKIND_ALL
868 || enmKind == PGMPHYSHANDLERKIND_MMIO,
869 VERR_INVALID_PARAMETER);
870 AssertMsgReturn(!(fFlags & ~PGMPHYSHANDLER_F_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
871
872 PPGMPHYSHANDLERTYPEINTR0 const pTypeR0 = &pGVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
873 AssertMsgReturn(hType == pTypeR0->hType, ("%#RX64, expected=%#RX64\n", hType, pTypeR0->hType), VERR_INVALID_HANDLE);
874 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes));
875 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == PGMPHYSHANDLERTYPE_IDX_MASK + 1);
876 AssertReturn(pTypeR0->enmKind == PGMPHYSHANDLERKIND_INVALID, VERR_ALREADY_INITIALIZED);
877
878 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
879 AssertRCReturn(rc, rc);
880 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); /** @todo ring-0 safe state check. */
881
882 PPGMPHYSHANDLERTYPEINTR3 const pTypeR3 = &pGVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
883 AssertMsgReturn(pTypeR3->enmKind == enmKind,
884 ("%#x: %d, expected %d\n", hType, pTypeR3->enmKind, enmKind),
885 VERR_INVALID_HANDLE);
886 AssertMsgReturn(pTypeR3->fKeepPgmLock == RT_BOOL(fFlags & PGMPHYSHANDLER_F_KEEP_PGM_LOCK),
887 ("%#x: %d, fFlags=%d\n", hType, pTypeR3->fKeepPgmLock, fFlags),
888 VERR_INVALID_HANDLE);
889 AssertMsgReturn(pTypeR3->fRing0DevInsIdx == RT_BOOL(fFlags & PGMPHYSHANDLER_F_R0_DEVINS_IDX),
890 ("%#x: %d, fFlags=%d\n", hType, pTypeR3->fRing0DevInsIdx, fFlags),
891 VERR_INVALID_HANDLE);
892
893 /*
894 * Update the entry.
895 */
896 pTypeR0->enmKind = enmKind;
897 pTypeR0->uState = enmKind == PGMPHYSHANDLERKIND_WRITE
898 ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
899 pTypeR0->fKeepPgmLock = RT_BOOL(fFlags & PGMPHYSHANDLER_F_KEEP_PGM_LOCK);
900 pTypeR0->fRing0DevInsIdx = RT_BOOL(fFlags & PGMPHYSHANDLER_F_R0_DEVINS_IDX);
901 pTypeR0->pfnHandler = pfnHandler;
902 pTypeR0->pfnPfHandler = pfnPfHandler;
903 pTypeR0->pszDesc = pszDesc;
904
905 pTypeR3->fRing0Enabled = true;
906
907 LogFlow(("PGMR0HandlerPhysicalTypeRegister: hType=%#x: enmKind=%d fFlags=%#x pfnHandler=%p pfnPfHandler=%p pszDesc=%s\n",
908 hType, enmKind, fFlags, pfnHandler, pfnPfHandler, pszDesc));
909 return VINF_SUCCESS;
910}
911
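/*
 * Usage sketch (mirrors the ROM write handler set-up in PGMR0InitVM() near the
 * top of this file; shown here only to tie the two pieces together): the type
 * is registered in ring-3 first, which hands out hType, then the ring-0
 * callbacks are attached during VM init.
 */
#if 0
    rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/,
                                              pgmPhysRomWriteHandler, pgmPhysRomWritePfHandler,
                                              "ROM write protection", pGVM->pgm.s.hRomPhysHandlerType);
    AssertLogRelRCReturn(rc, rc);
#endif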
912
913#ifdef VBOX_WITH_PCI_PASSTHROUGH
914/* Interface sketch. The interface belongs to a global PCI pass-through
915 manager. It shall use the global VM handle, not the user VM handle to
916 store the per-VM info (domain) since that is all ring-0 stuff, thus
917 passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0';
918 we can discuss the PciRaw code re-organization when I'm back from
919 vacation.
920
921 I've implemented the initial IOMMU set up below. For things to work
922 reliably, we will probably need to add a whole bunch of checks and
923 GPciRawR0GuestPageUpdate calls to the PGM code. For the present,
924 assuming nested paging (enforced) and prealloc (enforced), no
925 ballooning (check missing), page sharing (check missing) or live
926 migration (check missing), it might work fine. At least if some
927 VM power-off hook is present and can tear down the IOMMU page tables. */
928
929/**
930 * Tells the global PCI pass-through manager that we are about to set up the
931 * guest page to host page mappings for the specified VM.
932 *
933 * @returns VBox status code.
934 *
935 * @param pGVM The ring-0 VM structure.
936 */
937VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
938{
939 NOREF(pGVM);
940 return VINF_SUCCESS;
941}
942
943
944/**
945 * Assigns a host page mapping for a guest page.
946 *
947 * This is only used when setting up the mappings, i.e. between
948 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
949 *
950 * @returns VBox status code.
951 * @param pGVM The ring-0 VM structure.
952 * @param GCPhys The address of the guest page (page aligned).
953 * @param HCPhys The address of the host page (page aligned).
954 */
955VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
956{
957 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
958 AssertReturn(!(HCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
959
960 if (pGVM->rawpci.s.pfnContigMemInfo)
961 /** @todo what do we do on failure? */
962 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, HOST_PAGE_SIZE, PCIRAW_MEMINFO_MAP);
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Indicates that the specified guest page doesn't exist or doesn't have a host
970 * page mapping we trust PCI pass-through with.
971 *
972 * This is only used when setting up the mappings, i.e. between
973 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
974 *
975 * @returns VBox status code.
976 * @param pGVM The ring-0 VM structure.
977 * @param GCPhys The address of the guest page (page aligned).
979 */
980VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
981{
982 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
983
984 if (pGVM->rawpci.s.pfnContigMemInfo)
985 /** @todo what do we do on failure? */
986 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, HOST_PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);
987
988 return VINF_SUCCESS;
989}
990
991
992/**
993 * Tells the global PCI pass-through manager that we have completed setting up
994 * the guest page to host page mappings for the specified VM.
995 *
996 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
997 * if some page assignment failed.
998 *
999 * @returns VBox status code.
1000 *
1001 * @param pGVM The ring-0 VM structure.
1002 */
1003VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
1004{
1005 NOREF(pGVM);
1006 return VINF_SUCCESS;
1007}
1008
1009
1010/**
1011 * Tells the global PCI pass-through manager that a guest page mapping has
1012 * changed after the initial setup.
1013 *
1014 * @returns VBox status code.
1015 * @param pGVM The ring-0 VM structure.
1016 * @param GCPhys The address of the guest page (page aligned).
1017 * @param HCPhys The new host page address or NIL_RTHCPHYS if
1018 * now unassigned.
1019 */
1020VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
1021{
1022 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
1023 AssertReturn(!(HCPhys & HOST_PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
1024 NOREF(pGVM);
1025 return VINF_SUCCESS;
1026}
1027
1028#endif /* VBOX_WITH_PCI_PASSTHROUGH */
1029
1030
1031/**
1032 * Sets up the IOMMU when raw PCI device is enabled.
1033 *
1034 * @note This is a hack that will probably be remodelled and refined later!
1035 *
1036 * @returns VBox status code.
1037 *
1038 * @param pGVM The global (ring-0) VM structure.
1039 */
1040VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM)
1041{
1042 int rc = GVMMR0ValidateGVM(pGVM);
1043 if (RT_FAILURE(rc))
1044 return rc;
1045
1046#ifdef VBOX_WITH_PCI_PASSTHROUGH
1047 if (pGVM->pgm.s.fPciPassthrough)
1048 {
1049 /*
1050 * The Simplistic Approach - Enumerate all the pages and tell the
1051 * IOMMU about each of them.
1052 */
1053 PGM_LOCK_VOID(pGVM);
1054 rc = GPciRawR0GuestPageBeginAssignments(pGVM);
1055 if (RT_SUCCESS(rc))
1056 {
1057 for (PPGMRAMRANGE pRam = pGVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
1058 {
1059 PPGMPAGE pPage = &pRam->aPages[0];
1060 RTGCPHYS GCPhys = pRam->GCPhys;
1061 uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
1062 while (cLeft-- > 0)
1063 {
1064 /* Only expose pages that are 100% safe for now. */
1065 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1066 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
1067 && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1068 rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
1069 else
1070 rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
1071
1072 /* next */
1073 pPage++;
1074 GCPhys += HOST_PAGE_SIZE;
1075 }
1076 }
1077
1078 int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
1079 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1080 rc = rc2;
1081 }
1082 PGM_UNLOCK(pGVM);
1083 }
1084 else
1085#endif
1086 rc = VERR_NOT_SUPPORTED;
1087 return rc;
1088}
1089
1090
1091/**
1092 * \#PF Handler for nested paging.
1093 *
1094 * @returns VBox status code (appropriate for trap handling and GC return).
1095 * @param pGVM The global (ring-0) VM structure.
1096 * @param pGVCpu The global (ring-0) CPU structure of the calling
1097 * EMT.
1098 * @param enmShwPagingMode Paging mode for the nested page tables.
1099 * @param uErr The trap error code.
1100 * @param pRegFrame Trap register frame.
1101 * @param GCPhysFault The fault address.
1102 */
1103VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
1104 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
1105{
1106 int rc;
1107
1108 LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
1109 STAM_PROFILE_START(&pGVCpu->pgm.s.StatRZTrap0e, a);
1110 STAM_STATS({ pGVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
1111
1112 /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
1113 AssertMsg( enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX
1114 || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
1115 ("enmShwPagingMode=%d\n", enmShwPagingMode));
1116
1117 /* Reserved shouldn't end up here. */
1118 Assert(!(uErr & X86_TRAP_PF_RSVD));
1119
1120#ifdef VBOX_WITH_STATISTICS
1121 /*
1122 * Error code stats.
1123 */
1124 if (uErr & X86_TRAP_PF_US)
1125 {
1126 if (!(uErr & X86_TRAP_PF_P))
1127 {
1128 if (uErr & X86_TRAP_PF_RW)
1129 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
1130 else
1131 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1132 }
1133 else if (uErr & X86_TRAP_PF_RW)
1134 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1135 else if (uErr & X86_TRAP_PF_RSVD)
1136 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1137 else if (uErr & X86_TRAP_PF_ID)
1138 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1139 else
1140 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1141 }
1142 else
1143 { /* Supervisor */
1144 if (!(uErr & X86_TRAP_PF_P))
1145 {
1146 if (uErr & X86_TRAP_PF_RW)
1147 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1148 else
1149 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1150 }
1151 else if (uErr & X86_TRAP_PF_RW)
1152 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1153 else if (uErr & X86_TRAP_PF_ID)
1154 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1155 else if (uErr & X86_TRAP_PF_RSVD)
1156 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1157 }
1158#endif
1159
1160 /*
1161 * Call the worker.
1162 *
1163 * Note! We pretend the guest is in protected mode without paging, so we
1164 * can use existing code to build the nested page tables.
1165 */
1166/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1167 bool fLockTaken = false;
1168 switch (enmShwPagingMode)
1169 {
1170 case PGMMODE_32_BIT:
1171 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
1172 break;
1173 case PGMMODE_PAE:
1174 case PGMMODE_PAE_NX:
1175 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
1176 break;
1177 case PGMMODE_AMD64:
1178 case PGMMODE_AMD64_NX:
1179 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
1180 break;
1181 case PGMMODE_EPT:
1182 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
1183 break;
1184 default:
1185 AssertFailed();
1186 rc = VERR_INVALID_PARAMETER;
1187 break;
1188 }
1189 if (fLockTaken)
1190 {
1191 PGM_LOCK_ASSERT_OWNER(pGVM);
1192 PGM_UNLOCK(pGVM);
1193 }
1194
1195 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1196 rc = VINF_SUCCESS;
1197 /*
1198 * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
1199 * via its page tables, see @bugref{6043}.
1200 */
1201 else if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1202 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1203 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1204 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1205 {
1206 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
1207 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
1208 single VCPU VMs though. */
1209 rc = VINF_SUCCESS;
1210 }
1211
1212 STAM_STATS({ if (!pGVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1213 pGVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pGVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1214 STAM_PROFILE_STOP_EX(&pGVCpu->pgm.s.Stats.StatRZTrap0e, pGVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1215 return rc;
1216}
1217
1218
1219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1220/**
1221 * Nested \#PF Handler for nested-guest execution using nested paging.
1222 *
1223 * @returns Strict VBox status code (appropriate for trap handling and GC return).
1224 * @param pGVCpu The global (ring-0) CPU structure of the calling
1225 * EMT.
1226 * @param enmShwPagingMode Paging mode for the nested page tables.
1227 * @param uErr The trap error code.
1228 * @param pRegFrame Trap register frame.
1229 * @param GCPhysNested The nested-guest physical address causing the fault.
1230 * @param fIsLinearAddrValid Whether translation of a nested-guest linear address
1231 * caused this fault. If @c false, GCPtrNested must be
1232 * 0.
1233 * @param GCPtrNested The nested-guest linear address that caused this
1234 * fault.
1235 * @param pWalk Where to store the SLAT walk result.
1236 */
1237VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
1238 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
1239 bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk)
1240{
1241 Assert(enmShwPagingMode == PGMMODE_EPT);
1242 NOREF(enmShwPagingMode);
1243
1244 bool fLockTaken;
1245 VBOXSTRICTRC rcStrict = PGM_BTH_NAME_EPT_PROT(NestedTrap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysNested, fIsLinearAddrValid,
1246 GCPtrNested, pWalk, &fLockTaken);
1247 if (fLockTaken)
1248 {
1249 PGM_LOCK_ASSERT_OWNER(pGVCpu->CTX_SUFF(pVM));
1250 PGM_UNLOCK(pGVCpu->CTX_SUFF(pVM));
1251 }
1252 if (rcStrict == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1253 rcStrict = VINF_SUCCESS;
1254 return rcStrict;
1255}
1256#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1257
1258
1259/**
1260 * \#PF Handler for deliberate nested paging misconfiguration (reserved bit)
1261 * employed for MMIO pages.
1262 *
1263 * @returns VBox status code (appropriate for trap handling and GC return).
1264 * @param pGVM The global (ring-0) VM structure.
1265 * @param pGVCpu The global (ring-0) CPU structure of the calling
1266 * EMT.
1267 * @param enmShwPagingMode Paging mode for the nested page tables.
1268 * @param pRegFrame Trap register frame.
1269 * @param GCPhysFault The fault address.
1270 * @param uErr The error code, UINT32_MAX if not available
1271 * (VT-x).
1272 */
1273VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
1274 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
1275{
1276#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
1277 STAM_PROFILE_START(&pGVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
1278 VBOXSTRICTRC rc;
1279
1280 /*
1281 * Try to look up the all-access physical handler for the address.
1282 */
1283 PGM_LOCK_VOID(pGVM);
1284 PPGMPHYSHANDLER pHandler;
1285 rc = pgmHandlerPhysicalLookup(pGVM, GCPhysFault, &pHandler);
1286 if (RT_SUCCESS(rc))
1287 {
1288 PCPGMPHYSHANDLERTYPEINT pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pGVM, pHandler);
1289 if (RT_LIKELY(pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
1290 {
1291 /*
1292 * If the handler has aliased pages or pages that have been temporarily
1293 * disabled, we'll have to take a detour to make sure we resync them
1294 * to avoid lots of unnecessary exits.
1295 */
1296 PPGMPAGE pPage;
1297 if ( ( pHandler->cAliasedPages
1298 || pHandler->cTmpOffPages)
1299 && ( (pPage = pgmPhysGetPage(pGVM, GCPhysFault)) == NULL
1300 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1301 )
1302 {
1303 Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
1304 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatR0NpMiscfgSyncPage);
1305 rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
1306 PGM_UNLOCK(pGVM);
1307 }
1308 else
1309 {
1310 if (pHandlerType->pfnPfHandler)
1311 {
1312 uint64_t const uUser = !pHandlerType->fRing0DevInsIdx ? pHandler->uUser
1313 : (uintptr_t)PDMDeviceRing0IdxToInstance(pGVM, pHandler->uUser);
1314 STAM_PROFILE_START(&pHandler->Stat, h);
1315 PGM_UNLOCK(pGVM);
1316
1317 Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->pfnPfHandler, uErr, GCPhysFault, uUser));
1318 rc = pHandlerType->pfnPfHandler(pGVM, pGVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
1319 GCPhysFault, GCPhysFault, uUser);
1320
1321 STAM_PROFILE_STOP(&pHandler->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
1322 }
1323 else
1324 {
1325 PGM_UNLOCK(pGVM);
1326 Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
1327 rc = VINF_EM_RAW_EMULATE_INSTR;
1328 }
1329 }
1330 STAM_PROFILE_STOP(&pGVCpu->pgm.s.Stats.StatR0NpMiscfg, a);
1331 return rc;
1332 }
1333 }
1334 else
1335 AssertMsgReturn(rc == VERR_NOT_FOUND, ("%Rrc GCPhysFault=%RGp\n", VBOXSTRICTRC_VAL(rc), GCPhysFault), rc);
1336
1337 /*
1338 * Must be out of sync, so do a SyncPage and restart the instruction.
1339 *
1340 * ASSUMES that ALL handlers are page aligned and cover whole pages
1341 * (assumption asserted in PGMHandlerPhysicalRegisterEx).
1342 */
1343 Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
1344 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatR0NpMiscfgSyncPage);
1345 rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
1346 PGM_UNLOCK(pGVM);
1347
1348 STAM_PROFILE_STOP(&pGVCpu->pgm.s.Stats.StatR0NpMiscfg, a);
1349 return rc;
1350
1351#else
1352 AssertLogRelFailed();
1353 return VERR_PGM_NOT_USED_IN_MODE;
1354#endif
1355}
1356