VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInline.h@30160

Last change on this file since 30160 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.9 KB
/* $Id: PGMInline.h 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___PGMInline_h
#define ___PGMInline_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/stam.h>
#include <VBox/param.h>
#include <VBox/vmm.h>
#include <VBox/mm.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/log.h>
#include <VBox/gmm.h>
#include <VBox/hwaccm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/sha.h>



/** @addtogroup grp_pgm_int Internals
 * @internal
 * @{
 */

/** @todo Split out all the inline stuff into a separate file.  Then we can
 *        include it later when VM and VMCPU are defined and so avoid all that
 *        &pVM->pgm.s and &pVCpu->pgm.s stuff.  It also chops ~1600 lines off
 *        this file and will make it somewhat easier to navigate... */

/**
 * Gets the PGMRAMRANGE structure for a guest page.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                break;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return pRam;
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}
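
/*
 * Usage sketch for pgmPhysGetPageEx: a caller validates a guest physical
 * address and only touches the page on success. Illustrative only;
 * pgmExampleIsPageValid is a hypothetical helper, not part of PGM.
 */
#if 0
DECLINLINE(bool) pgmExampleIsPageValid(PPGM pPGM, RTGCPHYS GCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return false;   /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS: no RAM range covers GCPhys. */
    /* pPage now points into the owning PGMRAMRANGE's aPages array. */
    return true;
}
#endif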


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        pRam = pPGM->CTX_SUFF(pRamRanges);
        off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = pRam->CTX_SUFF(pNext);
                if (RT_UNLIKELY(!pRam))
                {
                    *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
                    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
                }
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}
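
/*
 * Usage sketch for pgmPhysGetPageWithHintEx: when walking a run of pages that
 * usually live in the same RAM range, the hint skips the list walk on all but
 * the first lookup. Illustrative only; pgmExampleTouchRun is hypothetical.
 */
#if 0
DECLINLINE(int) pgmExampleTouchRun(PPGM pPGM, RTGCPHYS GCPhysFirst, uint32_t cPages)
{
    PPGMRAMRANGE pRamHint = NULL;   /* must start out NULL as documented above */
    for (uint32_t i = 0; i < cPages; i++)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhysFirst + ((RTGCPHYS)i << PAGE_SHIFT), &pPage, &pRamHint);
        if (RT_FAILURE(rc))
            return rc;
        /* ... use pPage; pRamHint is now primed for the next iteration ... */
    }
    return VINF_SUCCESS;
}
#endif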


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppRam       Where to store the pointer to the PGMRAMRANGE.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and valid *ppPage and *ppRam on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the pointer to the PGMPAGE structure.
 * @param   ppRam       Where to store the pointer to the PGMRAMRANGE structure.
 */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppRam = NULL;  /* Shut up silly GCC warnings. */
                *ppPage = NULL; /* ditto */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Converts a GC physical address to its corresponding HC physical address.
 *
 * @returns VBox status code.
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   pHCPhys     Where to store the corresponding HC physical address.
 *
 * @deprecated  Doesn't deal with zero, shared or write monitored pages.
 *              Avoid when writing new code!
 */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return rc;
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}
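
/*
 * Usage sketch for pgmRamGCPhys2HCPhys: the page-offset bits are OR'ed back
 * in, so an unaligned GC physical address yields an equally unaligned HC
 * physical address. Illustrative only; the helper and address are made up.
 */
#if 0
DECLINLINE(int) pgmExampleGCPhys2HCPhys(PPGM pPGM, PRTHCPHYS pHCPhys)
{
    /* The sub-page offset 0x234 is carried over into *pHCPhys on success. */
    return pgmRamGCPhys2HCPhys(pPGM, UINT32_C(0x00101234), pHCPhys);
}
#endif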

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

/**
 * Inlined version of the ring-0 version of PGMDynMapHCPage that
 * optimizes access to pages already in the set.
 *
 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   HCPhys      The physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
    return VINF_SUCCESS;
}
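
/*
 * The AutoSet above acts as a small hash-indexed cache: PGMMAPSET_HASH picks
 * a slot and a single compare decides hit or miss. A minimal stand-alone
 * sketch of the same pattern follows; EXAMPLEMAPCACHE, its layout and hash
 * are hypothetical, not the real PGMMAPSET structure.
 */
#if 0
typedef struct EXAMPLEMAPCACHE
{
    struct { RTHCPHYS HCPhys; void *pvPage; } aEntries[64];
    uint8_t                                    aiHashTable[64];
    uint32_t                                   cEntries;
} EXAMPLEMAPCACHE;

DECLINLINE(void *) exampleCacheLookup(EXAMPLEMAPCACHE *pCache, RTHCPHYS HCPhys)
{
    unsigned iEntry = pCache->aiHashTable[(HCPhys >> PAGE_SHIFT) & 63];
    if (    iEntry < pCache->cEntries
        &&  pCache->aEntries[iEntry].HCPhys == HCPhys)
        return pCache->aEntries[iEntry].pvPage;     /* hit */
    return NULL;                                    /* miss: caller takes the slow path */
}
#endif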


/**
 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPage.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPage(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPage.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

/**
 * Maps the page into current context (RC and maybe R0).
 *
 * @returns Pointer to the mapping.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPage       The page.
 */
DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    {
        Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
        void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
# else
        PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
# endif
        return pv;
    }
    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
}

/**
 * Temporarily maps one host page specified by HC physical address, returning
 * a pointer within the page.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * @returns The address corresponding to HCPhys.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   HCPhys      HC physical address of the page.
 */
DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
{
    void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# else
    PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# endif
    pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
    return pv;
}
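
/*
 * Note on the masking above: the page is mapped from the aligned address and
 * the sub-page offset is OR'ed back into the returned pointer, so callers may
 * pass an unaligned HCPhys. A worked example (the value is made up):
 *
 *      HCPhys                       = 0x0000000123456789
 *      HCPhys & ~PAGE_OFFSET_MASK   = 0x0000000123456000  (page that gets mapped)
 *      HCPhys &  PAGE_OFFSET_MASK   =              0x789  (offset OR'ed into pv)
 */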

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#ifndef IN_RC

/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pPGM        The PGM instance handle.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}
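
/*
 * The physical TLB above is a direct-mapped cache: PGM_PAGEMAPTLB_IDX hashes
 * the page frame into a fixed-size entry array and a single GCPhys compare
 * decides hit or miss. Usage sketch (illustrative only; the helper name is
 * hypothetical):
 */
#if 0
DECLINLINE(int) pgmExampleQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys)
{
    PPGMPAGEMAPTLBE pTlbe;
    int rc = pgmPhysPageQueryTlbe(pPGM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        /* On success the TLB entry describes the page and its current mapping. */
    }
    return rc;
}
#endif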


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pPGM        The PGM instance handle.
 * @param   pPage       Pointer to the PGMPAGE structure corresponding to
 *                      GCPhys.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}

#endif /* !IN_RC */

/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit
 * paging mode, taking PSE-36 into account.
 *
 * @returns Guest physical address.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   Pde         Guest Pde.
 */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
{
    RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;

    return GCPhys & pPGM->GCPhys4MBPSEMask;
}
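
/*
 * PSE-36 worked example for pgmGstGet4MBPhysPage: the high page-number bits
 * of a 4 MB PDE (u8PageNoHigh) supply physical address bits 32 and up,
 * letting 32-bit paging address RAM above 4 GB. The values are made up:
 *
 *      Pde.u & X86_PDE4M_PG_MASK = 0x00400000     (low page number bits)
 *      Pde.b.u8PageNoHigh        = 0x01           (shifted left 32)
 *      GCPhys                    = 0x100400000,   then clipped by
 *                                  GCPhys4MBPSEMask to what the CPU supports.
 */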


/**
 * Gets the page directory entry for the specified address (32-bit paging).
 *
 * @returns The page directory entry in question.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PCX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    if (RT_FAILURE(rc))
    {
        X86PDE ZeroPde = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
    }
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}
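
/*
 * Index math for 32-bit paging: X86_PD_SHIFT is 22, so each of the 1024 PDEs
 * covers 4 MB of address space. For example (the address is made up):
 *
 *      GCPtr = 0xC0123456  =>  GCPtr >> X86_PD_SHIFT = 0x300  (PDE #768)
 */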


/**
 * Gets the address of a specific page directory entry (32-bit paging).
 *
 * @returns Pointer to the page directory entry in question.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns Pointer to the page directory in question.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return pGuestPD;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns Pointer to the page directory pointer table in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
# ifdef IN_RING3
    if (!pGuestPDPT)
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
# endif
#endif
    return pGuestPDPT;
}


/**
 * Gets the guest page directory pointer table entry for the specified address.
 *
 * @returns Pointer to the page directory pointer table entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
# ifdef IN_RING3
    if (!pGuestPDPT)
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
# endif
#endif
    return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
}
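
/*
 * Index math for PAE: address bits 30-31 select one of the four PDPT entries
 * and bits 21-29 select one of 512 PDEs. For example (the address is made up):
 *
 *      GCPtr = 0xC0123456  =>  iPdpt = 0xC0123456 >> 30           = 3
 *                              iPD   = (0xC0123456 >> 21) & 0x1ff = 0
 */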


/**
 * Gets the page directory for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return &pGuestPD->a[iPD];
        /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);
    X86PDEPAE ZeroPde = {0};
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    if (RT_LIKELY(pGuestPDPT))
    {
        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
        if (pGuestPDPT->a[iPdpt].n.u1Present)
        {
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            PX86PDPAE pGuestPD = NULL;
            int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
            AssertRCReturn(rc, ZeroPde);
#else
            PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
            if (    !pGuestPD
                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
                pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
            return pGuestPD->a[iPD];
        }
    }
    return ZeroPde;
}


/**
 * Gets the page directory pointer table entry for the specified address
 * and returns the index into the page directory.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory.
 * @param   pPdpe       Receives the page directory pointer entry. Optional.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pPdpe)
        *pPdpe = pGuestPDPT->a[iPdpt];
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        *piPD = iPD;
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}

#ifndef IN_RC

/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return pGuestPml4;
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return &pGuestPml4->a[iPml4];
}


/**
 * Gets a page map level-4 entry.
 *
 * @returns The PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    if (RT_FAILURE(rc))
    {
        X86PML4E ZeroPml4e = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
    }
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return pGuestPml4->a[iPml4];
}


/**
 * Gets the page directory pointer entry for the specified address.
 *
 * @returns Pointer to the page directory pointer entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 */
DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PX86PDPT pPdpt;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        return &pPdpt->a[iPdpt];
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 * @param   pPdpe       Page directory pointer table entry (out).
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
{
    X86PDEPAE ZeroPde = {0};
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }

    return ZeroPde;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE ZeroPde = {0};
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }
    return ZeroPde;
}
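
/*
 * Index math for the long-mode walk above, with a made-up address:
 *
 *      GCPtr = 0x00007F8012345678
 *      iPml4 = (GCPtr >> X86_PML4_SHIFT)   & X86_PML4_MASK       = 255
 *      iPdpt = (GCPtr >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_AMD64 = 0
 *      iPD   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK     = 145
 */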


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return &pPD->a[iPD];
        }
    }
    return NULL;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 * @param   pPdpe       Page directory pointer table entry (out).
 * @param   piPD        Receives the index into the returned page directory.
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGMCPU pPGM)
{
    return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}


/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGMCPU pPGM)
{
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory for the specified address, PAE, taking the
 * PDPT as input to avoid refetching it.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPdpt       Pointer to the shadow PAE PDPT.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    if (!pShwPde)
    {
        X86PDEPAE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}

#ifndef IN_RC

/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGMCPU pPGM)
{
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);

    if (!pShwPml4)
    {
        X86PML4E ZeroPml4e = {0};
        return ZeroPml4e;
    }
    return pShwPml4->a[iPml4];
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns Pointer to the entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    if (!pShwPml4)
        return NULL;
    return &pShwPml4->a[iPml4];
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory.
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the page state for a physical handler.
 *
 * @returns The physical handler page state.
 * @param   pCur    The physical handler in question.
 */
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            return PGM_PAGE_HNDL_PHYS_STATE_WRITE;

        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            return PGM_PAGE_HNDL_PHYS_STATE_ALL;

        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Gets the page state for a virtual handler.
 *
 * @returns The virtual handler page state.
 * @param   pCur    The virtual handler in question.
 * @remarks This should never be used on a hypervisor access handler.
 */
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
            return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
        case PGMVIRTHANDLERTYPE_ALL:
            return PGM_PAGE_HNDL_VIRT_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Clears one physical page of a virtual handler.
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    Virtual handler structure.
 * @param   iPage   Physical page index.
 *
 * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *          need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}


/**
 * Internal worker for finding an 'in-use' shadow page given by its index.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool   The pool.
 * @param   idx     The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


/**
 * Clear references to guest physical memory.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 * @param   iPte        Shadow PTE index.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
{
    /*
     * Just deal with the simple case here.
     */
# ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
# endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
        /* Invalidate the tracking data. */
        PGM_PAGE_SET_TRACKING(pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
    Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage));
}


/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PVM pVM = pPool->CTX_SUFF(pVM);
    pgmLock(pVM);

    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
    pgmUnlock(pVM);
}


/**
 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
 *
 * @param   pPool   The pool.
 * @param   pPage   PGM pool page.
 */
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    ASMAtomicIncU32(&pPage->cLocked);
}


/**
 * Unlocks a page to allow flushing again.
 *
 * @param   pPool   The pool.
 * @param   pPage   PGM pool page.
 */
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    Assert(pPage->cLocked);
    ASMAtomicDecU32(&pPage->cLocked);
}
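
/*
 * Usage sketch for the lock/unlock pair above: a shadow page that must
 * survive pool flushing (e.g. the active CR3 root) stays locked while in
 * use. Illustrative only; the helper shown is hypothetical.
 */
#if 0
DECLINLINE(void) pgmExampleUseLockedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    pgmPoolLockPage(pPool, pPage);      /* cLocked++: the pool won't flush this page */
    /* ... use the shadow page ... */
    pgmPoolUnlockPage(pPool, pPage);    /* cLocked--: flushable again */
}
#endif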


/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
 *
 * @returns true if locked, false if not.
 * @param   pPGM    Pointer to the PGM instance data. Unused.
 * @param   pPage   PGM pool page.
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}


/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param   pPGM    Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled;
#endif
}


/**
 * Checks if the mappings are floating and enabled.
 *
 * @returns true / false.
 * @param   pPGM    Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled
        && !pPGM->fMappingsFixed;
#endif
}

/** @} */

#endif /* !___PGMInline_h */