VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@36054

Last change on this file since 36054 was 36009, checked in by vboxsync, 14 years ago

PGM: Fixed large pages and write monitoring (live snapshot). Added checks for PGM_PAGE_PDE_TYPE_PDE_DISABLED in a few places where only PGM_PAGE_PDE_TYPE_PDE was checked for (might have missed some).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.9 KB
1/* $Id: PGMInline.h 36009 2011-02-17 10:15:02Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___PGMInline_h
19#define ___PGMInline_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/err.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/param.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/pdmcritsect.h>
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/dis.h>
31#include <VBox/vmm/dbgf.h>
32#include <VBox/log.h>
33#include <VBox/vmm/gmm.h>
34#include <VBox/vmm/hwaccm.h>
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/avl.h>
38#include <iprt/critsect.h>
39#include <iprt/sha.h>
40
41
42
43/** @addtogroup grp_pgm_int Internals
44 * @internal
45 * @{
46 */
47
48/** @todo Split out all the inline stuff into a separate file. Then we can
49 * include it later when VM and VMCPU are defined and so avoid all that
50 * &pVM->pgm.s and &pVCpu->pgm.s stuff. It also chops ~1600 lines off
51 * this file and will make it somewhat easier to navigate... */
52
53/**
54 * Gets the PGMRAMRANGE structure for a guest page.
55 *
56 * @returns Pointer to the RAM range on success.
57 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
58 *
59 * @param pPGM PGM handle.
60 * @param GCPhys The GC physical address.
61 */
62DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
63{
64 /*
65 * Optimize for the first range.
66 */
67 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
68 RTGCPHYS off = GCPhys - pRam->GCPhys;
69 if (RT_UNLIKELY(off >= pRam->cb))
70 {
71 do
72 {
73 pRam = pRam->CTX_SUFF(pNext);
74 if (RT_UNLIKELY(!pRam))
75 break;
76 off = GCPhys - pRam->GCPhys;
77 } while (off >= pRam->cb);
78 }
79 return pRam;
80}
81
82
83/**
84 * Gets the PGMPAGE structure for a guest page.
85 *
86 * @returns Pointer to the page on success.
87 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
88 *
89 * @param pPGM PGM handle.
90 * @param GCPhys The GC physical address.
91 */
92DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
93{
94 /*
95 * Optimize for the first range.
96 */
97 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
98 RTGCPHYS off = GCPhys - pRam->GCPhys;
99 if (RT_UNLIKELY(off >= pRam->cb))
100 {
101 do
102 {
103 pRam = pRam->CTX_SUFF(pNext);
104 if (RT_UNLIKELY(!pRam))
105 return NULL;
106 off = GCPhys - pRam->GCPhys;
107 } while (off >= pRam->cb);
108 }
109 return &pRam->aPages[off >> PAGE_SHIFT];
110}
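/* Usage sketch (illustrative; assumes pVM and GCPhys are in scope):
 *
 *     PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
 *     if (!pPage)
 *         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
 */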
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * Old Phys code: Will make sure the page is present.
117 *
118 * @returns VBox status code.
119 * @retval VINF_SUCCESS and a valid *ppPage on success.
120 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
121 *
122 * @param pPGM PGM handle.
123 * @param GCPhys The GC physical address.
124 * @param ppPage Where to store the page pointer on success.
125 */
126DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
127{
128 /*
129 * Optimize for the first range.
130 */
131 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
132 RTGCPHYS off = GCPhys - pRam->GCPhys;
133 if (RT_UNLIKELY(off >= pRam->cb))
134 {
135 do
136 {
137 pRam = pRam->CTX_SUFF(pNext);
138 if (RT_UNLIKELY(!pRam))
139 {
140 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
141 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
142 }
143 off = GCPhys - pRam->GCPhys;
144 } while (off >= pRam->cb);
145 }
146 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
147 return VINF_SUCCESS;
148}
149
150
151
152
153/**
154 * Gets the PGMPAGE structure for a guest page.
155 *
156 * Old Phys code: Will make sure the page is present.
157 *
158 * @returns VBox status code.
159 * @retval VINF_SUCCESS and a valid *ppPage on success.
160 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
161 *
162 * @param pPGM PGM handle.
163 * @param GCPhys The GC physical address.
164 * @param ppPage Where to store the page pointer on success.
165 * @param ppRamHint Where to read and store the ram list hint.
166 * The caller initializes this to NULL before the call.
167 */
168DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
169{
170 RTGCPHYS off;
171 PPGMRAMRANGE pRam = *ppRamHint;
172 if ( !pRam
173 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
174 {
175 pRam = pPGM->CTX_SUFF(pRamRanges);
176 off = GCPhys - pRam->GCPhys;
177 if (RT_UNLIKELY(off >= pRam->cb))
178 {
179 do
180 {
181 pRam = pRam->CTX_SUFF(pNext);
182 if (RT_UNLIKELY(!pRam))
183 {
184 *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
185 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
186 }
187 off = GCPhys - pRam->GCPhys;
188 } while (off >= pRam->cb);
189 }
190 *ppRamHint = pRam;
191 }
192 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
193 return VINF_SUCCESS;
194}
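/* Usage sketch (illustrative; pVM, GCPhysFirst and cPages are assumed to be
 * defined by the caller). The ram range hint starts out NULL and is reused
 * across lookups that tend to hit the same range:
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *     }
 */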
195
196
197/**
198 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
199 *
200 * @returns Pointer to the page on success.
201 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
202 *
203 * @param pPGM PGM handle.
204 * @param GCPhys The GC physical address.
205 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
206 */
207DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
208{
209 /*
210 * Optimize for the first range.
211 */
212 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
213 RTGCPHYS off = GCPhys - pRam->GCPhys;
214 if (RT_UNLIKELY(off >= pRam->cb))
215 {
216 do
217 {
218 pRam = pRam->CTX_SUFF(pNext);
219 if (RT_UNLIKELY(!pRam))
220 return NULL;
221 off = GCPhys - pRam->GCPhys;
222 } while (off >= pRam->cb);
223 }
224 *ppRam = pRam;
225 return &pRam->aPages[off >> PAGE_SHIFT];
226}
227
228
229/**
230 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
231 *
232 * @returns Pointer to the page on success.
233 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
234 *
235 * @param pPGM PGM handle.
236 * @param GCPhys The GC physical address.
237 * @param ppPage Where to store the pointer to the PGMPAGE structure.
238 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
239 */
240DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
241{
242 /*
243 * Optimize for the first range.
244 */
245 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
246 RTGCPHYS off = GCPhys - pRam->GCPhys;
247 if (RT_UNLIKELY(off >= pRam->cb))
248 {
249 do
250 {
251 pRam = pRam->CTX_SUFF(pNext);
252 if (RT_UNLIKELY(!pRam))
253 {
254 *ppRam = NULL; /* Shut up silly GCC warnings. */
255 *ppPage = NULL; /* ditto */
256 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
257 }
258 off = GCPhys - pRam->GCPhys;
259 } while (off >= pRam->cb);
260 }
261 *ppRam = pRam;
262 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
263 return VINF_SUCCESS;
264}
265
266
267/**
268 * Convert GC Phys to HC Phys.
269 *
270 * @returns VBox status.
271 * @param pPGM PGM handle.
272 * @param GCPhys The GC physical address.
273 * @param pHCPhys Where to store the corresponding HC physical address.
274 *
275 * @deprecated Doesn't deal with zero, shared or write monitored pages.
276 * Avoid when writing new code!
277 */
278DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
279{
280 PPGMPAGE pPage;
281 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
282 if (RT_FAILURE(rc))
283 return rc;
284 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
285 return VINF_SUCCESS;
286}
287
288#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
289
290/**
291 * Inlined version of the ring-0 version of the host page mapping code
292 * that optimizes access to pages already in the set.
293 *
294 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
295 * @param pVCpu The current CPU.
296 * @param HCPhys The physical address of the page.
297 * @param ppv Where to store the mapping address.
298 */
299DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
300{
301 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
302
303 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
304 Assert(!(HCPhys & PAGE_OFFSET_MASK));
305 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
306
307 unsigned iHash = PGMMAPSET_HASH(HCPhys);
308 unsigned iEntry = pSet->aiHashTable[iHash];
309 if ( iEntry < pSet->cEntries
310 && pSet->aEntries[iEntry].HCPhys == HCPhys
311 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
312 {
313 pSet->aEntries[iEntry].cInlinedRefs++;
314 *ppv = pSet->aEntries[iEntry].pvPage;
315 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
316 }
317 else
318 {
319 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
320 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
321 }
322
323 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
324 return VINF_SUCCESS;
325}
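/* Usage sketch (illustrative; HCPhys is assumed to be a page aligned host
 * physical address and pVCpu the current CPU):
 *
 *     void *pv;
 *     pgmRZDynMapHCPageInlined(pVCpu, HCPhys, &pv RTLOG_COMMA_SRC_POS);
 */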
326
327
328/**
329 * Inlined version of the guest page mapping code that optimizes access to pages
330 * already in the set.
331 *
332 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
333 * @param pVM The VM handle.
334 * @param pVCpu The current CPU.
335 * @param GCPhys The guest physical address of the page.
336 * @param ppv Where to store the mapping address.
337 */
338DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
339{
340 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
341 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
342
343 /*
344 * Get the ram range.
345 */
346 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
347 RTGCPHYS off = GCPhys - pRam->GCPhys;
348 if (RT_UNLIKELY(off >= pRam->cb
349 /** @todo || page state stuff */))
350 {
351 /* This case is not counted into StatRZDynMapGCPageInl. */
352 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
353 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
354 }
355
356 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
357 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
358
359 /*
360 * pgmRZDynMapHCPageInlined without the stats.
361 */
362 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
363 Assert(!(HCPhys & PAGE_OFFSET_MASK));
364 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
365
366 unsigned iHash = PGMMAPSET_HASH(HCPhys);
367 unsigned iEntry = pSet->aiHashTable[iHash];
368 if ( iEntry < pSet->cEntries
369 && pSet->aEntries[iEntry].HCPhys == HCPhys
370 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
371 {
372 pSet->aEntries[iEntry].cInlinedRefs++;
373 *ppv = pSet->aEntries[iEntry].pvPage;
374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
375 }
376 else
377 {
378 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
379 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
380 }
381
382 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
383 return VINF_SUCCESS;
384}
385
386
387/**
388 * Inlined version of the ring-0 version of guest page mapping that optimizes
389 * access to pages already in the set.
390 *
391 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
392 * @param pVCpu The current CPU.
393 * @param GCPhys The guest physical address of the page.
394 * @param ppv Where to store the mapping address.
395 */
396DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
397{
398 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
399}
400
401
402/**
403 * Inlined version of the ring-0 version of the guest byte mapping code
404 * that optimizes access to pages already in the set.
405 *
406 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
407 * @param pVCpu The current CPU.
408 * @param GCPhys The guest physical address of the page.
409 * @param ppv Where to store the mapping address. The offset is
410 * preserved.
411 */
412DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
413{
414 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
415
416 /*
417 * Get the ram range.
418 */
419 PVM pVM = pVCpu->CTX_SUFF(pVM);
420 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
421 RTGCPHYS off = GCPhys - pRam->GCPhys;
422 if (RT_UNLIKELY(off >= pRam->cb
423 /** @todo || page state stuff */))
424 {
425 /* This case is not counted into StatRZDynMapGCPageInl. */
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
427 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
428 }
429
430 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
431 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
432
433 /*
434 * pgmRZDynMapHCPageInlined without the stats.
435 */
436 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
437 Assert(!(HCPhys & PAGE_OFFSET_MASK));
438 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
439
440 unsigned iHash = PGMMAPSET_HASH(HCPhys);
441 unsigned iEntry = pSet->aiHashTable[iHash];
442 if ( iEntry < pSet->cEntries
443 && pSet->aEntries[iEntry].HCPhys == HCPhys
444 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
445 {
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
447 pSet->aEntries[iEntry].cInlinedRefs++;
448 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
449 }
450 else
451 {
452 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
453 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
454 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
455 }
456
457 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
458 return VINF_SUCCESS;
459}
460
461#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
462#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
463
464/**
465 * Maps the page into current context (RC and maybe R0).
466 *
467 * @returns pointer to the mapping.
468 * @param pVM The VM handle.
469 * @param pPage The page.
470 */
471DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
472{
473 if (pPage->idx >= PGMPOOL_IDX_FIRST)
474 {
475 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
476 void *pv;
477 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
478 return pv;
479 }
480 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
481}
482
483/**
484 * Maps the page into current context (RC and maybe R0).
485 *
486 * @returns pointer to the mapping.
487 * @param pVM The VM handle.
488 * @param pVCpu The current CPU.
489 * @param pPage The page.
490 */
491DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
492{
493 if (pPage->idx >= PGMPOOL_IDX_FIRST)
494 {
495 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
496 void *pv;
497 Assert(pVCpu == VMMGetCpu(pVM));
498 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
499 return pv;
500 }
501 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
502}
503
504#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
505#ifndef IN_RC
506
507/**
508 * Queries the Physical TLB entry for a physical guest page,
509 * attempting to load the TLB entry if necessary.
510 *
511 * @returns VBox status code.
512 * @retval VINF_SUCCESS on success
513 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
514 *
515 * @param pPGM The PGM instance handle.
516 * @param GCPhys The address of the guest page.
517 * @param ppTlbe Where to store the pointer to the TLB entry.
518 */
519DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
520{
521 int rc;
522 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
523 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
524 {
525 STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
526 rc = VINF_SUCCESS;
527 }
528 else
529 rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
530 *ppTlbe = pTlbe;
531 return rc;
532}
533
534
535/**
536 * Queries the Physical TLB entry for a physical guest page,
537 * attempting to load the TLB entry if necessary.
538 *
539 * @returns VBox status code.
540 * @retval VINF_SUCCESS on success
541 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
542 *
543 * @param pPGM The PGM instance handle.
544 * @param pPage Pointer to the PGMPAGE structure corresponding to
545 * GCPhys.
546 * @param GCPhys The address of the guest page.
547 * @param ppTlbe Where to store the pointer to the TLB entry.
548 */
549DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
550{
551 int rc;
552 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
553 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
554 {
555 STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
556 rc = VINF_SUCCESS;
557 }
558 else
559 rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
560 *ppTlbe = pTlbe;
561 return rc;
562}
563
564#endif /* !IN_RC */
565
566
567/**
568 * Enables write monitoring for an allocated page.
569 *
570 * The caller is responsible for updating the shadow page tables.
571 *
572 * @param pVM The VM handle.
573 * @param pPage The page to write monitor.
574 * @param GCPhysPage The address of the page.
575 */
576DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
577{
578 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
579 Assert(PGMIsLockOwner(pVM));
580
581 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
582 pVM->pgm.s.cMonitoredPages++;
583
584 /* Large pages must be disabled. */
585 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
586 {
587 PPGMPAGE pFirstPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
588 AssertFatal(pFirstPage);
589 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
590 {
591 PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
592 pVM->pgm.s.cLargePagesDisabled++;
593 }
594 else
595 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
596 }
597}
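/* Illustrative note: X86_PDE2M_PAE_PG_MASK rounds the address down to the
 * start of its 2 MB range, so for GCPhysPage = 0x40230000 the pFirstPage
 * looked up above is the PGMPAGE at 0x40200000. */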
598
599
600/**
601 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
602 *
603 * Only used when the guest is in PAE or long mode. This is inlined so that we
604 * can perform consistency checks in debug builds.
605 *
606 * @returns true if it is, false if it isn't.
607 * @param pVCpu The current CPU.
608 */
609DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
610{
611 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
612 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
613 return pVCpu->pgm.s.fNoExecuteEnabled;
614}
615
616
617/**
618 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
619 *
620 * Only used when the guest is in paged 32-bit mode. This is inlined so that
621 * we can perform consistency checks in debug builds.
622 *
623 * @returns true if it is, false if it isn't.
624 * @param pVCpu The current CPU.
625 */
626DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
627{
628 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
629 Assert(!CPUMIsGuestInPAEMode(pVCpu));
630 Assert(!CPUMIsGuestInLongMode(pVCpu));
631 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
632}
633
634
635/**
636 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
637 * Takes PSE-36 into account.
638 *
639 * @returns guest physical address
640 * @param pPGM Pointer to the PGM instance data.
641 * @param Pde Guest Pde
642 */
643DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
644{
645 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
646 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
647
648 return GCPhys & pPGM->GCPhys4MBPSEMask;
649}
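/* Illustrative example: a PDE with u8PageNoHigh = 1 and a 4 MB frame base of
 * 0x00c00000 yields GCPhys = 0x100c00000 here, which the final AND with
 * GCPhys4MBPSEMask then restricts to the guest's physical address width. */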
650
651
652/**
653 * Gets the address of the guest page directory (32-bit paging).
654 *
655 * @returns VBox status code.
656 * @param pVCpu The current CPU.
657 * @param ppPd Where to return the mapping. This is always set.
658 */
659DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
660{
661#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
662 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
663 if (RT_FAILURE(rc))
664 {
665 *ppPd = NULL;
666 return rc;
667 }
668#else
669 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
670 if (RT_UNLIKELY(!*ppPd))
671 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
672#endif
673 return VINF_SUCCESS;
674}
675
676
677/**
678 * Gets the address of the guest page directory (32-bit paging).
679 *
680 * @returns Pointer to the page directory in question.
681 * @param pVCpu The current CPU.
682 */
683DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
684{
685#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
686 PX86PD pGuestPD = NULL;
687 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
688 if (RT_FAILURE(rc))
689 {
690 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
691 return NULL;
692 }
693#else
694 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
695 if (RT_UNLIKELY(!pGuestPD))
696 {
697 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
698 if (RT_FAILURE(rc))
699 return NULL;
700 }
701#endif
702 return pGuestPD;
703}
704
705
706/**
707 * Gets the guest page directory pointer table.
708 *
709 * @returns VBox status code.
710 * @param pVCpu The current CPU.
711 * @param ppPdpt Where to return the mapping. This is always set.
712 */
713DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
714{
715#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
716 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
717 if (RT_FAILURE(rc))
718 {
719 *ppPdpt = NULL;
720 return rc;
721 }
722#else
723 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
724 if (RT_UNLIKELY(!*ppPdpt))
725 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
726#endif
727 return VINF_SUCCESS;
728}
729
730/**
731 * Gets the guest page directory pointer table.
732 *
733 * @returns Pointer to the page directory in question.
734 * @returns NULL if the page directory is not present or on an invalid page.
735 * @param pVCpu The current CPU.
736 */
737DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
738{
739 PX86PDPT pGuestPdpt;
740 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
741 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
742 return pGuestPdpt;
743}
744
745
746/**
747 * Gets the guest page directory pointer table entry for the specified address.
748 *
749 * @returns Pointer to the page directory in question.
750 * @returns NULL if the page directory is not present or on an invalid page.
751 * @param pVCpu The current CPU
752 * @param GCPtr The address.
753 */
754DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
755{
756 AssertGCPtr32(GCPtr);
757
758#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
759 PX86PDPT pGuestPDPT = NULL;
760 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
761 AssertRCReturn(rc, NULL);
762#else
763 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
764 if (RT_UNLIKELY(!pGuestPDPT))
765 {
766 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
767 if (RT_FAILURE(rc))
768 return NULL;
769 }
770#endif
771 return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
772}
773
774
775/**
776 * Gets the page directory entry for the specified address.
777 *
778 * @returns The page directory entry in question.
779 * @returns A non-present entry if the page directory is not present or on an invalid page.
780 * @param pVCpu The handle of the virtual CPU.
781 * @param GCPtr The address.
782 */
783DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
784{
785 AssertGCPtr32(GCPtr);
786 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
787 if (RT_LIKELY(pGuestPDPT))
788 {
789 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
790 if ( pGuestPDPT->a[iPdpt].n.u1Present
791 && !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
792 {
793 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
794#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
795 PX86PDPAE pGuestPD = NULL;
796 int rc = pgmRZDynMapGCPageInlined(pVCpu,
797 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
798 (void **)&pGuestPD
799 RTLOG_COMMA_SRC_POS);
800 if (RT_SUCCESS(rc))
801 return pGuestPD->a[iPD];
802 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
803#else
804 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
805 if ( !pGuestPD
806 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
807 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
808 if (pGuestPD)
809 return pGuestPD->a[iPD];
810#endif
811 }
812 }
813
814 X86PDEPAE ZeroPde = {0};
815 return ZeroPde;
816}
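/* Usage sketch (illustrative; GCPtr is assumed to be a guest linear address):
 *
 *     X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
 *     if (Pde.n.u1Present)
 *         ... walk the page table level below, or check Pde.b.u1Size ...
 */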
817
818
819/**
820 * Gets the page directory pointer table entry for the specified address
821 * and returns the index into the page directory
822 *
823 * @returns Pointer to the page directory in question.
824 * @returns NULL if the page directory is not present or on an invalid page.
825 * @param pVCpu The current CPU.
826 * @param GCPtr The address.
827 * @param piPD Receives the index into the returned page directory
828 * @param pPdpe Receives the page directory pointer entry. Optional.
829 */
830DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
831{
832 AssertGCPtr32(GCPtr);
833
834 /* The PDPE. */
835 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
836 if (RT_UNLIKELY(!pGuestPDPT))
837 return NULL;
838 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
839 if (pPdpe)
840 *pPdpe = pGuestPDPT->a[iPdpt];
841 if (!pGuestPDPT->a[iPdpt].n.u1Present)
842 return NULL;
843 if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
844 return NULL;
845
846 /* The PDE. */
847#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
848 PX86PDPAE pGuestPD = NULL;
849 int rc = pgmRZDynMapGCPageInlined(pVCpu,
850 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
851 (void **)&pGuestPD
852 RTLOG_COMMA_SRC_POS);
853 if (RT_FAILURE(rc))
854 {
855 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
856 return NULL;
857 }
858#else
859 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
860 if ( !pGuestPD
861 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
862 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
863#endif
864
865 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
866 return pGuestPD;
867}
868
869#ifndef IN_RC
870
871/**
872 * Gets the page map level-4 pointer for the guest.
873 *
874 * @returns VBox status code.
875 * @param pVCpu The current CPU.
876 * @param ppPml4 Where to return the mapping. Always set.
877 */
878DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
879{
880#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
881 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
882 if (RT_FAILURE(rc))
883 {
884 *ppPml4 = NULL;
885 return rc;
886 }
887#else
888 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
889 if (RT_UNLIKELY(!*ppPml4))
890 return pgmGstLazyMapPml4(pVCpu, ppPml4);
891#endif
892 return VINF_SUCCESS;
893}
894
895
896/**
897 * Gets the page map level-4 pointer for the guest.
898 *
899 * @returns Pointer to the PML4 page.
900 * @param pVCpu The current CPU.
901 */
902DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
903{
904 PX86PML4 pGuestPml4;
905 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
906 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
907 return pGuestPml4;
908}
909
910
911/**
912 * Gets the pointer to a page map level-4 entry.
913 *
914 * @returns Pointer to the PML4 entry.
915 * @param pVCpu The current CPU.
916 * @param iPml4 The index.
917 * @remarks Only used by AssertCR3.
918 */
919DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
920{
921#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
922 PX86PML4 pGuestPml4;
923 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
924 AssertRCReturn(rc, NULL);
925#else
926 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
927 if (RT_UNLIKELY(!pGuestPml4))
928 {
929 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
930 AssertRCReturn(rc, NULL);
931 }
932#endif
933 return &pGuestPml4->a[iPml4];
934}
935
936
937/**
938 * Gets the page directory entry for the specified address.
939 *
940 * @returns The page directory entry in question.
941 * @returns A non-present entry if the page directory is not present or on an invalid page.
942 * @param pVCpu The current CPU.
943 * @param GCPtr The address.
944 */
945DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
946{
947 /*
948 * Note! To keep things simple, ASSUME invalid physical addresses will
949 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
950 * supporting 52-bit wide physical guest addresses.
951 */
952 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
953 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
954 if ( RT_LIKELY(pGuestPml4)
955 && pGuestPml4->a[iPml4].n.u1Present
956 && !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
957 {
958 PCX86PDPT pPdptTemp;
959 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
960 if (RT_SUCCESS(rc))
961 {
962 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
963 if ( pPdptTemp->a[iPdpt].n.u1Present
964 && !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
965 {
966 PCX86PDPAE pPD;
967 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
968 if (RT_SUCCESS(rc))
969 {
970 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
971 return pPD->a[iPD];
972 }
973 }
974 }
975 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
976 }
977
978 X86PDEPAE ZeroPde = {0};
979 return ZeroPde;
980}
981
982
983/**
984 * Gets the GUEST page directory pointer for the specified address.
985 *
986 * @returns The page directory in question.
987 * @returns NULL if the page directory is not present or on an invalid page.
988 * @param pVCpu The current CPU.
989 * @param GCPtr The address.
990 * @param ppPml4e Page Map Level-4 Entry (out)
991 * @param pPdpe Page directory pointer table entry (out)
992 * @param piPD Receives the index into the returned page directory
993 */
994DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
995{
996 /* The PML4E. */
997 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
998 if (RT_UNLIKELY(!pGuestPml4))
999 return NULL;
1000 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1001 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
1002 if (!pPml4e->n.u1Present)
1003 return NULL;
1004 if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
1005 return NULL;
1006
1007 /* The PDPE. */
1008 PCX86PDPT pPdptTemp;
1009 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
1010 if (RT_FAILURE(rc))
1011 {
1012 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1013 return NULL;
1014 }
1015 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1016 *pPdpe = pPdptTemp->a[iPdpt];
1017 if (!pPdpe->n.u1Present)
1018 return NULL;
1019 if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
1020 return NULL;
1021
1022 /* The PDE. */
1023 PX86PDPAE pPD;
1024 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
1025 if (RT_FAILURE(rc))
1026 {
1027 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1028 return NULL;
1029 }
1030
1031 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1032 return pPD;
1033}
1034
1035#endif /* !IN_RC */
1036
1037/**
1038 * Gets the shadow page directory, 32-bit.
1039 *
1040 * @returns Pointer to the shadow 32-bit PD.
1041 * @param pVCpu The current CPU.
1042 */
1043DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
1044{
1045 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1046}
1047
1048
1049/**
1050 * Gets the shadow page directory entry for the specified address, 32-bit.
1051 *
1052 * @returns Shadow 32-bit PDE.
1053 * @param pVCpu The current CPU.
1054 * @param GCPtr The address.
1055 */
1056DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1057{
1058 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
1059
1060 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1061 if (!pShwPde)
1062 {
1063 X86PDE ZeroPde = {0};
1064 return ZeroPde;
1065 }
1066 return pShwPde->a[iPd];
1067}
1068
1069
1070/**
1071 * Gets the pointer to the shadow page directory entry for the specified
1072 * address, 32-bit.
1073 *
1074 * @returns Pointer to the shadow 32-bit PDE.
1075 * @param pVCpu The current CPU.
1076 * @param GCPtr The address.
1077 */
1078DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1079{
1080 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
1081
1082 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1083 AssertReturn(pPde, NULL);
1084 return &pPde->a[iPd];
1085}
1086
1087
1088/**
1089 * Gets the shadow page directory pointer table, PAE.
1090 *
1091 * @returns Pointer to the shadow PAE PDPT.
1092 * @param pVCpu The current CPU.
1093 */
1094DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
1095{
1096 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1097}
1098
1099
1100/**
1101 * Gets the shadow page directory for the specified address, PAE.
1102 *
1103 * @returns Pointer to the shadow PD.
1104 * @param pVCpu The current CPU.
1105 * @param GCPtr The address.
1106 */
1107DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1108{
1109 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1110 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1111
1112 if (!pPdpt->a[iPdpt].n.u1Present)
1113 return NULL;
1114
1115 /* Fetch the pgm pool shadow descriptor. */
1116 PVM pVM = pVCpu->CTX_SUFF(pVM);
1117 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1118 AssertReturn(pShwPde, NULL);
1119
1120 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1121}
1122
1123
1124/**
1125 * Gets the shadow page directory for the specified address, PAE.
1126 *
1127 * @returns Pointer to the shadow PD.
1128 * @param pVCpu The current CPU.
 * @param pPdpt Pointer to the shadow PAE PDPT.
1129 * @param GCPtr The address.
1130 */
1131DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1132{
1133 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1134
1135 if (!pPdpt->a[iPdpt].n.u1Present)
1136 return NULL;
1137
1138 /* Fetch the pgm pool shadow descriptor. */
1139 PVM pVM = pVCpu->CTX_SUFF(pVM);
1140 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1141 AssertReturn(pShwPde, NULL);
1142
1143 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1144}
1145
1146
1147/**
1148 * Gets the shadow page directory entry, PAE.
1149 *
1150 * @returns PDE.
1151 * @param pVCpu The current CPU.
1152 * @param GCPtr The address.
1153 */
1154DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1155{
1156 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1157
1158 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1159 if (!pShwPde)
1160 {
1161 X86PDEPAE ZeroPde = {0};
1162 return ZeroPde;
1163 }
1164 return pShwPde->a[iPd];
1165}
1166
1167
1168/**
1169 * Gets the pointer to the shadow page directory entry for an address, PAE.
1170 *
1171 * @returns Pointer to the PDE.
1172 * @param pVCpu The current CPU.
1173 * @param GCPtr The address.
1174 * @remarks Only used by AssertCR3.
1175 */
1176DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1177{
1178 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1179
1180 PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1181 AssertReturn(pPde, NULL);
1182 return &pPde->a[iPd];
1183}
1184
1185#ifndef IN_RC
1186
1187/**
1188 * Gets the shadow page map level-4 pointer.
1189 *
1190 * @returns Pointer to the shadow PML4.
1191 * @param pVCpu The current CPU.
1192 */
1193DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
1194{
1195 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1196}
1197
1198
1199/**
1200 * Gets the shadow page map level-4 entry for the specified address.
1201 *
1202 * @returns The entry.
1203 * @param pVCpu The current CPU.
1204 * @param GCPtr The address.
1205 */
1206DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
1207{
1208 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1209 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1210
1211 if (!pShwPml4)
1212 {
1213 X86PML4E ZeroPml4e = {0};
1214 return ZeroPml4e;
1215 }
1216 return pShwPml4->a[iPml4];
1217}
1218
1219
1220/**
1221 * Gets the pointer to the specified shadow page map level-4 entry.
1222 *
1223 * @returns The entry.
1224 * @param pVCpu The current CPU.
1225 * @param iPml4 The PML4 index.
1226 */
1227DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
1228{
1229 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1230 if (!pShwPml4)
1231 return NULL;
1232 return &pShwPml4->a[iPml4];
1233}
1234
1235#endif /* !IN_RC */
1236
1237
1238/**
1239 * Cached physical handler lookup.
1240 *
1241 * @returns Physical handler covering @a GCPhys.
1242 * @param pVM The VM handle.
1243 * @param GCPhys The lookup address.
1244 */
1245DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
1246{
1247 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1248 if ( pHandler
1249 && GCPhys >= pHandler->Core.Key
1250 && GCPhys < pHandler->Core.KeyLast)
1251 {
1252 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1253 return pHandler;
1254 }
1255
1256 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1257 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1258 if (pHandler)
1259 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1260 return pHandler;
1261}
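/* Usage sketch (illustrative):
 *
 *     PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhys);
 *     if (pHandler)
 *         ... GCPhys lies within a registered physical access handler ...
 */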
1262
1263
1264/**
1265 * Gets the page state for a physical handler.
1266 *
1267 * @returns The physical handler page state.
1268 * @param pCur The physical handler in question.
1269 */
1270DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
1271{
1272 switch (pCur->enmType)
1273 {
1274 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
1275 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
1276
1277 case PGMPHYSHANDLERTYPE_MMIO:
1278 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
1279 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
1280
1281 default:
1282 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1283 }
1284}
1285
1286
1287/**
1288 * Gets the page state for a virtual handler.
1289 *
1290 * @returns The virtual handler page state.
1291 * @param pCur The virtual handler in question.
1292 * @remarks This should never be used on a hypervisor access handler.
1293 */
1294DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
1295{
1296 switch (pCur->enmType)
1297 {
1298 case PGMVIRTHANDLERTYPE_WRITE:
1299 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
1300 case PGMVIRTHANDLERTYPE_ALL:
1301 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
1302 default:
1303 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1304 }
1305}
1306
1307
1308/**
1309 * Clears one physical page of a virtual handler
1310 *
1311 * @param pPGM Pointer to the PGM instance.
1312 * @param pCur Virtual handler structure
1313 * @param iPage Physical page index
1314 *
1315 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
1316 * need to care about other handlers in the same page.
1317 */
1318DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
1319{
1320 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1321
1322 /*
1323 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
1324 */
1325#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1326 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1327 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1328 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1329#endif
1330 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
1331 {
1332 /* We're the head of the alias chain. */
1333 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
1334#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1335 AssertReleaseMsg(pRemove != NULL,
1336 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1337 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1338 AssertReleaseMsg(pRemove == pPhys2Virt,
1339 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
1340 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1341 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
1342 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
1343#endif
1344 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1345 {
1346 /* Insert the next list in the alias chain into the tree. */
1347 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1348#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1349 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1350 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1351 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
1352#endif
1353 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
1354 bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
1355 AssertRelease(fRc);
1356 }
1357 }
1358 else
1359 {
1360 /* Locate the previous node in the alias chain. */
1361 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1362#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1363 AssertReleaseMsg(pPrev != pPhys2Virt,
1364 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1365 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1366#endif
1367 for (;;)
1368 {
1369 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1370 if (pNext == pPhys2Virt)
1371 {
1372 /* unlink. */
1373 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
1374 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1375 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1376 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
1377 else
1378 {
1379 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1380 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
1381 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1382 }
1383 break;
1384 }
1385
1386 /* next */
1387 if (pNext == pPrev)
1388 {
1389#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1390 AssertReleaseMsg(pNext != pPrev,
1391 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1392 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1393#endif
1394 break;
1395 }
1396 pPrev = pNext;
1397 }
1398 }
1399 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
1400 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1401 pPhys2Virt->offNextAlias = 0;
1402 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
1403
1404 /*
1405 * Clear the ram flags for this page.
1406 */
1407 PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
1408 AssertReturnVoid(pPage);
1409 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
1410}
1411
1412
1413/**
1414 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
1415 *
1416 * @returns Pointer to the shadow page structure.
1417 * @param pPool The pool.
1418 * @param idx The pool page index.
1419 */
1420DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1421{
1422 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1423 return &pPool->aPages[idx];
1424}
1425
1426
1427/**
1428 * Clear references to guest physical memory.
1429 *
1430 * @param pPool The pool.
1431 * @param pPoolPage The pool page.
1432 * @param pPhysPage The physical guest page tracking structure.
1433 * @param iPte Shadow PTE index
1434 */
1435DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1436{
1437 /*
1438 * Just deal with the simple case here.
1439 */
1440# ifdef LOG_ENABLED
1441 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1442# endif
1443 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1444 if (cRefs == 1)
1445 {
1446 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1447 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1448 /* Invalidate the tracking data. */
1449 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
1450 }
1451 else
1452 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1453 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1454}
1455
1456
1457/**
1458 * Moves the page to the head of the age list.
1459 *
1460 * This is done when the cached page is used in one way or another.
1461 *
1462 * @param pPool The pool.
1463 * @param pPage The cached page.
1464 */
1465DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1466{
1467 Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1468
1469 /*
1470 * Move to the head of the age list.
1471 */
1472 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1473 {
1474 /* unlink */
1475 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1476 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1477 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1478 else
1479 pPool->iAgeTail = pPage->iAgePrev;
1480
1481 /* insert at head */
1482 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1483 pPage->iAgeNext = pPool->iAgeHead;
1484 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1485 pPool->iAgeHead = pPage->idx;
1486 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1487 }
1488}
1489
1490/**
1491 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1492 *
1493 * @param pPool The pool.
1494 * @param pPage The PGM pool page.
1495 */
1496DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1497{
1498 Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1499 ASMAtomicIncU32(&pPage->cLocked);
1500}
1501
1502
1503/**
1504 * Unlocks a page to allow flushing again.
1505 *
1506 * @param pPool The pool.
1507 * @param pPage The PGM pool page.
1508 */
1509DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1510{
1511 Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
1512 Assert(pPage->cLocked);
1513 ASMAtomicDecU32(&pPage->cLocked);
1514}
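/* Usage sketch (illustrative): the lock/unlock calls pair up around work
 * during which the shadow page must not be flushed from the pool:
 *
 *     pgmPoolLockPage(pPool, pPage);
 *     ... update or consult the shadow page ...
 *     pgmPoolUnlockPage(pPool, pPage);
 */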
1515
1516
1517/**
1518 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1519 *
1520 * @returns true if the page is locked, false if not.
1521 * @param pPage The PGM pool page.
1522 */
1523DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
1524{
1525 if (pPage->cLocked)
1526 {
1527 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1528 if (pPage->cModifications)
1529 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1530 return true;
1531 }
1532 return false;
1533}
1534
1535
1536/**
1537 * Tells if mappings are to be put into the shadow page table or not.
1538 *
1539 * @returns boolean result.
1540 * @param pPGM Pointer to the PGM instance data.
1541 */
1542DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
1543{
1544#ifdef PGM_WITHOUT_MAPPINGS
1545 /* There are no mappings in VT-x and AMD-V mode. */
1546 Assert(pPGM->fMappingsDisabled);
1547 return false;
1548#else
1549 return !pPGM->fMappingsDisabled;
1550#endif
1551}
1552
1553
1554/**
1555 * Checks if the mappings are floating and enabled.
1556 *
1557 * @returns true / false.
1558 * @param pPGM Pointer to the PGM instance data.
1559 */
1560DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
1561{
1562#ifdef PGM_WITHOUT_MAPPINGS
1563 /* There are no mappings in VT-x and AMD-V mode. */
1564 Assert(pPGM->fMappingsDisabled);
1565 return false;
1566#else
1567 return !pPGM->fMappingsDisabled
1568 && !pPGM->fMappingsFixed;
1569#endif
1570}
1571
1572/** @} */
1573
1574#endif
1575