VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@72816

Last change on this file since 72816 was 70977, checked in by vboxsync, 7 years ago

NEM: Working on PGM notifications. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 51.8 KB
1/* $Id: PGMInline.h 70977 2018-02-12 20:45:31Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___PGMInline_h
19#define ___PGMInline_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/err.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/param.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/pdmcritsect.h>
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/dis.h>
31#include <VBox/vmm/dbgf.h>
32#include <VBox/log.h>
33#include <VBox/vmm/gmm.h>
34#include <VBox/vmm/hm.h>
35#ifndef IN_RC
36# include <VBox/vmm/nem.h>
37#endif
38#include <iprt/asm.h>
39#include <iprt/assert.h>
40#include <iprt/avl.h>
41#include <iprt/critsect.h>
42#include <iprt/sha.h>
43
44
45
46/** @addtogroup grp_pgm_int Internals
47 * @internal
48 * @{
49 */
50
51/**
52 * Gets the PGMRAMRANGE structure for a guest page.
53 *
54 * @returns Pointer to the RAM range on success.
55 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
56 *
57 * @param pVM The cross context VM structure.
58 * @param GCPhys The GC physical address.
59 */
60DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
61{
62 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
63 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
64 return pgmPhysGetRangeSlow(pVM, GCPhys);
65 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
66 return pRam;
67}
68
69
70/**
71 * Gets the PGMRAMRANGE structure for a guest page; if the address is unassigned,
72 * returns the RAM range above it.
73 *
74 * @returns Pointer to the RAM range on success.
75 * @returns NULL if the address is located after the last range.
76 *
77 * @param pVM The cross context VM structure.
78 * @param GCPhys The GC physical address.
79 */
80DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVM pVM, RTGCPHYS GCPhys)
81{
82 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
83 if ( !pRam
84 || (GCPhys - pRam->GCPhys) >= pRam->cb)
85 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
86 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
87 return pRam;
88}
89
90
91/**
92 * Gets the PGMPAGE structure for a guest page.
93 *
94 * @returns Pointer to the page on success.
95 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
96 *
97 * @param pVM The cross context VM structure.
98 * @param GCPhys The GC physical address.
99 */
100DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys)
101{
102 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
103 RTGCPHYS off;
104 if ( !pRam
105 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
106 return pgmPhysGetPageSlow(pVM, GCPhys);
107 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
108 return &pRam->aPages[off >> PAGE_SHIFT];
109}
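/*
 * Editor's note (not part of the original header): a minimal usage sketch of
 * pgmPhysGetPage(). The surrounding error handling and variable names are
 * assumptions for illustration only; the caller is expected to hold the PGM lock.
 *
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     if (!pPage)
 *         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
 */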
110
111
112/**
113 * Gets the PGMPAGE structure for a guest page.
114 *
115 * Old Phys code: Will make sure the page is present.
116 *
117 * @returns VBox status code.
118 * @retval VINF_SUCCESS and a valid *ppPage on success.
119 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
120 *
121 * @param pVM The cross context VM structure.
122 * @param GCPhys The GC physical address.
123 * @param ppPage Where to store the page pointer on success.
124 */
125DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
126{
127 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
128 RTGCPHYS off;
129 if ( !pRam
130 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
131 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
132 *ppPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
133 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
134 return VINF_SUCCESS;
135}
136
137
138/**
139 * Gets the PGMPAGE structure for a guest page.
140 *
141 * Old Phys code: Will make sure the page is present.
142 *
143 * @returns VBox status code.
144 * @retval VINF_SUCCESS and a valid *ppPage on success.
145 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
146 *
147 * @param pVM The cross context VM structure.
148 * @param GCPhys The GC physical address.
149 * @param ppPage Where to store the page pointer on success.
150 * @param ppRamHint Where to read and store the ram list hint.
151 * The caller initializes this to NULL before the call.
152 */
153DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
154{
155 RTGCPHYS off;
156 PPGMRAMRANGE pRam = *ppRamHint;
157 if ( !pRam
158 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
159 {
160 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
161 if ( !pRam
162 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
163 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
164
165 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
166 *ppRamHint = pRam;
167 }
168 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
169 return VINF_SUCCESS;
170}
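/*
 * Editor's note (not part of the original header): illustrative sketch of the
 * hint usage. The hint is set to NULL once and then reused across consecutive
 * lookups, so pages in the same RAM range avoid the TLB/slow path. Loop bounds
 * and variable names are assumptions for illustration only.
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT), &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... inspect or update the page ...
 *     }
 */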
171
172
173/**
174 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
175 *
176 * @returns VBox status code.
177 * @retval VINF_SUCCESS and valid *ppPage and *ppRam, or VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
178 *
179 * @param pVM The cross context VM structure.
180 * @param GCPhys The GC physical address.
181 * @param ppPage Where to store the pointer to the PGMPAGE structure.
182 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
183 */
184DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
185{
186 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
187 RTGCPHYS off;
188 if ( !pRam
189 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
190 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
191
192 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
193 *ppRam = pRam;
194 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
195 return VINF_SUCCESS;
196}
197
198
199/**
200 * Converts a GC physical address to its HC physical address.
201 *
202 * @returns VBox status code.
203 * @param pVM The cross context VM structure.
204 * @param GCPhys The GC physical address.
205 * @param pHCPhys Where to store the corresponding HC physical address.
206 *
207 * @deprecated Doesn't deal with zero, shared or write monitored pages.
208 * Avoid when writing new code!
209 */
210DECLINLINE(int) pgmRamGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
211{
212 PPGMPAGE pPage;
213 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
214 if (RT_FAILURE(rc))
215 return rc;
216 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
217 return VINF_SUCCESS;
218}
219
220#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
221
222/**
223 * Inlined version of the ring-0 version of the host page mapping code
224 * that optimizes access to pages already in the set.
225 *
226 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
227 * @param pVCpu The cross context virtual CPU structure.
228 * @param HCPhys The physical address of the page.
229 * @param ppv Where to store the mapping address.
230 * @param SRC_POS The source location of the caller.
231 */
232DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
233{
234 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
235
236 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
237 Assert(!(HCPhys & PAGE_OFFSET_MASK));
238 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
239
240 unsigned iHash = PGMMAPSET_HASH(HCPhys);
241 unsigned iEntry = pSet->aiHashTable[iHash];
242 if ( iEntry < pSet->cEntries
243 && pSet->aEntries[iEntry].HCPhys == HCPhys
244 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
245 {
246 pSet->aEntries[iEntry].cInlinedRefs++;
247 *ppv = pSet->aEntries[iEntry].pvPage;
248 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
249 }
250 else
251 {
252 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
253 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
254 }
255
256 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
257 return VINF_SUCCESS;
258}
259
260
261/**
262 * Inlined version of the guest page mapping code that optimizes access to pages
263 * already in the set.
264 *
265 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
266 * @param pVM The cross context VM structure.
267 * @param pVCpu The cross context virtual CPU structure.
268 * @param GCPhys The guest physical address of the page.
269 * @param ppv Where to store the mapping address.
270 * @param SRC_POS The source location of the caller.
271 */
272DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
273{
274 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
275 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
276
277 /*
278 * Get the ram range.
279 */
280 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
281 RTGCPHYS off;
282 if ( !pRam
283 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
284 /** @todo || page state stuff */
285 )
286 {
287 /* This case is not counted into StatRZDynMapGCPageInl. */
288 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
289 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
290 }
291
292 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
293 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
294
295 /*
296 * pgmRZDynMapHCPageInlined without the statistics.
297 */
298 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
299 Assert(!(HCPhys & PAGE_OFFSET_MASK));
300 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
301
302 unsigned iHash = PGMMAPSET_HASH(HCPhys);
303 unsigned iEntry = pSet->aiHashTable[iHash];
304 if ( iEntry < pSet->cEntries
305 && pSet->aEntries[iEntry].HCPhys == HCPhys
306 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
307 {
308 pSet->aEntries[iEntry].cInlinedRefs++;
309 *ppv = pSet->aEntries[iEntry].pvPage;
310 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
311 }
312 else
313 {
314 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
315 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
316 }
317
318 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
319 return VINF_SUCCESS;
320}
321
322
323/**
324 * Inlined version of the ring-0 version of guest page mapping that optimizes
325 * access to pages already in the set.
326 *
327 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
328 * @param pVCpu The cross context virtual CPU structure.
329 * @param GCPhys The guest physical address of the page.
330 * @param ppv Where to store the mapping address.
331 * @param SRC_POS The source location of the caller.
332 */
333DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
334{
335 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
336}
337
338
339/**
340 * Inlined version of the ring-0 version of the guest byte mapping code
341 * that optimizes access to pages already in the set.
342 *
343 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
344 * @param pVCpu The cross context virtual CPU structure.
345 * @param GCPhys The guest physical address of the page.
346 * @param ppv Where to store the mapping address. The offset is
347 * preserved.
348 * @param SRC_POS The source location of the caller.
349 */
350DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
351{
352 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
353
354 /*
355 * Get the ram range.
356 */
357 PVM pVM = pVCpu->CTX_SUFF(pVM);
358 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
359 RTGCPHYS off;
360 if ( !pRam
361 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
362 /** @todo || page state stuff */
363 )
364 {
365 /* This case is not counted into StatRZDynMapGCPageInl. */
366 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
367 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
368 }
369
370 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
371 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
372
373 /*
374 * pgmRZDynMapHCPageInlined without the statistics.
375 */
376 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
377 Assert(!(HCPhys & PAGE_OFFSET_MASK));
378 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
379
380 unsigned iHash = PGMMAPSET_HASH(HCPhys);
381 unsigned iEntry = pSet->aiHashTable[iHash];
382 if ( iEntry < pSet->cEntries
383 && pSet->aEntries[iEntry].HCPhys == HCPhys
384 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
385 {
386 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
387 pSet->aEntries[iEntry].cInlinedRefs++;
388 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
389 }
390 else
391 {
392 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
393 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
394 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
395 }
396
397 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
398 return VINF_SUCCESS;
399}
400
401#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
402#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
403
404/**
405 * Maps the page into current context (RC and maybe R0).
406 *
407 * @returns pointer to the mapping.
408 * @param pVM The cross context VM structure.
409 * @param pPage The page.
410 * @param SRC_POS The source location of the caller.
411 */
412DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
413{
414 if (pPage->idx >= PGMPOOL_IDX_FIRST)
415 {
416 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
417 void *pv;
418 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
419 return pv;
420 }
421 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
422}
423
424
425/**
426 * Maps the page into current context (RC and maybe R0).
427 *
428 * @returns pointer to the mapping.
429 * @param pVM The cross context VM structure.
430 * @param pVCpu The cross context virtual CPU structure.
431 * @param pPage The page.
432 * @param SRC_POS The source location of the caller.
433 */
434DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
435{
436 if (pPage->idx >= PGMPOOL_IDX_FIRST)
437 {
438 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
439 void *pv;
440 Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
441 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
442 return pv;
443 }
444 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
445}
446
447#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
448#ifndef IN_RC
449
450/**
451 * Queries the Physical TLB entry for a physical guest page,
452 * attempting to load the TLB entry if necessary.
453 *
454 * @returns VBox status code.
455 * @retval VINF_SUCCESS on success
456 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
457 *
458 * @param pVM The cross context VM structure.
459 * @param GCPhys The address of the guest page.
460 * @param ppTlbe Where to store the pointer to the TLB entry.
461 */
462DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
463{
464 int rc;
465 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
466 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
467 {
468 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
469 rc = VINF_SUCCESS;
470 }
471 else
472 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
473 *ppTlbe = pTlbe;
474 return rc;
475}
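/*
 * Editor's note (not part of the original header): sketch of the typical
 * physical TLB query pattern; the mapping address comes from the returned TLB
 * entry. Variable names and error handling are assumptions for illustration.
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = pTlbe->pv;   // page mapping in the current context
 *         // ...
 *     }
 */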
476
477
478/**
479 * Queries the Physical TLB entry for a physical guest page,
480 * attempting to load the TLB entry if necessary.
481 *
482 * @returns VBox status code.
483 * @retval VINF_SUCCESS on success
484 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
485 *
486 * @param pVM The cross context VM structure.
487 * @param pPage Pointer to the PGMPAGE structure corresponding to
488 * GCPhys.
489 * @param GCPhys The address of the guest page.
490 * @param ppTlbe Where to store the pointer to the TLB entry.
491 */
492DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
493{
494 int rc;
495 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
496 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
497 {
498 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
499 rc = VINF_SUCCESS;
500# if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
501# ifdef IN_RING3
502 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
503# else
504 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
505# endif
506 pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
507# endif
508 AssertPtr(pTlbe->pv);
509# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
510 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
511# endif
512 }
513 else
514 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
515 *ppTlbe = pTlbe;
516 return rc;
517}
518
519
520/**
521 * Calculates the NEM page protection flags (NEM_PAGE_PROT_XXX) for @a pPage of type @a enmType.
522 */
523DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
524{
525 /*
526 * Deal with potentially writable pages first.
527 */
528 if (PGMPAGETYPE_IS_RWX(enmType))
529 {
530 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
531 {
532 if (PGM_PAGE_IS_ALLOCATED(pPage))
533 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
534 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
535 }
536 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
537 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
538 }
539 /*
540 * Potentially readable & executable pages.
541 */
542 else if ( PGMPAGETYPE_IS_ROX(enmType)
543 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
544 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
545
546 /*
547 * The rest needs special access handling.
548 */
549 return NEM_PAGE_PROT_NONE;
550}
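/*
 * Editor's note (not part of the original header), summarizing the decision
 * logic above:
 *
 *     RWX page type, no active handlers, allocated   -> READ | WRITE | EXECUTE
 *     RWX page type, no active handlers, other state -> READ | EXECUTE
 *     RWX page type, write handlers only             -> READ | EXECUTE
 *     ROX page type, no all-access handlers          -> READ | EXECUTE
 *     everything else                                -> NONE
 */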
551
552#endif /* !IN_RC */
553
554/**
555 * Enables write monitoring for an allocated page.
556 *
557 * The caller is responsible for updating the shadow page tables.
558 *
559 * @param pVM The cross context VM structure.
560 * @param pPage The page to write monitor.
561 * @param GCPhysPage The address of the page.
562 */
563DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
564{
565 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
566 PGM_LOCK_ASSERT_OWNER(pVM);
567
568 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
569 pVM->pgm.s.cMonitoredPages++;
570
571 /* Large pages must be disabled. */
572 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
573 {
574 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
575 AssertFatal(pFirstPage);
576 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
577 {
578 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
579 pVM->pgm.s.cLargePagesDisabled++;
580 }
581 else
582 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
583 }
584
585#ifndef IN_RC
586 /* Tell NEM. */
587 if (VM_IS_NEM_ENABLED(pVM))
588 {
589 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
590 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
591 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
592 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
593 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
594 }
595#endif
596}
597
598
599/**
600 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
601 *
602 * Only used when the guest is in PAE or long mode. This is inlined so that we
603 * can perform consistency checks in debug builds.
604 *
605 * @returns true if it is, false if it isn't.
606 * @param pVCpu The cross context virtual CPU structure.
607 */
608DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
609{
610 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
611 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
612 return pVCpu->pgm.s.fNoExecuteEnabled;
613}
614
615
616/**
617 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
618 *
619 * Only used when the guest is in paged 32-bit mode. This is inlined so that
620 * we can perform consistency checks in debug builds.
621 *
622 * @returns true if it is, false if it isn't.
623 * @param pVCpu The cross context virtual CPU structure.
624 */
625DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
626{
627 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
628 Assert(!CPUMIsGuestInPAEMode(pVCpu));
629 Assert(!CPUMIsGuestInLongMode(pVCpu));
630 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
631}
632
633
634/**
635 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
636 * Takes PSE-36 into account.
637 *
638 * @returns guest physical address
639 * @param pVM The cross context VM structure.
640 * @param Pde Guest Pde
641 */
642DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde)
643{
644 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
645 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
646
647 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
648}
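/*
 * Editor's note (not part of the original header): worked example. With
 * PSE-36, PDE bits 13..20 (u8PageNoHigh) supply physical address bits 32..39.
 * A PDE whose 4 MB base bits (22..31) are zero and whose u8PageNoHigh is 0x01
 * therefore maps the 4 MB page at guest physical 0x1'0000'0000, subject to the
 * GCPhys4MBPSEMask clamp applied above.
 */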
649
650
651/**
652 * Gets the address of the guest page directory (32-bit paging).
653 *
654 * @returns VBox status code.
655 * @param pVCpu The cross context virtual CPU structure.
656 * @param ppPd Where to return the mapping. This is always set.
657 */
658DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
659{
660#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
661 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
662 if (RT_FAILURE(rc))
663 {
664 *ppPd = NULL;
665 return rc;
666 }
667#else
668 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
669 if (RT_UNLIKELY(!*ppPd))
670 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
671#endif
672 return VINF_SUCCESS;
673}
674
675
676/**
677 * Gets the address of the guest page directory (32-bit paging).
678 *
679 * @returns Pointer to the page directory entry in question.
680 * @param pVCpu The cross context virtual CPU structure.
681 */
682DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
683{
684#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
685 PX86PD pGuestPD = NULL;
686 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
687 if (RT_FAILURE(rc))
688 {
689 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
690 return NULL;
691 }
692#else
693 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
694 if (RT_UNLIKELY(!pGuestPD))
695 {
696 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
697 if (RT_FAILURE(rc))
698 return NULL;
699 }
700#endif
701 return pGuestPD;
702}
703
704
705/**
706 * Gets the guest page directory pointer table.
707 *
708 * @returns VBox status code.
709 * @param pVCpu The cross context virtual CPU structure.
710 * @param ppPdpt Where to return the mapping. This is always set.
711 */
712DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
713{
714#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
715 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
716 if (RT_FAILURE(rc))
717 {
718 *ppPdpt = NULL;
719 return rc;
720 }
721#else
722 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
723 if (RT_UNLIKELY(!*ppPdpt))
724 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
725#endif
726 return VINF_SUCCESS;
727}
728
729
730/**
731 * Gets the guest page directory pointer table.
732 *
733 * @returns Pointer to the page directory pointer table in question.
734 * @returns NULL if the page directory pointer table is not present or on an invalid page.
735 * @param pVCpu The cross context virtual CPU structure.
736 */
737DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
738{
739 PX86PDPT pGuestPdpt;
740 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
741 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
742 return pGuestPdpt;
743}
744
745
746/**
747 * Gets the guest page directory pointer table entry for the specified address.
748 *
749 * @returns Pointer to the page directory pointer table entry in question.
750 * @returns NULL if the page directory pointer table is not present or on an invalid page.
751 * @param pVCpu The cross context virtual CPU structure.
752 * @param GCPtr The address.
753 */
754DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
755{
756 AssertGCPtr32(GCPtr);
757
758#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
759 PX86PDPT pGuestPDPT = NULL;
760 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
761 AssertRCReturn(rc, NULL);
762#else
763 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
764 if (RT_UNLIKELY(!pGuestPDPT))
765 {
766 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
767 if (RT_FAILURE(rc))
768 return NULL;
769 }
770#endif
771 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
772}
773
774
775/**
776 * Gets the page directory entry for the specified address.
777 *
778 * @returns The page directory entry in question.
779 * @returns A non-present entry if the page directory is not present or on an invalid page.
780 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
781 * @param GCPtr The address.
782 */
783DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
784{
785 AssertGCPtr32(GCPtr);
786 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
787 if (RT_LIKELY(pGuestPDPT))
788 {
789 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
790 if ( pGuestPDPT->a[iPdpt].n.u1Present
791 && !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
792 {
793 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
794#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
795 PX86PDPAE pGuestPD = NULL;
796 int rc = pgmRZDynMapGCPageInlined(pVCpu,
797 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
798 (void **)&pGuestPD
799 RTLOG_COMMA_SRC_POS);
800 if (RT_SUCCESS(rc))
801 return pGuestPD->a[iPD];
802 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
803#else
804 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
805 if ( !pGuestPD
806 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
807 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
808 if (pGuestPD)
809 return pGuestPD->a[iPD];
810#endif
811 }
812 }
813
814 X86PDEPAE ZeroPde = {0};
815 return ZeroPde;
816}
817
818
819/**
820 * Gets the guest PAE page directory for the specified address and returns the
821 * index into it (and optionally the PDPT entry via pPdpe).
822 *
823 * @returns Pointer to the page directory in question.
824 * @returns NULL if the page directory is not present or on an invalid page.
825 * @param pVCpu The cross context virtual CPU structure.
826 * @param GCPtr The address.
827 * @param piPD Receives the index into the returned page directory
828 * @param pPdpe Receives the page directory pointer entry. Optional.
829 */
830DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
831{
832 AssertGCPtr32(GCPtr);
833
834 /* The PDPE. */
835 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
836 if (RT_UNLIKELY(!pGuestPDPT))
837 return NULL;
838 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
839 if (pPdpe)
840 *pPdpe = pGuestPDPT->a[iPdpt];
841 if (!pGuestPDPT->a[iPdpt].n.u1Present)
842 return NULL;
843 if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
844 return NULL;
845
846 /* The PDE. */
847#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
848 PX86PDPAE pGuestPD = NULL;
849 int rc = pgmRZDynMapGCPageInlined(pVCpu,
850 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
851 (void **)&pGuestPD
852 RTLOG_COMMA_SRC_POS);
853 if (RT_FAILURE(rc))
854 {
855 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
856 return NULL;
857 }
858#else
859 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
860 if ( !pGuestPD
861 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
862 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
863#endif
864
865 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
866 return pGuestPD;
867}
868
869#ifndef IN_RC
870
871/**
872 * Gets the page map level-4 pointer for the guest.
873 *
874 * @returns VBox status code.
875 * @param pVCpu The cross context virtual CPU structure.
876 * @param ppPml4 Where to return the mapping. Always set.
877 */
878DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
879{
880#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
881 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
882 if (RT_FAILURE(rc))
883 {
884 *ppPml4 = NULL;
885 return rc;
886 }
887#else
888 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
889 if (RT_UNLIKELY(!*ppPml4))
890 return pgmGstLazyMapPml4(pVCpu, ppPml4);
891#endif
892 return VINF_SUCCESS;
893}
894
895
896/**
897 * Gets the page map level-4 pointer for the guest.
898 *
899 * @returns Pointer to the PML4 page.
900 * @param pVCpu The cross context virtual CPU structure.
901 */
902DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
903{
904 PX86PML4 pGuestPml4;
905 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
906 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
907 return pGuestPml4;
908}
909
910
911/**
912 * Gets the pointer to a page map level-4 entry.
913 *
914 * @returns Pointer to the PML4 entry.
915 * @param pVCpu The cross context virtual CPU structure.
916 * @param iPml4 The index.
917 * @remarks Only used by AssertCR3.
918 */
919DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
920{
921#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
922 PX86PML4 pGuestPml4;
923 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
924 AssertRCReturn(rc, NULL);
925#else
926 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
927 if (RT_UNLIKELY(!pGuestPml4))
928 {
929 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
930 AssertRCReturn(rc, NULL);
931 }
932#endif
933 return &pGuestPml4->a[iPml4];
934}
935
936
937/**
938 * Gets the page directory entry for the specified address.
939 *
940 * @returns The page directory entry in question.
941 * @returns A non-present entry if the page directory is not present or on an invalid page.
942 * @param pVCpu The cross context virtual CPU structure.
943 * @param GCPtr The address.
944 */
945DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
946{
947 /*
948 * Note! To keep things simple, ASSUME invalid physical addresses will
949 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
950 * supporting 52-bit wide physical guest addresses.
951 */
952 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
953 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
954 if ( RT_LIKELY(pGuestPml4)
955 && pGuestPml4->a[iPml4].n.u1Present
956 && !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
957 {
958 PCX86PDPT pPdptTemp;
959 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
960 if (RT_SUCCESS(rc))
961 {
962 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
963 if ( pPdptTemp->a[iPdpt].n.u1Present
964 && !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
965 {
966 PCX86PDPAE pPD;
967 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
968 if (RT_SUCCESS(rc))
969 {
970 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
971 return pPD->a[iPD];
972 }
973 }
974 }
975 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
976 }
977
978 X86PDEPAE ZeroPde = {0};
979 return ZeroPde;
980}
981
982
983/**
984 * Gets the GUEST page directory pointer for the specified address.
985 *
986 * @returns The page directory in question.
987 * @returns NULL if the page directory is not present or on an invalid page.
988 * @param pVCpu The cross context virtual CPU structure.
989 * @param GCPtr The address.
990 * @param ppPml4e Page Map Level-4 Entry (out)
991 * @param pPdpe Page directory pointer table entry (out)
992 * @param piPD Receives the index into the returned page directory
993 */
994DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
995{
996 /* The PML4E. */
997 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
998 if (RT_UNLIKELY(!pGuestPml4))
999 return NULL;
1000 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1001 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
1002 if (!pPml4e->n.u1Present)
1003 return NULL;
1004 if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
1005 return NULL;
1006
1007 /* The PDPE. */
1008 PCX86PDPT pPdptTemp;
1009 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
1010 if (RT_FAILURE(rc))
1011 {
1012 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1013 return NULL;
1014 }
1015 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1016 *pPdpe = pPdptTemp->a[iPdpt];
1017 if (!pPdpe->n.u1Present)
1018 return NULL;
1019 if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
1020 return NULL;
1021
1022 /* The PDE. */
1023 PX86PDPAE pPD;
1024 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
1025 if (RT_FAILURE(rc))
1026 {
1027 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1028 return NULL;
1029 }
1030
1031 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1032 return pPD;
1033}
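/*
 * Editor's note (not part of the original header): sketch of how a caller
 * walks to a long mode PDE with this helper; all variable names are
 * assumptions for illustration only.
 *
 *     PX86PML4E pPml4e;
 *     X86PDPE   Pdpe;
 *     unsigned  iPD;
 *     PX86PDPAE pPD = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4e, &Pdpe, &iPD);
 *     X86PDEPAE Pde;
 *     Pde.u = pPD ? pPD->a[iPD].u : 0;
 */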
1034
1035#endif /* !IN_RC */
1036
1037/**
1038 * Gets the shadow page directory, 32-bit.
1039 *
1040 * @returns Pointer to the shadow 32-bit PD.
1041 * @param pVCpu The cross context virtual CPU structure.
1042 */
1043DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
1044{
1045 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1046}
1047
1048
1049/**
1050 * Gets the shadow page directory entry for the specified address, 32-bit.
1051 *
1052 * @returns Shadow 32-bit PDE.
1053 * @param pVCpu The cross context virtual CPU structure.
1054 * @param GCPtr The address.
1055 */
1056DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1057{
1058 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1059 if (!pShwPde)
1060 {
1061 X86PDE ZeroPde = {0};
1062 return ZeroPde;
1063 }
1064 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1065}
1066
1067
1068/**
1069 * Gets the pointer to the shadow page directory entry for the specified
1070 * address, 32-bit.
1071 *
1072 * @returns Pointer to the shadow 32-bit PDE.
1073 * @param pVCpu The cross context virtual CPU structure.
1074 * @param GCPtr The address.
1075 */
1076DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1077{
1078 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1079 AssertReturn(pPde, NULL);
1080 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1081}
1082
1083
1084/**
1085 * Gets the shadow page directory pointer table, PAE.
1086 *
1087 * @returns Pointer to the shadow PAE PDPT.
1088 * @param pVCpu The cross context virtual CPU structure.
1089 */
1090DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
1091{
1092 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1093}
1094
1095
1096/**
1097 * Gets the shadow page directory for the specified address, PAE.
1098 *
1099 * @returns Pointer to the shadow PD.
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param GCPtr The address.
1102 */
1103DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1104{
1105 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1106 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1107
1108 if (!pPdpt->a[iPdpt].n.u1Present)
1109 return NULL;
1110
1111 /* Fetch the pgm pool shadow descriptor. */
1112 PVM pVM = pVCpu->CTX_SUFF(pVM);
1113 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1114 AssertReturn(pShwPde, NULL);
1115
1116 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1117}
1118
1119
1120/**
1121 * Gets the shadow page directory for the specified address, PAE.
1122 *
1123 * @returns Pointer to the shadow PD.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 * @param pPdpt Pointer to the page directory pointer table.
1126 * @param GCPtr The address.
1127 */
1128DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1129{
1130 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1131
1132 if (!pPdpt->a[iPdpt].n.u1Present)
1133 return NULL;
1134
1135 /* Fetch the pgm pool shadow descriptor. */
1136 PVM pVM = pVCpu->CTX_SUFF(pVM);
1137 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1138 AssertReturn(pShwPde, NULL);
1139
1140 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1141}
1142
1143
1144/**
1145 * Gets the shadow page directory entry, PAE.
1146 *
1147 * @returns PDE.
1148 * @param pVCpu The cross context virtual CPU structure.
1149 * @param GCPtr The address.
1150 */
1151DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1152{
1153 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1154
1155 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1156 if (!pShwPde)
1157 {
1158 X86PDEPAE ZeroPde = {0};
1159 return ZeroPde;
1160 }
1161 return pShwPde->a[iPd];
1162}
1163
1164
1165/**
1166 * Gets the pointer to the shadow page directory entry for an address, PAE.
1167 *
1168 * @returns Pointer to the PDE.
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param GCPtr The address.
1171 * @remarks Only used by AssertCR3.
1172 */
1173DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1174{
1175 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1176
1177 PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1178 AssertReturn(pPde, NULL);
1179 return &pPde->a[iPd];
1180}
1181
1182#ifndef IN_RC
1183
1184/**
1185 * Gets the shadow page map level-4 pointer.
1186 *
1187 * @returns Pointer to the shadow PML4.
1188 * @param pVCpu The cross context virtual CPU structure.
1189 */
1190DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
1191{
1192 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1193}
1194
1195
1196/**
1197 * Gets the shadow page map level-4 entry for the specified address.
1198 *
1199 * @returns The entry.
1200 * @param pVCpu The cross context virtual CPU structure.
1201 * @param GCPtr The address.
1202 */
1203DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
1204{
1205 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1206 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1207
1208 if (!pShwPml4)
1209 {
1210 X86PML4E ZeroPml4e = {0};
1211 return ZeroPml4e;
1212 }
1213 return pShwPml4->a[iPml4];
1214}
1215
1216
1217/**
1218 * Gets the pointer to the specified shadow page map level-4 entry.
1219 *
1220 * @returns The entry.
1221 * @param pVCpu The cross context virtual CPU structure.
1222 * @param iPml4 The PML4 index.
1223 */
1224DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
1225{
1226 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1227 if (!pShwPml4)
1228 return NULL;
1229 return &pShwPml4->a[iPml4];
1230}
1231
1232#endif /* !IN_RC */
1233
1234/**
1235 * Cached physical handler lookup.
1236 *
1237 * @returns Physical handler covering @a GCPhys.
1238 * @param pVM The cross context VM structure.
1239 * @param GCPhys The lookup address.
1240 */
1241DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
1242{
1243 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1244 if ( pHandler
1245 && GCPhys >= pHandler->Core.Key
1246 && GCPhys < pHandler->Core.KeyLast)
1247 {
1248 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1249 return pHandler;
1250 }
1251
1252 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1253 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1254 if (pHandler)
1255 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1256 return pHandler;
1257}
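/*
 * Editor's note (not part of the original header): the one-entry cache means
 * repeated lookups hitting the same handler range (e.g. a burst of MMIO
 * accesses) skip the AVL tree walk. Illustrative sketch, names assumed:
 *
 *     PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
 *     if (pHandler)
 *         Log(("covered by %s: %RGp-%RGp\n", R3STRING(pHandler->pszDesc), pHandler->Core.Key, pHandler->Core.KeyLast));
 */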
1258
1259
1260#ifdef VBOX_WITH_RAW_MODE
1261/**
1262 * Clears one physical page of a virtual handler.
1263 *
1264 * @param pVM The cross context VM structure.
1265 * @param pCur Virtual handler structure.
1266 * @param iPage Physical page index.
1267 *
1268 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
1269 * need to care about other handlers in the same page.
1270 */
1271DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage)
1272{
1273 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1274
1275 /*
1276 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
1277 */
1278# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1279 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1280 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1281 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1282# endif
1283 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
1284 {
1285 /* We're the head of the alias chain. */
1286 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
1287# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1288 AssertReleaseMsg(pRemove != NULL,
1289 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1290 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1291 AssertReleaseMsg(pRemove == pPhys2Virt,
1292 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
1293 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1294 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
1295 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
1296# endif
1297 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1298 {
1299 /* Insert the next list in the alias chain into the tree. */
1300 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1301# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1302 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1303 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1304 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
1305# endif
1306 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
1307 bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
1308 AssertRelease(fRc);
1309 }
1310 }
1311 else
1312 {
1313 /* Locate the previous node in the alias chain. */
1314 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1315# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1316 AssertReleaseMsg(pPrev != pPhys2Virt,
1317 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1318 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1319# endif
1320 for (;;)
1321 {
1322 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1323 if (pNext == pPhys2Virt)
1324 {
1325 /* unlink. */
1326 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
1327 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1328 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1329 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
1330 else
1331 {
1332 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1333 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
1334 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1335 }
1336 break;
1337 }
1338
1339 /* next */
1340 if (pNext == pPrev)
1341 {
1342# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1343 AssertReleaseMsg(pNext != pPrev,
1344 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1345 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1346# endif
1347 break;
1348 }
1349 pPrev = pNext;
1350 }
1351 }
1352 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
1353 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1354 pPhys2Virt->offNextAlias = 0;
1355 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
1356
1357 /*
1358 * Clear the ram flags for this page.
1359 */
1360 PPGMPAGE pPage = pgmPhysGetPage(pVM, pPhys2Virt->Core.Key);
1361 AssertReturnVoid(pPage);
1362 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
1363}
1364#endif /* VBOX_WITH_RAW_MODE */
1365
1366
1367/**
1368 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
1369 *
1370 * @returns Pointer to the shadow page structure.
1371 * @param pPool The pool.
1372 * @param idx The pool page index.
1373 */
1374DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1375{
1376 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1377 return &pPool->aPages[idx];
1378}
1379
1380
1381/**
1382 * Clears references to guest physical memory.
1383 *
1384 * @param pPool The pool.
1385 * @param pPoolPage The pool page.
1386 * @param pPhysPage The physical guest page tracking structure.
1387 * @param iPte Shadow PTE index
1388 */
1389DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1390{
1391 /*
1392 * Just deal with the simple case here.
1393 */
1394# ifdef VBOX_STRICT
1395 PVM pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1396# endif
1397# ifdef LOG_ENABLED
1398 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1399# endif
1400 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1401 if (cRefs == 1)
1402 {
1403 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1404 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1405 /* Invalidate the tracking data. */
1406 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1407 }
1408 else
1409 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1410 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1411}
1412
1413
1414/**
1415 * Moves the page to the head of the age list.
1416 *
1417 * This is done when the cached page is used in one way or another.
1418 *
1419 * @param pPool The pool.
1420 * @param pPage The cached page.
1421 */
1422DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1423{
1424 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1425
1426 /*
1427 * Move to the head of the age list.
1428 */
1429 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1430 {
1431 /* unlink */
1432 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1433 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1434 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1435 else
1436 pPool->iAgeTail = pPage->iAgePrev;
1437
1438 /* insert at head */
1439 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1440 pPage->iAgeNext = pPool->iAgeHead;
1441 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1442 pPool->iAgeHead = pPage->idx;
1443 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1444 }
1445}
1446
1447
1448/**
1449 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1450 *
1451 * @param pPool The pool.
1452 * @param pPage PGM pool page
1453 */
1454DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1455{
1456 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1457 ASMAtomicIncU32(&pPage->cLocked);
1458}
1459
1460
1461/**
1462 * Unlocks a page to allow flushing again.
1463 *
1464 * @param pPool The pool.
1465 * @param pPage PGM pool page
1466 */
1467DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1468{
1469 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1470 Assert(pPage->cLocked);
1471 ASMAtomicDecU32(&pPage->cLocked);
1472}
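/*
 * Editor's note (not part of the original header): lock and unlock come in
 * pairs around code during which the shadow pool page must not be flushed
 * (e.g. while it serves as the active CR3 root or a PAE PD). Sketch only:
 *
 *     pgmPoolLockPage(pPool, pPage);
 *     // ... use the shadow page; it cannot be flushed while cLocked > 0 ...
 *     pgmPoolUnlockPage(pPool, pPage);
 */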
1473
1474
1475/**
1476 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1477 *
1478 * @returns true if the page is locked, false if not.
1479 * @param pPage PGM pool page
1480 */
1481DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1482{
1483 if (pPage->cLocked)
1484 {
1485 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1486 if (pPage->cModifications)
1487 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1488 return true;
1489 }
1490 return false;
1491}
1492
1493
1494/**
1495 * Tells if mappings are to be put into the shadow page table or not.
1496 *
1497 * @returns boolean result
1498 * @param pVM The cross context VM structure.
1499 */
1500DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
1501{
1502#ifdef PGM_WITHOUT_MAPPINGS
1503 /* Only raw-mode has mappings. */
1504 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1505 return false;
1506#else
1507 Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1508 return VM_IS_RAW_MODE_ENABLED(pVM);
1509#endif
1510}
1511
1512
1513/**
1514 * Checks if the mappings are floating and enabled.
1515 *
1516 * @returns true / false.
1517 * @param pVM The cross context VM structure.
1518 */
1519DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
1520{
1521#ifdef PGM_WITHOUT_MAPPINGS
1522 /* Only raw-mode has mappings. */
1523 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1524 return false;
1525#else
1526 return !pVM->pgm.s.fMappingsFixed
1527 && pgmMapAreMappingsEnabled(pVM);
1528#endif
1529}
1530
1531/** @} */
1532
1533#endif /* !___PGMInline_h */
1534