VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h @ 91854

Last change on this file since 91854 was 91854, checked in by vboxsync, 3 years ago:

VMM: Removed PGM_WITHOUT_MAPPINGS and associated mapping code. bugref:9517
/* $Id: PGMInline.h 91854 2021-10-20 00:50:11Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
#define VMM_INCLUDED_SRC_include_PGMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/vmm/stam.h>
#include <VBox/param.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/log.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/sha.h>



/** @addtogroup grp_pgm_int Internals
 * @internal
 * @{
 */

/**
 * Gets the PGMRAMRANGE structure for a guest page.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
        return pgmPhysGetRangeSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
    return pRam;
}


/**
 * Gets the PGMRAMRANGE structure for a guest page; if the address is unassigned,
 * gets the RAM range above it.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL if the address is located after the last range.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (   !pRam
        || (GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
    return pRam;
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 * @param   ppPage  Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
    return VINF_SUCCESS;
}
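
/* Usage sketch (editor's illustration, not part of the original file): typical
 * callers translate a guest physical address into its PGMPAGE and propagate the
 * failure status unchanged, presumably with the PGM lock already held.  The
 * local fWritable is hypothetical.
 *
 * @code
 *      PPGMPAGE pPage;
 *      int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *      if (RT_SUCCESS(rc))
 *      {
 *          bool const fWritable = PGM_PAGE_IS_ALLOCATED(pPage);
 *          // ... use pPage / fWritable ...
 *      }
 *      else
 *          return rc;  // VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
 * @endcode
 */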


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (   !pRam
        || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
        if (   !pRam
            || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
            return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);

        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}
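
/* Usage sketch (editor's illustration, not part of the original file): the hint
 * variant is meant for loops that touch many pages in the same RAM range, so the
 * range found for one page is reused for the next without going through the TLB.
 * GCPhysFirst and cPages are assumed caller-provided values.
 *
 * @code
 *      PPGMRAMRANGE pRamHint = NULL;   // the caller must start with NULL
 *      for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *      {
 *          PPGMPAGE pPage;
 *          int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                            &pPage, &pRamHint);
 *          if (RT_FAILURE(rc))
 *              return rc;
 *          // ... operate on pPage ...
 *      }
 * @endcode
 */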


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 * @param   ppPage  Where to store the pointer to the PGMPAGE structure.
 * @param   ppRam   Where to store the pointer to the PGMRAMRANGE structure.
 */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);

    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
    *ppRam = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Converts GC Phys to HC Phys.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The GC physical address.
 * @param   pHCPhys Where to store the corresponding HC physical address.
 *
 * @deprecated  Doesn't deal with zero, shared or write monitored pages.
 *              Avoid when writing new code!
 */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return rc;
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The address of the guest page.
 * @param   ppTlbe  Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}
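
/* Usage sketch (editor's illustration, not part of the original file): after a
 * successful query the TLB entry gives access to the mapped page, so a host
 * pointer can be formed from the page offset.  pTlbe->pv is the page mapping as
 * used elsewhere in this file; the offset arithmetic is an assumption.
 *
 * @code
 *      PPGMPAGEMAPTLBE pTlbe;
 *      int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *      if (RT_SUCCESS(rc))
 *      {
 *          void *pv = (uint8_t *)pTlbe->pv + (GCPhys & PAGE_OFFSET_MASK);
 *          // ... access the page via pv while holding the PGM lock ...
 *      }
 * @endcode
 */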


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pPage   Pointer to the PGMPAGE structure corresponding to
 *                  GCPhys.
 * @param   GCPhys  The address of the guest page.
 * @param   ppTlbe  Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
        AssertPtr(pTlbe->pv);
#ifdef IN_RING3
        Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
#endif
    }
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}


/**
 * Calculates NEM page protection flags.
 */
DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
{
    /*
     * Deal with potentially writable pages first.
     */
    if (PGMPAGETYPE_IS_RWX(enmType))
    {
        if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
        {
            if (PGM_PAGE_IS_ALLOCATED(pPage))
                return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
            return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
        }
        if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
            return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
    }
    /*
     * Potentially readable & executable pages.
     */
    else if (   PGMPAGETYPE_IS_ROX(enmType)
             && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
        return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;

    /*
     * The rest needs special access handling.
     */
    return NEM_PAGE_PROT_NONE;
}


/**
 * Enables write monitoring for an allocated page.
 *
 * The caller is responsible for updating the shadow page tables.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pPage       The page to write monitor.
 * @param   GCPhysPage  The address of the page.
 */
DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
{
    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    PGM_LOCK_ASSERT_OWNER(pVM);

    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
    pVM->pgm.s.cMonitoredPages++;

    /* Large pages must be disabled. */
    if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
    {
        PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
        AssertFatal(pFirstPage);
        if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
        {
            PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
            pVM->pgm.s.cLargePagesDisabled++;
        }
        else
            Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /* Tell NEM. */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
        PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
        NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                       pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
                                       pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#endif
}


/**
 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
 *
 * Only used when the guest is in PAE or long mode. This is inlined so that we
 * can perform consistency checks in debug builds.
 *
 * @returns true if it is, false if it isn't.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
{
    Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
    Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
    return pVCpu->pgm.s.fNoExecuteEnabled;
}


/**
 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
 *
 * Only used when the guest is in paged 32-bit mode. This is inlined so that
 * we can perform consistency checks in debug builds.
 *
 * @returns true if it is, false if it isn't.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
{
    Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
    Assert(!CPUMIsGuestInPAEMode(pVCpu));
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    return pVCpu->pgm.s.fGst32BitPageSizeExtension;
}


/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
 * Takes PSE-36 into account.
 *
 * @returns guest physical address
 * @param   pVM     The cross context VM structure.
 * @param   Pde     Guest Pde
 */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
{
    RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;

    return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
}
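
/* Worked example (editor's illustration, not part of the original file), assuming
 * the usual PSE-36 layout where PDE bits 31:22 supply physical address bits 31:22
 * and PDE bits 20:13 supply the high physical address bits (32 and up), with the
 * result clipped by GCPhys4MBPSEMask to what the virtual CPU can really address:
 *
 * @code
 *      Pde.u = 0x40004083;     // P + R/W + PS, base bits 31:22 = 0x40000000, high bit 14 set
 *      // low part:  Pde.u & X86_PDE4M_PG_MASK                              -> 0x040000000
 *      // high part: (Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT
 *      //            PDE bit 14 becomes physical address bit 33             -> 0x200000000
 *      // pgmGstGet4MBPhysPage(pVM, Pde) == 0x240000000 if GCPhys4MBPSEMask permits it
 * @endcode
 */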


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   ppPd    Where to return the mapping. This is always set.
 */
DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
{
    *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!*ppPd))
        return pgmGstLazyMap32BitPD(pVCpu, ppPd);
    return VINF_SUCCESS;
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns Pointer to the page directory entry in question.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
{
    PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
    {
        int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
        if (RT_FAILURE(rc))
            return NULL;
    }
    return pGuestPD;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   ppPdpt  Where to return the mapping. This is always set.
 */
DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
{
    *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!*ppPdpt))
        return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
    return VINF_SUCCESS;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
{
    PX86PDPT pGuestPdpt;
    int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
    return pGuestPdpt;
}


/**
 * Gets the guest page directory pointer table entry for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!pGuestPDPT))
    {
        int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
        if (RT_FAILURE(rc))
            return NULL;
    }
    return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr   The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (RT_LIKELY(pGuestPDPT))
    {
        const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
        if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
        {
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
            if (   !pGuestPD
                || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
                pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
            if (pGuestPD)
                return pGuestPD->a[iPD];
        }
    }

    X86PDEPAE ZeroPde = {0};
    return ZeroPde;
}


/**
 * Gets the guest page directory for the specified address and returns the index
 * into it (optionally also the page directory pointer table entry).
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 * @param   piPD    Receives the index into the returned page directory
 * @param   pPdpe   Receives the page directory pointer entry. Optional.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    AssertGCPtr32(GCPtr);

    /* The PDPE. */
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (pGuestPDPT)
    {
        const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
        X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
        if (pPdpe)
            pPdpe->u = uPdpe;
        if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
        {

            /* The PDE. */
            PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
            if (   !pGuestPD
                || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
                pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pGuestPD;
        }
    }
    return NULL;
}


/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   ppPml4  Where to return the mapping. Always set.
 */
DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
{
    *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!*ppPml4))
        return pgmGstLazyMapPml4(pVCpu, ppPml4);
    return VINF_SUCCESS;
}


/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
{
    PX86PML4 pGuestPml4;
    int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
    return pGuestPml4;
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   iPml4   The index.
 * @remarks Only used by AssertCR3.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
{
    PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
    if (pGuestPml4)
    { /* likely */ }
    else
    {
        int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
        AssertRCReturn(rc, NULL);
    }
    return &pGuestPml4->a[iPml4];
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
{
    /*
     * Note! To keep things simple, ASSUME invalid physical addresses will
     *       cause X86_TRAP_PF_RSVD. This isn't a problem until we start
     *       supporting 52-bit wide physical guest addresses.
     */
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    if (RT_LIKELY(pGuestPml4))
    {
        const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
        X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
        if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
        {
            PCX86PDPT pPdptTemp;
            int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
            if (RT_SUCCESS(rc))
            {
                const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
                X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
                if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
                {
                    PCX86PDPAE pPD;
                    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
                    if (RT_SUCCESS(rc))
                    {
                        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
                        return pPD->a[iPD];
                    }
                }
            }
            AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        }
    }

    X86PDEPAE ZeroPde = {0};
    return ZeroPde;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 * @param   ppPml4e Page Map Level-4 Entry (out)
 * @param   pPdpe   Page directory pointer table entry (out)
 * @param   piPD    Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    /* The PML4E. */
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    if (pGuestPml4)
    {
        const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
        *ppPml4e = &pGuestPml4->a[iPml4];
        X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
        if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
        {
            /* The PDPE. */
            PCX86PDPT pPdptTemp;
            int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
            if (RT_SUCCESS(rc))
            {
                const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
                X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
                pPdpe->u = uPdpe;
                if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
                {
                    /* The PDE. */
                    PX86PDPAE pPD;
                    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
                    if (RT_SUCCESS(rc))
                    {
                        *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
                        return pPD;
                    }
                    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
                }
            }
            else
                AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        }
    }
    return NULL;
}


/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
{
    return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
    AssertReturn(pPde, NULL);
    return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
{
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pPdpt   Pointer to the page directory pointer table.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
    if (pPdpt->a[iPdpt].u & X86_PDPE_P)
    {
        /* Fetch the pgm pool shadow descriptor. */
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPde, NULL);

        return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
    }
    return NULL;
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
    if (pShwPde)
        return pShwPde->a[iPd];

    X86PDEPAE ZeroPde = {0};
    return ZeroPde;
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 * @remarks Only used by AssertCR3.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
    AssertReturn(pShwPde, NULL);
    return &pShwPde->a[iPd];
}


/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
{
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
    if (pShwPml4)
        return pShwPml4->a[iPml4];

    X86PML4E ZeroPml4e = {0};
    return ZeroPml4e;
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns The entry.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   iPml4   The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
    if (pShwPml4)
        return &pShwPml4->a[iPml4];
    return NULL;
}


/**
 * Cached physical handler lookup.
 *
 * @returns Physical handler covering @a GCPhys.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The lookup address.
 */
DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
    if (   pHandler
        && GCPhys >= pHandler->Core.Key
        && GCPhys < pHandler->Core.KeyLast)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
        return pHandler;
    }

    STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
    pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pHandler)
        pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
    return pHandler;
}
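
/* Usage sketch (editor's illustration, not part of the original file): the
 * single-entry cache in front of the AVL tree pays off when the same handler
 * range is queried repeatedly, e.g. when deciding whether an access has to be
 * routed to a registered access handler.
 *
 * @code
 *      PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhys);
 *      if (pHandler)
 *      {
 *          // GCPhys is covered by the returned handler range
 *          // ... dispatch to the registered handler ...
 *      }
 * @endcode
 */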


/**
 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool   The pool.
 * @param   idx     The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


/**
 * Clear references to guest physical memory.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 * @param   iPte        Shadow PTE index
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef VBOX_STRICT
    PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
#endif
#ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
#endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
        /* Invalidate the tracking data. */
        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
    Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
}


/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));

    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}


/**
 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
 *
 * @param   pPool   The pool.
 * @param   pPage   PGM pool page
 */
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
    ASMAtomicIncU32(&pPage->cLocked);
}


/**
 * Unlocks a page to allow flushing again
 *
 * @param   pPool   The pool.
 * @param   pPage   PGM pool page
 */
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
    Assert(pPage->cLocked);
    ASMAtomicDecU32(&pPage->cLocked);
}


/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
 *
 * @returns true if the page is locked, false if not.
 * @param   pPage   PGM pool page
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}


/**
 * Checks if the specified page is dirty (not write monitored).
 *
 * @return  dirty or not
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Guest physical address
 */
DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PGM_LOCK_ASSERT_OWNER(pVM);
    if (!pPool->cDirtyPages)
        return false;
    return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
}


/** @} */

#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */