VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@ 89088

Last change on this file since 89088 was 87141, checked in by vboxsync, 4 years ago

VMM: Remove VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 and the code it encloses as it is unused since the removal of x86 darwin support

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.1 KB
1/* $Id: PGMInline.h 87141 2020-12-29 19:12:45Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
19#define VMM_INCLUDED_SRC_include_PGMInline_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/err.h>
27#include <VBox/vmm/stam.h>
28#include <VBox/param.h>
29#include <VBox/vmm/vmm.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/pdmcritsect.h>
32#include <VBox/vmm/pdmapi.h>
33#include <VBox/dis.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/log.h>
36#include <VBox/vmm/gmm.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/nem.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
47/** @addtogroup grp_pgm_int Internals
48 * @internal
49 * @{
50 */
51
52/**
53 * Gets the PGMRAMRANGE structure for a guest page.
54 *
55 * @returns Pointer to the RAM range on success.
56 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
57 *
58 * @param pVM The cross context VM structure.
59 * @param GCPhys The GC physical address.
60 */
61DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
62{
63 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
64 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
65 return pgmPhysGetRangeSlow(pVM, GCPhys);
66 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
67 return pRam;
68}
69
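/*
 * Illustrative usage sketch (hypothetical caller, made-up wrapper name): shows
 * the typical pattern of resolving a guest physical address to its RAM range
 * and page, treating a NULL range as an unassigned/MMIO address.
 */
#if 0 /* usage sketch only */
DECLINLINE(int) pgmSampleQueryPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (!pRam)
        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    /* ... inspect or use pPage here ... */
    NOREF(pPage);
    return VINF_SUCCESS;
}
#endif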
70
71/**
72 * Gets the PGMRAMRANGE structure for a guest page; if the page is unassigned,
73 * gets the RAM range above it.
74 *
75 * @returns Pointer to the RAM range on success.
76 * @returns NULL if the address is located after the last range.
77 *
78 * @param pVM The cross context VM structure.
79 * @param GCPhys The GC physical address.
80 */
81DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
82{
83 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
84 if ( !pRam
85 || (GCPhys - pRam->GCPhys) >= pRam->cb)
86 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
87 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
88 return pRam;
89}
90
91
92/**
93 * Gets the PGMPAGE structure for a guest page.
94 *
95 * @returns Pointer to the page on success.
96 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
97 *
98 * @param pVM The cross context VM structure.
99 * @param GCPhys The GC physical address.
100 */
101DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
102{
103 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
104 RTGCPHYS off;
105 if ( !pRam
106 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
107 return pgmPhysGetPageSlow(pVM, GCPhys);
108 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
109 return &pRam->aPages[off >> PAGE_SHIFT];
110}
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * Old Phys code: Will make sure the page is present.
117 *
118 * @returns VBox status code.
119 * @retval VINF_SUCCESS and a valid *ppPage on success.
120 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
121 *
122 * @param pVM The cross context VM structure.
123 * @param GCPhys The GC physical address.
124 * @param ppPage Where to store the page pointer on success.
125 */
126DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
127{
128 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
129 RTGCPHYS off;
130 if ( !pRam
131 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
132 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
133 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
134 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
135 return VINF_SUCCESS;
136}
137
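/*
 * Illustrative usage sketch (pVM and GCPhys are placeholders): the Ex variant
 * reports an invalid address through the status code instead of a NULL return,
 * so callers can propagate the error directly.
 */
#if 0 /* usage sketch only */
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
    {
        /* pPage is valid here and points into the owning PGMRAMRANGE. */
    }
    else
        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
#endif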
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 * @param ppRamHint Where to read and store the ram list hint.
152 * The caller initializes this to NULL before the call.
153 */
154DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
155{
156 RTGCPHYS off;
157 PPGMRAMRANGE pRam = *ppRamHint;
158 if ( !pRam
159 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
160 {
161 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
162 if ( !pRam
163 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
164 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
165
166 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
167 *ppRamHint = pRam;
168 }
169 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
170 return VINF_SUCCESS;
171}
172
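/*
 * Illustrative usage sketch: the hint must be initialized to NULL by the caller
 * and is then carried across consecutive lookups, saving the TLB/AVL work when
 * scanning physically contiguous memory.  GCPhysFirst, GCPhysLast and pVM are
 * placeholders.
 */
#if 0 /* usage sketch only */
    PPGMRAMRANGE pRamHint = NULL;
    for (RTGCPHYS GCPhysCur = GCPhysFirst; GCPhysCur < GCPhysLast; GCPhysCur += PAGE_SIZE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysCur, &pPage, &pRamHint);
        if (RT_FAILURE(rc))
            break;
        /* ... process pPage ... */
    }
#endif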
173
174/**
175 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
176 *
177 * @returns Pointer to the page on success.
178 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
179 *
180 * @param pVM The cross context VM structure.
181 * @param GCPhys The GC physical address.
182 * @param ppPage Where to store the pointer to the PGMPAGE structure.
183 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
184 */
185DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
186{
187 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
188 RTGCPHYS off;
189 if ( !pRam
190 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
191 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
192
193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
194 *ppRam = pRam;
195 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
196 return VINF_SUCCESS;
197}
198
199
200/**
201 * Convert GC Phys to HC Phys.
202 *
203 * @returns VBox status code.
204 * @param pVM The cross context VM structure.
205 * @param GCPhys The GC physical address.
206 * @param pHCPhys Where to store the corresponding HC physical address.
207 *
208 * @deprecated Doesn't deal with zero, shared or write monitored pages.
209 * Avoid when writing new code!
210 */
211DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
212{
213 PPGMPAGE pPage;
214 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
215 if (RT_FAILURE(rc))
216 return rc;
217 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
218 return VINF_SUCCESS;
219}
220
221
222/**
223 * Queries the Physical TLB entry for a physical guest page,
224 * attempting to load the TLB entry if necessary.
225 *
226 * @returns VBox status code.
227 * @retval VINF_SUCCESS on success
228 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
229 *
230 * @param pVM The cross context VM structure.
231 * @param GCPhys The address of the guest page.
232 * @param ppTlbe Where to store the pointer to the TLB entry.
233 */
234DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
235{
236 int rc;
237 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
238 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
239 {
240 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
241 rc = VINF_SUCCESS;
242 }
243 else
244 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
245 *ppTlbe = pTlbe;
246 return rc;
247}
248
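/*
 * Illustrative usage sketch (pVM and GCPhys are placeholders): on success the
 * TLB entry provides the current-context mapping of the guest page, which may
 * be accessed for up to PAGE_SIZE bytes.
 */
#if 0 /* usage sketch only */
    PPGMPAGEMAPTLBE pTlbe;
    int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        void *pvPage = pTlbe->pv;   /* Mapping of the whole guest page. */
        /* ... read/write within the page ... */
        NOREF(pvPage);
    }
#endif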
249
250/**
251 * Queries the Physical TLB entry for a physical guest page,
252 * attempting to load the TLB entry if necessary.
253 *
254 * @returns VBox status code.
255 * @retval VINF_SUCCESS on success
256 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
257 *
258 * @param pVM The cross context VM structure.
259 * @param pPage Pointer to the PGMPAGE structure corresponding to
260 * GCPhys.
261 * @param GCPhys The address of the guest page.
262 * @param ppTlbe Where to store the pointer to the TLB entry.
263 */
264DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
265{
266 int rc;
267 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
268 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
269 {
270 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
271 rc = VINF_SUCCESS;
272 AssertPtr(pTlbe->pv);
273#if defined(IN_RING3) || !defined(VBOX_WITH_RAM_IN_KERNEL)
274 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
275#endif
276 }
277 else
278 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
279 *ppTlbe = pTlbe;
280 return rc;
281}
282
283
284/**
285 * Calculates NEM page protection flags.
286 */
287DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
288{
289 /*
290 * Deal with potentially writable pages first.
291 */
292 if (PGMPAGETYPE_IS_RWX(enmType))
293 {
294 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
295 {
296 if (PGM_PAGE_IS_ALLOCATED(pPage))
297 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
298 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
299 }
300 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
301 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
302 }
303 /*
304 * Potentially readable & executable pages.
305 */
306 else if ( PGMPAGETYPE_IS_ROX(enmType)
307 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
308 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
309
310 /*
311 * The rest needs special access handling.
312 */
313 return NEM_PAGE_PROT_NONE;
314}
315
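/*
 * Illustrative sketch of how the result is typically consumed (mirrors the
 * pattern in pgmPhysPageWriteMonitor below; pVM, pPage and GCPhysPage are
 * placeholders): the computed flags are pushed to NEM together with the page
 * type and NEM state.
 */
#if 0 /* usage sketch only */
    PGMPAGETYPE const enmType  = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
    uint32_t const    fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
    uint8_t           u2State  = PGM_PAGE_GET_NEM_STATE(pPage);
    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                   fNemProt, enmType, &u2State);
    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
#endif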
316
317/**
318 * Enables write monitoring for an allocated page.
319 *
320 * The caller is responsible for updating the shadow page tables.
321 *
322 * @param pVM The cross context VM structure.
323 * @param pPage The page to write monitor.
324 * @param GCPhysPage The address of the page.
325 */
326DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
327{
328 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
329 PGM_LOCK_ASSERT_OWNER(pVM);
330
331 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
332 pVM->pgm.s.cMonitoredPages++;
333
334 /* Large pages must be disabled. */
335 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
336 {
337 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
338 AssertFatal(pFirstPage);
339 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
340 {
341 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
342 pVM->pgm.s.cLargePagesDisabled++;
343 }
344 else
345 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
346 }
347
348 /* Tell NEM. */
349 if (VM_IS_NEM_ENABLED(pVM))
350 {
351 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
352 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
353 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
354 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
355 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
356 }
357}
358
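/*
 * Illustrative usage sketch (pVM, pPage and GCPhys are placeholders): write
 * monitoring is armed under the PGM lock on an allocated page so that the next
 * guest write can be detected; the caller still has to update the shadow page
 * tables afterwards.
 */
#if 0 /* usage sketch only */
    PGM_LOCK_ASSERT_OWNER(pVM);
    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
        pgmPhysPageWriteMonitor(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
#endif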
359
360/**
361 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
362 *
363 * Only used when the guest is in PAE or long mode. This is inlined so that we
364 * can perform consistency checks in debug builds.
365 *
366 * @returns true if it is, false if it isn't.
367 * @param pVCpu The cross context virtual CPU structure.
368 */
369DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
370{
371 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
372 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
373 return pVCpu->pgm.s.fNoExecuteEnabled;
374}
375
376
377/**
378 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
379 *
380 * Only used when the guest is in paged 32-bit mode. This is inlined so that
381 * we can perform consistency checks in debug builds.
382 *
383 * @returns true if it is, false if it isn't.
384 * @param pVCpu The cross context virtual CPU structure.
385 */
386DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
387{
388 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
389 Assert(!CPUMIsGuestInPAEMode(pVCpu));
390 Assert(!CPUMIsGuestInLongMode(pVCpu));
391 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
392}
393
394
395/**
396 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
397 * Takes PSE-36 into account.
398 *
399 * @returns guest physical address
400 * @param pVM The cross context VM structure.
401 * @param Pde The guest PDE.
402 */
403DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
404{
405 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
406 GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
407
408 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
409}
410
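/*
 * Worked example (illustrative values): with PSE-36 the 4 MB page base comes
 * from PDE bits 22..31 while PDE bits 13..20 supply physical address bits
 * 32..39.  For Pde.u = 0x7FC02083 the low part is 0x7FC00000 and bit 13
 * (0x2000) contributes physical bit 32, so the function returns 0x17FC00000,
 * subject to the GCPhys4MBPSEMask limit configured for the VM.
 */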
411
412/**
413 * Gets the address of the guest page directory (32-bit paging).
414 *
415 * @returns VBox status code.
416 * @param pVCpu The cross context virtual CPU structure.
417 * @param ppPd Where to return the mapping. This is always set.
418 */
419DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
420{
421 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
422 if (RT_UNLIKELY(!*ppPd))
423 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
424 return VINF_SUCCESS;
425}
426
427
428/**
429 * Gets the address of the guest page directory (32-bit paging).
430 *
431 * @returns Pointer to the page directory in question.
432 * @param pVCpu The cross context virtual CPU structure.
433 */
434DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
435{
436 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
437 if (RT_UNLIKELY(!pGuestPD))
438 {
439 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
440 if (RT_FAILURE(rc))
441 return NULL;
442 }
443 return pGuestPD;
444}
445
446
447/**
448 * Gets the guest page directory pointer table.
449 *
450 * @returns VBox status code.
451 * @param pVCpu The cross context virtual CPU structure.
452 * @param ppPdpt Where to return the mapping. This is always set.
453 */
454DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
455{
456 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
457 if (RT_UNLIKELY(!*ppPdpt))
458 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
459 return VINF_SUCCESS;
460}
461
462
463/**
464 * Gets the guest page directory pointer table.
465 *
466 * @returns Pointer to the page directory pointer table in question.
467 * @returns NULL if the page directory pointer table is not present or on an invalid page.
468 * @param pVCpu The cross context virtual CPU structure.
469 */
470DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
471{
472 PX86PDPT pGuestPdpt;
473 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
474 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
475 return pGuestPdpt;
476}
477
478
479/**
480 * Gets the guest page directory pointer table entry for the specified address.
481 *
482 * @returns Pointer to the page directory pointer table entry in question.
483 * @returns NULL if the page directory pointer table is not present or on an invalid page.
484 * @param pVCpu The cross context virtual CPU structure.
485 * @param GCPtr The address.
486 */
487DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
488{
489 AssertGCPtr32(GCPtr);
490
491 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
492 if (RT_UNLIKELY(!pGuestPDPT))
493 {
494 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
495 if (RT_FAILURE(rc))
496 return NULL;
497 }
498 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
499}
500
501
502/**
503 * Gets the page directory entry for the specified address.
504 *
505 * @returns The page directory entry in question.
506 * @returns A non-present entry if the page directory is not present or on an invalid page.
507 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
508 * @param GCPtr The address.
509 */
510DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
511{
512 AssertGCPtr32(GCPtr);
513 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
514 if (RT_LIKELY(pGuestPDPT))
515 {
516 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
517 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
518 {
519 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
520 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
521 if ( !pGuestPD
522 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
523 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
524 if (pGuestPD)
525 return pGuestPD->a[iPD];
526 }
527 }
528
529 X86PDEPAE ZeroPde = {0};
530 return ZeroPde;
531}
532
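/*
 * Illustrative usage sketch (pVCpu and GCPtr are placeholders; assumes the
 * X86_PDE_P and X86_PDE_PAE_PG_MASK definitions from x86.h): the PDE is
 * returned by value and a zero entry stands in for missing PDPTs/PDs, so a
 * simple present-bit test is enough.
 */
#if 0 /* usage sketch only */
    X86PDEPAE const Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
    if (Pde.u & X86_PDE_P)
    {
        /* Present: Pde.u & X86_PDE_PAE_PG_MASK gives the page table address. */
    }
    else
    {
        /* PDE not present, PDPT entry not usable, or the PD mapping failed. */
    }
#endif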
533
534/**
535 * Gets the page directory pointer table entry for the specified address
536 * and returns the index into the page directory
537 *
538 * @returns Pointer to the page directory in question.
539 * @returns NULL if the page directory is not present or on an invalid page.
540 * @param pVCpu The cross context virtual CPU structure.
541 * @param GCPtr The address.
542 * @param piPD Receives the index into the returned page directory
543 * @param pPdpe Receives the page directory pointer entry. Optional.
544 */
545DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
546{
547 AssertGCPtr32(GCPtr);
548
549 /* The PDPE. */
550 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
551 if (pGuestPDPT)
552 {
553 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
554 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
555 if (pPdpe)
556 pPdpe->u = uPdpe;
557 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
558 {
559
560 /* The PDE. */
561 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
562 if ( !pGuestPD
563 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
564 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
565 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
566 return pGuestPD;
567 }
568 }
569 return NULL;
570}
571
572
573/**
574 * Gets the page map level-4 pointer for the guest.
575 *
576 * @returns VBox status code.
577 * @param pVCpu The cross context virtual CPU structure.
578 * @param ppPml4 Where to return the mapping. Always set.
579 */
580DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
581{
582 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
583 if (RT_UNLIKELY(!*ppPml4))
584 return pgmGstLazyMapPml4(pVCpu, ppPml4);
585 return VINF_SUCCESS;
586}
587
588
589/**
590 * Gets the page map level-4 pointer for the guest.
591 *
592 * @returns Pointer to the PML4 page.
593 * @param pVCpu The cross context virtual CPU structure.
594 */
595DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
596{
597 PX86PML4 pGuestPml4;
598 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
599 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
600 return pGuestPml4;
601}
602
603
604/**
605 * Gets the pointer to a page map level-4 entry.
606 *
607 * @returns Pointer to the PML4 entry.
608 * @param pVCpu The cross context virtual CPU structure.
609 * @param iPml4 The index.
610 * @remarks Only used by AssertCR3.
611 */
612DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
613{
614 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
615 if (pGuestPml4)
616 { /* likely */ }
617 else
618 {
619 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
620 AssertRCReturn(rc, NULL);
621 }
622 return &pGuestPml4->a[iPml4];
623}
624
625
626/**
627 * Gets the page directory entry for the specified address.
628 *
629 * @returns The page directory entry in question.
630 * @returns A non-present entry if the page directory is not present or on an invalid page.
631 * @param pVCpu The cross context virtual CPU structure.
632 * @param GCPtr The address.
633 */
634DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
635{
636 /*
637 * Note! To keep things simple, ASSUME invalid physical addresses will
638 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
639 * supporting 52-bit wide physical guest addresses.
640 */
641 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
642 if (RT_LIKELY(pGuestPml4))
643 {
644 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
645 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
646 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
647 {
648 PCX86PDPT pPdptTemp;
649 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
650 if (RT_SUCCESS(rc))
651 {
652 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
653 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
654 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
655 {
656 PCX86PDPAE pPD;
657 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
658 if (RT_SUCCESS(rc))
659 {
660 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
661 return pPD->a[iPD];
662 }
663 }
664 }
665 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
666 }
667 }
668
669 X86PDEPAE ZeroPde = {0};
670 return ZeroPde;
671}
672
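/*
 * Illustrative breakdown (made-up address, pVCpu is a placeholder): the walk
 * above splits a long-mode linear address into the PML4 index (bits 39..47),
 * PDPT index (bits 30..38) and PD index (bits 21..29) using the same
 * shift/mask pairs.
 */
#if 0 /* usage sketch only */
    RTGCPTR64 const GCPtr  = UINT64_C(0x00007fff12345000);
    unsigned const  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;         /* 0x0ff */
    unsigned const  iPdpt  = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;   /* 0x1fc */
    unsigned const  iPD    = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;     /* 0x091 */
    X86PDEPAE const Pde    = pgmGstGetLongModePDE(pVCpu, GCPtr);
    NOREF(iPml4); NOREF(iPdpt); NOREF(iPD); NOREF(Pde);
#endif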
673
674/**
675 * Gets the GUEST page directory pointer for the specified address.
676 *
677 * @returns The page directory in question.
678 * @returns NULL if the page directory is not present or on an invalid page.
679 * @param pVCpu The cross context virtual CPU structure.
680 * @param GCPtr The address.
681 * @param ppPml4e Page Map Level-4 Entry (out)
682 * @param pPdpe Page directory pointer table entry (out)
683 * @param piPD Receives the index into the returned page directory
684 */
685DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
686{
687 /* The PML4E. */
688 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
689 if (pGuestPml4)
690 {
691 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
692 *ppPml4e = &pGuestPml4->a[iPml4];
693 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
694 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
695 {
696 /* The PDPE. */
697 PCX86PDPT pPdptTemp;
698 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
699 if (RT_SUCCESS(rc))
700 {
701 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
702 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
703 pPdpe->u = uPdpe;
704 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
705 {
706 /* The PDE. */
707 PX86PDPAE pPD;
708 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
709 if (RT_SUCCESS(rc))
710 {
711 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
712 return pPD;
713 }
714 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
715 }
716 }
717 else
718 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
719 }
720 }
721 return NULL;
722}
723
724
725/**
726 * Gets the shadow page directory, 32-bit.
727 *
728 * @returns Pointer to the shadow 32-bit PD.
729 * @param pVCpu The cross context virtual CPU structure.
730 */
731DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
732{
733 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
734}
735
736
737/**
738 * Gets the shadow page directory entry for the specified address, 32-bit.
739 *
740 * @returns Shadow 32-bit PDE.
741 * @param pVCpu The cross context virtual CPU structure.
742 * @param GCPtr The address.
743 */
744DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
745{
746 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
747 if (!pShwPde)
748 {
749 X86PDE ZeroPde = {0};
750 return ZeroPde;
751 }
752 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
753}
754
755
756/**
757 * Gets the pointer to the shadow page directory entry for the specified
758 * address, 32-bit.
759 *
760 * @returns Pointer to the shadow 32-bit PDE.
761 * @param pVCpu The cross context virtual CPU structure.
762 * @param GCPtr The address.
763 */
764DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
765{
766 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
767 AssertReturn(pPde, NULL);
768 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
769}
770
771
772/**
773 * Gets the shadow page directory pointer table, PAE.
774 *
775 * @returns Pointer to the shadow PAE PDPT.
776 * @param pVCpu The cross context virtual CPU structure.
777 */
778DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
779{
780 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
781}
782
783
784/**
785 * Gets the shadow page directory for the specified address, PAE.
786 *
787 * @returns Pointer to the shadow PD.
788 * @param pVCpu The cross context virtual CPU structure.
789 * @param pPdpt Pointer to the page directory pointer table.
790 * @param GCPtr The address.
791 */
792DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
793{
794 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
795 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
796 {
797 /* Fetch the pgm pool shadow descriptor. */
798 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
799 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
800 AssertReturn(pShwPde, NULL);
801
802 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
803 }
804 return NULL;
805}
806
807
808/**
809 * Gets the shadow page directory for the specified address, PAE.
810 *
811 * @returns Pointer to the shadow PD.
812 * @param pVCpu The cross context virtual CPU structure.
813 * @param GCPtr The address.
814 */
815DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
816{
817 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
818}
819
820
821/**
822 * Gets the shadow page directory entry, PAE.
823 *
824 * @returns PDE.
825 * @param pVCpu The cross context virtual CPU structure.
826 * @param GCPtr The address.
827 */
828DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
829{
830 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
831 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
832 if (pShwPde)
833 return pShwPde->a[iPd];
834
835 X86PDEPAE ZeroPde = {0};
836 return ZeroPde;
837}
838
839
840/**
841 * Gets the pointer to the shadow page directory entry for an address, PAE.
842 *
843 * @returns Pointer to the PDE.
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param GCPtr The address.
846 * @remarks Only used by AssertCR3.
847 */
848DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
849{
850 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
851 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
852 AssertReturn(pShwPde, NULL);
853 return &pShwPde->a[iPd];
854}
855
856
857/**
858 * Gets the shadow page map level-4 pointer.
859 *
860 * @returns Pointer to the shadow PML4.
861 * @param pVCpu The cross context virtual CPU structure.
862 */
863DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
864{
865 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
866}
867
868
869/**
870 * Gets the shadow page map level-4 entry for the specified address.
871 *
872 * @returns The entry.
873 * @param pVCpu The cross context virtual CPU structure.
874 * @param GCPtr The address.
875 */
876DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
877{
878 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
879 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
880 if (pShwPml4)
881 return pShwPml4->a[iPml4];
882
883 X86PML4E ZeroPml4e = {0};
884 return ZeroPml4e;
885}
886
887
888/**
889 * Gets the pointer to the specified shadow page map level-4 entry.
890 *
891 * @returns The entry.
892 * @param pVCpu The cross context virtual CPU structure.
893 * @param iPml4 The PML4 index.
894 */
895DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
896{
897 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
898 if (pShwPml4)
899 return &pShwPml4->a[iPml4];
900 return NULL;
901}
902
903
904/**
905 * Cached physical handler lookup.
906 *
907 * @returns Physical handler covering @a GCPhys.
908 * @param pVM The cross context VM structure.
909 * @param GCPhys The lookup address.
910 */
911DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys)
912{
913 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
914 if ( pHandler
915 && GCPhys >= pHandler->Core.Key
916 && GCPhys < pHandler->Core.KeyLast)
917 {
918 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
919 return pHandler;
920 }
921
922 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
923 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
924 if (pHandler)
925 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
926 return pHandler;
927}
928
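/*
 * Illustrative usage sketch (pVM and GCPhysFault are placeholders): the
 * one-entry cache makes repeated lookups for the same handler range cheap,
 * which helps when a device region takes a burst of accesses.
 */
#if 0 /* usage sketch only */
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (pHandler)
    {
        /* GCPhysFault falls inside the range starting at pHandler->Core.Key. */
    }
#endif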
929
930/**
931 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
932 *
933 * @returns Pointer to the shadow page structure.
934 * @param pPool The pool.
935 * @param idx The pool page index.
936 */
937DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
938{
939 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
940 return &pPool->aPages[idx];
941}
942
943
944/**
945 * Clear references to guest physical memory.
946 *
947 * @param pPool The pool.
948 * @param pPoolPage The pool page.
949 * @param pPhysPage The physical guest page tracking structure.
950 * @param iPte Shadow PTE index
951 */
952DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
953{
954 /*
955 * Just deal with the simple case here.
956 */
957#ifdef VBOX_STRICT
958 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
959#endif
960#ifdef LOG_ENABLED
961 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
962#endif
963 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
964 if (cRefs == 1)
965 {
966 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
967 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
968 /* Invalidate the tracking data. */
969 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
970 }
971 else
972 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
973 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
974}
975
976
977/**
978 * Moves the page to the head of the age list.
979 *
980 * This is done when the cached page is used in one way or another.
981 *
982 * @param pPool The pool.
983 * @param pPage The cached page.
984 */
985DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
986{
987 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
988
989 /*
990 * Move to the head of the age list.
991 */
992 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
993 {
994 /* unlink */
995 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
996 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
997 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
998 else
999 pPool->iAgeTail = pPage->iAgePrev;
1000
1001 /* insert at head */
1002 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1003 pPage->iAgeNext = pPool->iAgeHead;
1004 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1005 pPool->iAgeHead = pPage->idx;
1006 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1007 }
1008}
1009
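/*
 * Illustrative note: the age list is a doubly linked LRU list threaded through
 * the pool pages by index (iAgePrev/iAgeNext, terminated by NIL_PGMPOOL_IDX);
 * iAgeHead is the most recently used page and iAgeTail the eviction candidate.
 * A cache hit is therefore typically followed by this "touch" (pPool and pPage
 * are placeholders):
 */
#if 0 /* usage sketch only */
    pgmPoolCacheUsed(pPool, pPage);     /* Move pPage to the head of the LRU. */
#endif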
1010
1011/**
1012 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1013 *
1014 * @param pPool The pool.
1015 * @param pPage PGM pool page
1016 */
1017DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1018{
1019 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1020 ASMAtomicIncU32(&pPage->cLocked);
1021}
1022
1023
1024/**
1025 * Unlocks a page to allow flushing again
1026 *
1027 * @param pPool The pool.
1028 * @param pPage PGM pool page
1029 */
1030DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1031{
1032 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1033 Assert(pPage->cLocked);
1034 ASMAtomicDecU32(&pPage->cLocked);
1035}
1036
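/*
 * Illustrative usage sketch (pPool and pPage are placeholders): lock and unlock
 * calls bracket code that must not see the shadow page flushed, e.g. while it
 * serves as a CR3 root; the counter allows nested locking.
 */
#if 0 /* usage sketch only */
    pgmPoolLockPage(pPool, pPage);
    /* ... the shadow page cannot be flushed in here ... */
    pgmPoolUnlockPage(pPool, pPage);
#endif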
1037
1038/**
1039 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
1040 *
1041 * @returns VBox status code.
1042 * @param pPage PGM pool page
1043 */
1044DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1045{
1046 if (pPage->cLocked)
1047 {
1048 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1049 if (pPage->cModifications)
1050 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1051 return true;
1052 }
1053 return false;
1054}
1055
1056
1057/**
1058 * Checks if the specified page is dirty (not write monitored).
1059 *
1060 * @returns true if dirty, false if not.
1061 * @param pVM The cross context VM structure.
1062 * @param GCPhys Guest physical address
1063 */
1064DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1065{
1066 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1067 PGM_LOCK_ASSERT_OWNER(pVM);
1068 if (!pPool->cDirtyPages)
1069 return false;
1070 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1071}
1072
1073
1074/**
1075 * Tells if mappings are to be put into the shadow page table or not.
1076 *
1077 * @returns boolean result
1078 * @param pVM The cross context VM structure.
1079 */
1080DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVMCC pVM)
1081{
1082#ifdef PGM_WITHOUT_MAPPINGS
1083 /* Only raw-mode has mappings. */
1084 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1085 return false;
1086#else
1087 Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1088 return VM_IS_RAW_MODE_ENABLED(pVM);
1089#endif
1090}
1091
1092
1093/**
1094 * Checks if the mappings are floating and enabled.
1095 *
1096 * @returns true / false.
1097 * @param pVM The cross context VM structure.
1098 */
1099DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVMCC pVM)
1100{
1101#ifdef PGM_WITHOUT_MAPPINGS
1102 /* Only raw-mode has mappings. */
1103 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1104 return false;
1105#else
1106 return !pVM->pgm.s.fMappingsFixed
1107 && pgmMapAreMappingsEnabled(pVM);
1108#endif
1109}
1110
1111/** @} */
1112
1113#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1114
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette