VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h @ 86465

Last change on this file since 86465 was 86465, checked in by vboxsync, 4 years ago

VMM/PGMInline.h: Working on eliminating page table bitfield use. bugref:9841 bugref:9746

1/* $Id: PGMInline.h 86465 2020-10-07 10:58:48Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
19#define VMM_INCLUDED_SRC_include_PGMInline_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/err.h>
27#include <VBox/vmm/stam.h>
28#include <VBox/param.h>
29#include <VBox/vmm/vmm.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/pdmcritsect.h>
32#include <VBox/vmm/pdmapi.h>
33#include <VBox/dis.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/log.h>
36#include <VBox/vmm/gmm.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/nem.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
47/** @addtogroup grp_pgm_int Internals
48 * @internal
49 * @{
50 */
51
52/**
53 * Gets the PGMRAMRANGE structure for a guest page.
54 *
55 * @returns Pointer to the RAM range on success.
56 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
57 *
58 * @param pVM The cross context VM structure.
59 * @param GCPhys The GC physical address.
60 */
61DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
62{
63 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
64 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
65 return pgmPhysGetRangeSlow(pVM, GCPhys);
66 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
67 return pRam;
68}
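
/* A minimal usage sketch (hypothetical caller), assuming the PGM lock is held:
 *
 *      PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
 *      if (!pRam)
 *          return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *      RTGCPHYS const offRange = GCPhys - pRam->GCPhys;
 *      Log(("GCPhys=%RGp is %RGp bytes into a %RGp byte range\n", GCPhys, offRange, pRam->cb));
 */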
69
70
71/**
72 * Gets the PGMRAMRANGE structure for a guest page; if the address is unassigned,
73 * gets the RAM range above it.
74 *
75 * @returns Pointer to the RAM range on success.
76 * @returns NULL if the address is located after the last range.
77 *
78 * @param pVM The cross context VM structure.
79 * @param GCPhys The GC physical address.
80 */
81DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
82{
83 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
84 if ( !pRam
85 || (GCPhys - pRam->GCPhys) >= pRam->cb)
86 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
87 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
88 return pRam;
89}
90
91
92/**
93 * Gets the PGMPAGE structure for a guest page.
94 *
95 * @returns Pointer to the page on success.
96 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
97 *
98 * @param pVM The cross context VM structure.
99 * @param GCPhys The GC physical address.
100 */
101DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
102{
103 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
104 RTGCPHYS off;
105 if ( !pRam
106 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
107 return pgmPhysGetPageSlow(pVM, GCPhys);
108 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
109 return &pRam->aPages[off >> PAGE_SHIFT];
110}
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * Old Phys code: Will make sure the page is present.
117 *
118 * @returns VBox status code.
119 * @retval VINF_SUCCESS and a valid *ppPage on success.
120 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
121 *
122 * @param pVM The cross context VM structure.
123 * @param GCPhys The GC physical address.
124 * @param ppPage Where to store the page pointer on success.
125 */
126DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
127{
128 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
129 RTGCPHYS off;
130 if ( !pRam
131 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
132 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
133 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
134 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
135 return VINF_SUCCESS;
136}
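
/* A minimal usage sketch (hypothetical caller), assuming the PGM lock is held:
 *
 *      PPGMPAGE pPage;
 *      int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *      if (RT_FAILURE(rc))
 *          return rc; // VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
 *      if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
 *      {
 *          // Backed by a private, writable host page.
 *      }
 */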
137
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 * @param ppRamHint Where to read and store the ram list hint.
152 * The caller initializes this to NULL before the call.
153 */
154DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
155{
156 RTGCPHYS off;
157 PPGMRAMRANGE pRam = *ppRamHint;
158 if ( !pRam
159 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
160 {
161 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
162 if ( !pRam
163 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
164 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
165
166 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
167 *ppRamHint = pRam;
168 }
169 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
170 return VINF_SUCCESS;
171}
172
173
174/**
175 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
176 *
177 * @returns Pointer to the page on success.
178 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
179 *
180 * @param pVM The cross context VM structure.
181 * @param GCPhys The GC physical address.
182 * @param ppPage Where to store the pointer to the PGMPAGE structure.
183 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
184 */
185DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
186{
187 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
188 RTGCPHYS off;
189 if ( !pRam
190 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
191 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
192
193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
194 *ppRam = pRam;
195 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
196 return VINF_SUCCESS;
197}
198
199
200/**
201 * Convert GC Phys to HC Phys.
202 *
203 * @returns VBox status code.
204 * @param pVM The cross context VM structure.
205 * @param GCPhys The GC physical address.
206 * @param pHCPhys Where to store the corresponding HC physical address.
207 *
208 * @deprecated Doesn't deal with zero, shared or write monitored pages.
209 * Avoid when writing new code!
210 */
211DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
212{
213 PPGMPAGE pPage;
214 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
215 if (RT_FAILURE(rc))
216 return rc;
217 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
218 return VINF_SUCCESS;
219}
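
/* A minimal usage sketch (hypothetical caller) of the deprecated helper above;
 * the page offset of GCPhys is carried over into the result by the function:
 *
 *      RTHCPHYS HCPhys;
 *      int rc = pgmRamGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPhys=%RGp -> HCPhys=%RHp\n", GCPhys, HCPhys));
 */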
220
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
222
223/**
224 * Inlined version of the ring-0 version of the host page mapping code
225 * that optimizes access to pages already in the set.
226 *
227 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
228 * @param pVCpu The cross context virtual CPU structure.
229 * @param HCPhys The physical address of the page.
230 * @param ppv Where to store the mapping address.
231 * @param SRC_POS The source location of the caller.
232 */
233DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPUCC pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
234{
235 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
236
237 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
238 Assert(!(HCPhys & PAGE_OFFSET_MASK));
239 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
240
241 unsigned iHash = PGMMAPSET_HASH(HCPhys);
242 unsigned iEntry = pSet->aiHashTable[iHash];
243 if ( iEntry < pSet->cEntries
244 && pSet->aEntries[iEntry].HCPhys == HCPhys
245 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
246 {
247 pSet->aEntries[iEntry].cInlinedRefs++;
248 *ppv = pSet->aEntries[iEntry].pvPage;
249 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
250 }
251 else
252 {
253 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
254 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
255 }
256
257 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
258 return VINF_SUCCESS;
259}
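
/* A minimal usage sketch (hypothetical ring-0 caller built with
 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0); HCPhys must be page aligned and the
 * function only "fails" by bailing out to ring-3 itself:
 *
 *      void *pv;
 *      pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS);
 *      PX86PT pPT = (PX86PT)pv;
 */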
260
261
262/**
263 * Inlined version of the guest page mapping code that optimizes access to pages
264 * already in the set.
265 *
266 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
267 * @param pVM The cross context VM structure.
268 * @param pVCpu The cross context virtual CPU structure.
269 * @param GCPhys The guest physical address of the page.
270 * @param ppv Where to store the mapping address.
271 * @param SRC_POS The source location of the caller.
272 */
273DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
274{
275 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
276 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
277
278 /*
279 * Get the ram range.
280 */
281 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
282 RTGCPHYS off;
283 if ( !pRam
284 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
285 /** @todo || page state stuff */
286 )
287 {
288 /* This case is not counted into StatRZDynMapGCPageInl. */
289 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
290 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
291 }
292
293 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
294 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
295
296 /*
297 * pgmRZDynMapHCPageInlined without the stats.
298 */
299 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
300 Assert(!(HCPhys & PAGE_OFFSET_MASK));
301 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
302
303 unsigned iHash = PGMMAPSET_HASH(HCPhys);
304 unsigned iEntry = pSet->aiHashTable[iHash];
305 if ( iEntry < pSet->cEntries
306 && pSet->aEntries[iEntry].HCPhys == HCPhys
307 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
308 {
309 pSet->aEntries[iEntry].cInlinedRefs++;
310 *ppv = pSet->aEntries[iEntry].pvPage;
311 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
312 }
313 else
314 {
315 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
316 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
317 }
318
319 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
320 return VINF_SUCCESS;
321}
322
323
324/**
325 * Inlined version of the ring-0 version of guest page mapping that optimizes
326 * access to pages already in the set.
327 *
328 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
329 * @param pVCpu The cross context virtual CPU structure.
330 * @param GCPhys The guest physical address of the page.
331 * @param ppv Where to store the mapping address.
332 * @param SRC_POS The source location of the caller.
333 */
334DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
335{
336 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
337}
338
339
340/**
341 * Inlined version of the ring-0 version of the guest byte mapping code
342 * that optimizes access to pages already in the set.
343 *
344 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
345 * @param pVCpu The cross context virtual CPU structure.
346 * @param GCPhys The guest physical address of the page.
347 * @param ppv Where to store the mapping address. The offset is
348 * preserved.
349 * @param SRC_POS The source location of the caller.
350 */
351DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
352{
353 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
354
355 /*
356 * Get the ram range.
357 */
358 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
359 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
360 RTGCPHYS off;
361 if ( !pRam
362 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
363 /** @todo || page state stuff */
364 )
365 {
366 /* This case is not counted into StatRZDynMapGCPageInl. */
367 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
368 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
369 }
370
371 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
372 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
373
374 /*
375 * pgmRZDynMapHCPageInlined without the stats.
376 */
377 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
378 Assert(!(HCPhys & PAGE_OFFSET_MASK));
379 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
380
381 unsigned iHash = PGMMAPSET_HASH(HCPhys);
382 unsigned iEntry = pSet->aiHashTable[iHash];
383 if ( iEntry < pSet->cEntries
384 && pSet->aEntries[iEntry].HCPhys == HCPhys
385 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
386 {
387 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
388 pSet->aEntries[iEntry].cInlinedRefs++;
389 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
390 }
391 else
392 {
393 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
394 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
395 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
396 }
397
398 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
399 return VINF_SUCCESS;
400}
401
402
403/**
404 * Maps the page into current context (RC and maybe R0).
405 *
406 * @returns pointer to the mapping.
407 * @param pVM The cross context VM structure.
408 * @param pPage The page.
409 * @param SRC_POS The source location of the caller.
410 */
411DECLINLINE(void *) pgmPoolMapPageInlined(PVMCC pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
412{
413 if (pPage->idx >= PGMPOOL_IDX_FIRST)
414 {
415 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
416 void *pv;
417 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
418 return pv;
419 }
420 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
421}
422
423
424/**
425 * Maps the page into current context (RC and maybe R0).
426 *
427 * @returns pointer to the mapping.
428 * @param pVM The cross context VM structure.
429 * @param pVCpu The cross context virtual CPU structure.
430 * @param pPage The page.
431 * @param SRC_POS The source location of the caller.
432 */
433DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
434{
435 if (pPage->idx >= PGMPOOL_IDX_FIRST)
436 {
437 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
438 void *pv;
439 Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
440 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
441 return pv;
442 }
443 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
444}
445
446#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
447
448/**
449 * Queries the Physical TLB entry for a physical guest page,
450 * attempting to load the TLB entry if necessary.
451 *
452 * @returns VBox status code.
453 * @retval VINF_SUCCESS on success
454 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
455 *
456 * @param pVM The cross context VM structure.
457 * @param GCPhys The address of the guest page.
458 * @param ppTlbe Where to store the pointer to the TLB entry.
459 */
460DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
461{
462 int rc;
463 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
464 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
465 {
466 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
467 rc = VINF_SUCCESS;
468 }
469 else
470 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
471 *ppTlbe = pTlbe;
472 return rc;
473}
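
/* A minimal usage sketch (hypothetical caller); the TLB entry is only valid
 * while the PGM lock is held:
 *
 *      PPGMPAGEMAPTLBE pTlbe;
 *      int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *      if (RT_SUCCESS(rc))
 *      {
 *          void *pv = (uint8_t *)pTlbe->pv + (GCPhys & PAGE_OFFSET_MASK);
 *          // pv now addresses the guest page bytes in the current context.
 *      }
 */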
474
475
476/**
477 * Queries the Physical TLB entry for a physical guest page,
478 * attempting to load the TLB entry if necessary.
479 *
480 * @returns VBox status code.
481 * @retval VINF_SUCCESS on success
482 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
483 *
484 * @param pVM The cross context VM structure.
485 * @param pPage Pointer to the PGMPAGE structure corresponding to
486 * GCPhys.
487 * @param GCPhys The address of the guest page.
488 * @param ppTlbe Where to store the pointer to the TLB entry.
489 */
490DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
491{
492 int rc;
493 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
494 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
495 {
496 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
497 rc = VINF_SUCCESS;
498#if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
499# ifdef IN_RING3
500 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
501# else
502 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
503# endif
504 pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
505#endif
506 AssertPtr(pTlbe->pv);
507#if defined(IN_RING3) || (!defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_RAM_IN_KERNEL))
508 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
509#endif
510 }
511 else
512 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
513 *ppTlbe = pTlbe;
514 return rc;
515}
516
517
518/**
519 * Calculates NEM page protection flags.
520 */
521DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
522{
523 /*
524 * Deal with potentially writable pages first.
525 */
526 if (PGMPAGETYPE_IS_RWX(enmType))
527 {
528 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
529 {
530 if (PGM_PAGE_IS_ALLOCATED(pPage))
531 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
532 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
533 }
534 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
535 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
536 }
537 /*
538 * Potentially readable & executable pages.
539 */
540 else if ( PGMPAGETYPE_IS_ROX(enmType)
541 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
542 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
543
544 /*
545 * The rest needs special access handling.
546 */
547 return NEM_PAGE_PROT_NONE;
548}
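
/* A sketch of how the result is typically consumed (mirrors the NEM
 * notification call used elsewhere in this file); an allocated RAM page with
 * no handlers gets R+W+X, while a write-monitored page is not in the ALLOCATED
 * state and therefore loses NEM_PAGE_PROT_WRITE:
 *
 *      uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
 *      PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
 *      NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
 *                                     pgmPhysPageCalcNemProtection(pPage, enmType),
 *                                     enmType, &u2State);
 *      PGM_PAGE_SET_NEM_STATE(pPage, u2State);
 */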
549
550
551/**
552 * Enables write monitoring for an allocated page.
553 *
554 * The caller is responsible for updating the shadow page tables.
555 *
556 * @param pVM The cross context VM structure.
557 * @param pPage The page to write monitor.
558 * @param GCPhysPage The address of the page.
559 */
560DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
561{
562 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
563 PGM_LOCK_ASSERT_OWNER(pVM);
564
565 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
566 pVM->pgm.s.cMonitoredPages++;
567
568 /* Large pages must be disabled. */
569 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
570 {
571 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
572 AssertFatal(pFirstPage);
573 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
574 {
575 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
576 pVM->pgm.s.cLargePagesDisabled++;
577 }
578 else
579 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
580 }
581
582 /* Tell NEM. */
583 if (VM_IS_NEM_ENABLED(pVM))
584 {
585 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
586 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
587 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
588 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
589 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
590 }
591}
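
/* A minimal usage sketch (hypothetical caller); the page must be in the
 * ALLOCATED state and the caller must afterwards strip write access from the
 * shadow page tables so the next write actually traps:
 *
 *      if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
 *          pgmPhysPageWriteMonitor(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
 */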
592
593
594/**
595 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
596 *
597 * Only used when the guest is in PAE or long mode. This is inlined so that we
598 * can perform consistency checks in debug builds.
599 *
600 * @returns true if it is, false if it isn't.
601 * @param pVCpu The cross context virtual CPU structure.
602 */
603DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
604{
605 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
606 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
607 return pVCpu->pgm.s.fNoExecuteEnabled;
608}
609
610
611/**
612 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
613 *
614 * Only used when the guest is in paged 32-bit mode. This is inlined so that
615 * we can perform consistency checks in debug builds.
616 *
617 * @returns true if it is, false if it isn't.
618 * @param pVCpu The cross context virtual CPU structure.
619 */
620DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
621{
622 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
623 Assert(!CPUMIsGuestInPAEMode(pVCpu));
624 Assert(!CPUMIsGuestInLongMode(pVCpu));
625 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
626}
627
628
629/**
630 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
631 * Takes PSE-36 into account.
632 *
633 * @returns guest physical address
634 * @param pVM The cross context VM structure.
635 * @param Pde Guest Pde
636 */
637DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
638{
639 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
640 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
641
642 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
643}
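
/* Worked example (illustrative numbers): a PDE with the 4 MB base 0x00c00000
 * in bits 22-31 and u8PageNoHigh = 0x01 in bits 13-20 yields
 *      0x00c00000 | ((RTGCPHYS)0x01 << 32) = 0x100c00000,
 * which is then clipped by GCPhys4MBPSEMask to the physical address width the
 * guest CPU reports. */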
644
645
646/**
647 * Gets the address of the guest page directory (32-bit paging).
648 *
649 * @returns VBox status code.
650 * @param pVCpu The cross context virtual CPU structure.
651 * @param ppPd Where to return the mapping. This is always set.
652 */
653DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
654{
655#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
656 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
657 if (RT_FAILURE(rc))
658 {
659 *ppPd = NULL;
660 return rc;
661 }
662#else
663 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
664 if (RT_UNLIKELY(!*ppPd))
665 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
666#endif
667 return VINF_SUCCESS;
668}
669
670
671/**
672 * Gets the address of the guest page directory (32-bit paging).
673 *
674 * @returns Pointer to the page directory entry in question.
675 * @param pVCpu The cross context virtual CPU structure.
676 */
677DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
678{
679#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
680 PX86PD pGuestPD = NULL;
681 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
682 if (RT_FAILURE(rc))
683 {
684 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
685 return NULL;
686 }
687#else
688 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
689 if (RT_UNLIKELY(!pGuestPD))
690 {
691 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
692 if (RT_FAILURE(rc))
693 return NULL;
694 }
695#endif
696 return pGuestPD;
697}
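
/* A minimal usage sketch (hypothetical caller); X86_PDE_P is assumed to be the
 * present bit from x86.h:
 *
 *      PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
 *      if (pPD)
 *      {
 *          X86PDE Pde = pPD->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
 *          if (Pde.u & X86_PDE_P)
 *          {
 *              // Present: a page table, or with CR4.PSE a 4 MB page.
 *          }
 *      }
 */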
698
699
700/**
701 * Gets the guest page directory pointer table.
702 *
703 * @returns VBox status code.
704 * @param pVCpu The cross context virtual CPU structure.
705 * @param ppPdpt Where to return the mapping. This is always set.
706 */
707DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
708{
709#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
710 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
711 if (RT_FAILURE(rc))
712 {
713 *ppPdpt = NULL;
714 return rc;
715 }
716#else
717 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
718 if (RT_UNLIKELY(!*ppPdpt))
719 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
720#endif
721 return VINF_SUCCESS;
722}
723
724
725/**
726 * Gets the guest page directory pointer table.
727 *
728 * @returns Pointer to the page directory in question.
729 * @returns NULL if the page directory is not present or on an invalid page.
730 * @param pVCpu The cross context virtual CPU structure.
731 */
732DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
733{
734 PX86PDPT pGuestPdpt;
735 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
736 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
737 return pGuestPdpt;
738}
739
740
741/**
742 * Gets the guest page directory pointer table entry for the specified address.
743 *
744 * @returns Pointer to the page directory in question.
745 * @returns NULL if the page directory is not present or on an invalid page.
746 * @param pVCpu The cross context virtual CPU structure.
747 * @param GCPtr The address.
748 */
749DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
750{
751 AssertGCPtr32(GCPtr);
752
753#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
754 PX86PDPT pGuestPDPT = NULL;
755 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
756 AssertRCReturn(rc, NULL);
757#else
758 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
759 if (RT_UNLIKELY(!pGuestPDPT))
760 {
761 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
762 if (RT_FAILURE(rc))
763 return NULL;
764 }
765#endif
766 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
767}
768
769
770/**
771 * Gets the page directory entry for the specified address.
772 *
773 * @returns The page directory entry in question.
774 * @returns A non-present entry if the page directory is not present or on an invalid page.
775 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
776 * @param GCPtr The address.
777 */
778DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
779{
780 AssertGCPtr32(GCPtr);
781 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
782 if (RT_LIKELY(pGuestPDPT))
783 {
784 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
785 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
786 {
787 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
788#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
789 PX86PDPAE pGuestPD = NULL;
790 int rc = pgmRZDynMapGCPageInlined(pVCpu,
791 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
792 (void **)&pGuestPD
793 RTLOG_COMMA_SRC_POS);
794 if (RT_SUCCESS(rc))
795 return pGuestPD->a[iPD];
796 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
797#else
798 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
799 if ( !pGuestPD
800 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
801 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
802 if (pGuestPD)
803 return pGuestPD->a[iPD];
804#endif
805 }
806 }
807
808 X86PDEPAE ZeroPde = {0};
809 return ZeroPde;
810}
811
812
813/**
814 * Gets the page directory pointer table entry for the specified address
815 * and returns the index into the page directory.
816 *
817 * @returns Pointer to the page directory in question.
818 * @returns NULL if the page directory is not present or on an invalid page.
819 * @param pVCpu The cross context virtual CPU structure.
820 * @param GCPtr The address.
821 * @param piPD Receives the index into the returned page directory
822 * @param pPdpe Receives the page directory pointer entry. Optional.
823 */
824DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
825{
826 AssertGCPtr32(GCPtr);
827
828 /* The PDPE. */
829 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
830 if (pGuestPDPT)
831 {
832 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
833 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
834 if (pPdpe)
835 pPdpe->u = uPdpe;
836 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
837 {
838
839 /* The PDE. */
840#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
841 PX86PDPAE pGuestPD = NULL;
842 int rc = pgmRZDynMapGCPageInlined(pVCpu,
843 uPdpe & X86_PDPE_PG_MASK,
844 (void **)&pGuestPD
845 RTLOG_COMMA_SRC_POS);
846 if (RT_FAILURE(rc))
847 {
848 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
849 return NULL;
850 }
851#else
852 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
853 if ( !pGuestPD
854 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
855 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
856#endif
857 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
858 return pGuestPD;
859 }
860 }
861 return NULL;
862}
863
864
865/**
866 * Gets the page map level-4 pointer for the guest.
867 *
868 * @returns VBox status code.
869 * @param pVCpu The cross context virtual CPU structure.
870 * @param ppPml4 Where to return the mapping. Always set.
871 */
872DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
873{
874#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
875 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
876 if (RT_FAILURE(rc))
877 {
878 *ppPml4 = NULL;
879 return rc;
880 }
881#else
882 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
883 if (RT_UNLIKELY(!*ppPml4))
884 return pgmGstLazyMapPml4(pVCpu, ppPml4);
885#endif
886 return VINF_SUCCESS;
887}
888
889
890/**
891 * Gets the page map level-4 pointer for the guest.
892 *
893 * @returns Pointer to the PML4 page.
894 * @param pVCpu The cross context virtual CPU structure.
895 */
896DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
897{
898 PX86PML4 pGuestPml4;
899 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
900 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
901 return pGuestPml4;
902}
903
904
905/**
906 * Gets the pointer to a page map level-4 entry.
907 *
908 * @returns Pointer to the PML4 entry.
909 * @param pVCpu The cross context virtual CPU structure.
910 * @param iPml4 The index.
911 * @remarks Only used by AssertCR3.
912 */
913DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
914{
915#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
916 PX86PML4 pGuestPml4;
917 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
918 AssertRCReturn(rc, NULL);
919#else
920 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
921 if (pGuestPml4)
922 { /* likely */ }
923 else
924 {
925 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
926 AssertRCReturn(rc, NULL);
927 }
928#endif
929 return &pGuestPml4->a[iPml4];
930}
931
932
933/**
934 * Gets the page directory entry for the specified address.
935 *
936 * @returns The page directory entry in question.
937 * @returns A non-present entry if the page directory is not present or on an invalid page.
938 * @param pVCpu The cross context virtual CPU structure.
939 * @param GCPtr The address.
940 */
941DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
942{
943 /*
944 * Note! To keep things simple, ASSUME invalid physical addresses will
945 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
946 * supporting 52-bit wide physical guest addresses.
947 */
948 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
949 if (RT_LIKELY(pGuestPml4))
950 {
951 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
952 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
953 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
954 {
955 PCX86PDPT pPdptTemp;
956 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
957 if (RT_SUCCESS(rc))
958 {
959 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
960 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
961 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
962 {
963 PCX86PDPAE pPD;
964 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
965 if (RT_SUCCESS(rc))
966 {
967 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
968 return pPD->a[iPD];
969 }
970 }
971 }
972 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
973 }
974 }
975
976 X86PDEPAE ZeroPde = {0};
977 return ZeroPde;
978}
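
/* A minimal usage sketch (hypothetical caller); X86_PDE_P and X86_PDE_PS are
 * assumed to be the present and page-size bits from x86.h:
 *
 *      X86PDEPAE Pde = pgmGstGetLongModePDE(pVCpu, GCPtr);
 *      if (Pde.u & X86_PDE_P)
 *      {
 *          if (Pde.u & X86_PDE_PS)
 *          {
 *              // 2 MB page mapping.
 *          }
 *          else
 *          {
 *              // 4 KB pages: continue the walk into the page table.
 *          }
 *      }
 */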
979
980
981/**
982 * Gets the GUEST page directory pointer for the specified address.
983 *
984 * @returns The page directory in question.
985 * @returns NULL if the page directory is not present or on an invalid page.
986 * @param pVCpu The cross context virtual CPU structure.
987 * @param GCPtr The address.
988 * @param ppPml4e Page Map Level-4 Entry (out)
989 * @param pPdpe Page directory pointer table entry (out)
990 * @param piPD Receives the index into the returned page directory
991 */
992DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
993{
994 /* The PML4E. */
995 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
996 if (pGuestPml4)
997 {
998 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
999 *ppPml4e = &pGuestPml4->a[iPml4];
1000 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
1001 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
1002 {
1003 /* The PDPE. */
1004 PCX86PDPT pPdptTemp;
1005 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
1006 if (RT_SUCCESS(rc))
1007 {
1008 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1009 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
1010 pPdpe->u = uPdpe;
1011 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
1012 {
1013 /* The PDE. */
1014 PX86PDPAE pPD;
1015 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
1016 if (RT_SUCCESS(rc))
1017 {
1018 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1019 return pPD;
1020 }
1021 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1022 }
1023 }
1024 else
1025 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1026 }
1027 }
1028 return NULL;
1029}
1030
1031
1032/**
1033 * Gets the shadow page directory, 32-bit.
1034 *
1035 * @returns Pointer to the shadow 32-bit PD.
1036 * @param pVCpu The cross context virtual CPU structure.
1037 */
1038DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
1039{
1040 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1041}
1042
1043
1044/**
1045 * Gets the shadow page directory entry for the specified address, 32-bit.
1046 *
1047 * @returns Shadow 32-bit PDE.
1048 * @param pVCpu The cross context virtual CPU structure.
1049 * @param GCPtr The address.
1050 */
1051DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1052{
1053 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1054 if (!pShwPde)
1055 {
1056 X86PDE ZeroPde = {0};
1057 return ZeroPde;
1058 }
1059 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1060}
1061
1062
1063/**
1064 * Gets the pointer to the shadow page directory entry for the specified
1065 * address, 32-bit.
1066 *
1067 * @returns Pointer to the shadow 32-bit PDE.
1068 * @param pVCpu The cross context virtual CPU structure.
1069 * @param GCPtr The address.
1070 */
1071DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1072{
1073 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1074 AssertReturn(pPde, NULL);
1075 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1076}
1077
1078
1079/**
1080 * Gets the shadow page directory pointer table, PAE.
1081 *
1082 * @returns Pointer to the shadow PAE PDPT.
1083 * @param pVCpu The cross context virtual CPU structure.
1084 */
1085DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
1086{
1087 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1088}
1089
1090
1091/**
1092 * Gets the shadow page directory for the specified address, PAE.
1093 *
1094 * @returns Pointer to the shadow PD.
1095 * @param pVCpu The cross context virtual CPU structure.
1096 * @param pPdpt Pointer to the page directory pointer table.
1097 * @param GCPtr The address.
1098 */
1099DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1100{
1101 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1102 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
1103 {
1104 /* Fetch the pgm pool shadow descriptor. */
1105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1106 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1107 AssertReturn(pShwPde, NULL);
1108
1109 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1110 }
1111 return NULL;
1112}
1113
1114
1115/**
1116 * Gets the shadow page directory for the specified address, PAE.
1117 *
1118 * @returns Pointer to the shadow PD.
1119 * @param pVCpu The cross context virtual CPU structure.
1120 * @param GCPtr The address.
1121 */
1122DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1123{
1124 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
1125}
1126
1127
1128/**
1129 * Gets the shadow page directory entry, PAE.
1130 *
1131 * @returns PDE.
1132 * @param pVCpu The cross context virtual CPU structure.
1133 * @param GCPtr The address.
1134 */
1135DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1136{
1137 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1138 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1139 if (pShwPde)
1140 return pShwPde->a[iPd];
1141
1142 X86PDEPAE ZeroPde = {0};
1143 return ZeroPde;
1144}
1145
1146
1147/**
1148 * Gets the pointer to the shadow page directory entry for an address, PAE.
1149 *
1150 * @returns Pointer to the PDE.
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param GCPtr The address.
1153 * @remarks Only used by AssertCR3.
1154 */
1155DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1156{
1157 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1158 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1159 AssertReturn(pShwPde, NULL);
1160 return &pShwPde->a[iPd];
1161}
1162
1163
1164/**
1165 * Gets the shadow page map level-4 pointer.
1166 *
1167 * @returns Pointer to the shadow PML4.
1168 * @param pVCpu The cross context virtual CPU structure.
1169 */
1170DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
1171{
1172 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1173}
1174
1175
1176/**
1177 * Gets the shadow page map level-4 entry for the specified address.
1178 *
1179 * @returns The entry.
1180 * @param pVCpu The cross context virtual CPU structure.
1181 * @param GCPtr The address.
1182 */
1183DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1184{
1185 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1186 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1187 if (pShwPml4)
1188 return pShwPml4->a[iPml4];
1189
1190 X86PML4E ZeroPml4e = {0};
1191 return ZeroPml4e;
1192}
1193
1194
1195/**
1196 * Gets the pointer to the specified shadow page map level-4 entry.
1197 *
1198 * @returns The entry.
1199 * @param pVCpu The cross context virtual CPU structure.
1200 * @param iPml4 The PML4 index.
1201 */
1202DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
1203{
1204 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1205 if (pShwPml4)
1206 return &pShwPml4->a[iPml4];
1207 return NULL;
1208}
1209
1210
1211/**
1212 * Cached physical handler lookup.
1213 *
1214 * @returns Physical handler covering @a GCPhys.
1215 * @param pVM The cross context VM structure.
1216 * @param GCPhys The lookup address.
1217 */
1218DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys)
1219{
1220 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1221 if ( pHandler
1222 && GCPhys >= pHandler->Core.Key
1223 && GCPhys < pHandler->Core.KeyLast)
1224 {
1225 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1226 return pHandler;
1227 }
1228
1229 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1230 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1231 if (pHandler)
1232 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1233 return pHandler;
1234}
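
/* A minimal usage sketch (hypothetical caller), assuming the PGM lock is held
 * so the cached handler pointer stays valid:
 *
 *      PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhys);
 *      if (pHandler)
 *          Log(("GCPhys=%RGp hits handler range %RGp-%RGp\n",
 *               GCPhys, pHandler->Core.Key, pHandler->Core.KeyLast));
 */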
1235
1236
1237/**
1238 * Internal worker for finding an 'in-use' shadow page given by its index.
1239 *
1240 * @returns Pointer to the shadow page structure.
1241 * @param pPool The pool.
1242 * @param idx The pool page index.
1243 */
1244DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1245{
1246 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1247 return &pPool->aPages[idx];
1248}
1249
1250
1251/**
1252 * Clear references to guest physical memory.
1253 *
1254 * @param pPool The pool.
1255 * @param pPoolPage The pool page.
1256 * @param pPhysPage The physical guest page tracking structure.
1257 * @param iPte Shadow PTE index
1258 */
1259DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1260{
1261 /*
1262 * Just deal with the simple case here.
1263 */
1264#ifdef VBOX_STRICT
1265 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1266#endif
1267#ifdef LOG_ENABLED
1268 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1269#endif
1270 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1271 if (cRefs == 1)
1272 {
1273 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1274 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1275 /* Invalidate the tracking data. */
1276 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1277 }
1278 else
1279 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1280 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1281}
1282
1283
1284/**
1285 * Moves the page to the head of the age list.
1286 *
1287 * This is done when the cached page is used in one way or another.
1288 *
1289 * @param pPool The pool.
1290 * @param pPage The cached page.
1291 */
1292DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1293{
1294 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1295
1296 /*
1297 * Move to the head of the age list.
1298 */
1299 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1300 {
1301 /* unlink */
1302 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1303 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1304 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1305 else
1306 pPool->iAgeTail = pPage->iAgePrev;
1307
1308 /* insert at head */
1309 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1310 pPage->iAgeNext = pPool->iAgeHead;
1311 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1312 pPool->iAgeHead = pPage->idx;
1313 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1314 }
1315}
1316
1317
1318/**
1319 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
1320 *
1321 * @param pPool The pool.
1322 * @param pPage PGM pool page
1323 */
1324DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1325{
1326 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1327 ASMAtomicIncU32(&pPage->cLocked);
1328}
1329
1330
1331/**
1332 * Unlocks a page to allow flushing again
1333 *
1334 * @param pPool The pool.
1335 * @param pPage PGM pool page
1336 */
1337DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1338{
1339 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1340 Assert(pPage->cLocked);
1341 ASMAtomicDecU32(&pPage->cLocked);
1342}
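
/* A minimal usage sketch (hypothetical caller): the lock/unlock pair pins a
 * shadow page, e.g. a CR3 root, across an operation that could otherwise
 * flush it:
 *
 *      pgmPoolLockPage(pPool, pPage);
 *      // ... work on the shadow page without it being flushed ...
 *      pgmPoolUnlockPage(pPool, pPage);
 */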
1343
1344
1345/**
1346 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
1347 *
1348 * @returns true if the page is locked, false if not.
1349 * @param pPage PGM pool page
1350 */
1351DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1352{
1353 if (pPage->cLocked)
1354 {
1355 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1356 if (pPage->cModifications)
1357 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1358 return true;
1359 }
1360 return false;
1361}
1362
1363
1364/**
1365 * Check if the specified page is dirty (not write monitored)
1366 *
1367 * @returns true if the page is dirty, false if not.
1368 * @param pVM The cross context VM structure.
1369 * @param GCPhys Guest physical address
1370 */
1371DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1372{
1373 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1374 PGM_LOCK_ASSERT_OWNER(pVM);
1375 if (!pPool->cDirtyPages)
1376 return false;
1377 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1378}
1379
1380
1381/**
1382 * Tells if mappings are to be put into the shadow page table or not.
1383 *
1384 * @returns boolean result
1385 * @param pVM The cross context VM structure.
1386 */
1387DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVMCC pVM)
1388{
1389#ifdef PGM_WITHOUT_MAPPINGS
1390 /* Only raw-mode has mappings. */
1391 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1392 return false;
1393#else
1394 Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1395 return VM_IS_RAW_MODE_ENABLED(pVM);
1396#endif
1397}
1398
1399
1400/**
1401 * Checks if the mappings are floating and enabled.
1402 *
1403 * @returns true / false.
1404 * @param pVM The cross context VM structure.
1405 */
1406DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVMCC pVM)
1407{
1408#ifdef PGM_WITHOUT_MAPPINGS
1409 /* Only raw-mode has mappings. */
1410 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1411 return false;
1412#else
1413 return !pVM->pgm.s.fMappingsFixed
1414 && pgmMapAreMappingsEnabled(pVM);
1415#endif
1416}
1417
1418/** @} */
1419
1420#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1421