VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@ 82968

Last change on this file was revision 82968, checked in by vboxsync, 5 years ago:

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.4 KB
1/* $Id: PGMInline.h 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
19#define VMM_INCLUDED_SRC_include_PGMInline_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/err.h>
27#include <VBox/vmm/stam.h>
28#include <VBox/param.h>
29#include <VBox/vmm/vmm.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/pdmcritsect.h>
32#include <VBox/vmm/pdmapi.h>
33#include <VBox/dis.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/log.h>
36#include <VBox/vmm/gmm.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/nem.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
47/** @addtogroup grp_pgm_int Internals
48 * @internal
49 * @{
50 */
51
52/**
53 * Gets the PGMRAMRANGE structure for a guest page.
54 *
55 * @returns Pointer to the RAM range on success.
56 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
57 *
58 * @param pVM The cross context VM structure.
59 * @param GCPhys The GC physical address.
60 */
61DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
62{
63 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
64 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
65 return pgmPhysGetRangeSlow(pVM, GCPhys);
66 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
67 return pRam;
68}
69
70
71/**
72 * Gets the PGMRAMRANGE structure for a guest page; if the address is unassigned,
73 * gets the RAM range above it.
74 *
75 * @returns Pointer to the RAM range on success.
76 * @returns NULL if the address is located after the last range.
77 *
78 * @param pVM The cross context VM structure.
79 * @param GCPhys The GC physical address.
80 */
81DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
82{
83 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
84 if ( !pRam
85 || (GCPhys - pRam->GCPhys) >= pRam->cb)
86 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
87 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
88 return pRam;
89}
90
91
92/**
93 * Gets the PGMPAGE structure for a guest page.
94 *
95 * @returns Pointer to the page on success.
96 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
97 *
98 * @param pVM The cross context VM structure.
99 * @param GCPhys The GC physical address.
100 */
101DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
102{
103 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
104 RTGCPHYS off;
105 if ( !pRam
106 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
107 return pgmPhysGetPageSlow(pVM, GCPhys);
108 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
109 return &pRam->aPages[off >> PAGE_SHIFT];
110}
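
/* Illustrative usage sketch, not part of the original file: how a caller that
 * already has a cross context VM structure (pVM) and a guest physical address
 * (GCPhys) might resolve a page and its host physical address with the helper
 * above; error handling is reduced to the NULL check.
 *
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     if (pPage)
 *     {
 *         RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
 *         // ... use HCPhys ...
 *     }
 *     // A NULL return is the VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition:
 *     // GCPhys is not covered by any registered RAM range.
 */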
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * Old Phys code: Will make sure the page is present.
117 *
118 * @returns VBox status code.
119 * @retval VINF_SUCCESS and a valid *ppPage on success.
120 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
121 *
122 * @param pVM The cross context VM structure.
123 * @param GCPhys The GC physical address.
124 * @param ppPage Where to store the page pointer on success.
125 */
126DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
127{
128 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
129 RTGCPHYS off;
130 if ( !pRam
131 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
132 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
133 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
134 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
135 return VINF_SUCCESS;
136}
137
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 * @param ppRamHint Where to read and store the ram list hint.
152 * The caller initializes this to NULL before the call.
153 */
154DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
155{
156 RTGCPHYS off;
157 PPGMRAMRANGE pRam = *ppRamHint;
158 if ( !pRam
159 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
160 {
161 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
162 if ( !pRam
163 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
164 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
165
166 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
167 *ppRamHint = pRam;
168 }
169 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
170 return VINF_SUCCESS;
171}
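
/* Illustrative usage sketch, not part of the original file: the ram range hint
 * is meant for walking consecutive guest pages without repeating the TLB/AVL
 * lookup. pVM, GCPhys and cPages stand in for the caller's context; as the doc
 * comment above requires, the hint starts out NULL.
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... inspect pPage ...
 *     }
 */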
172
173
174/**
175 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
176 *
177 * @returns Pointer to the page on success.
178 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
179 *
180 * @param pVM The cross context VM structure.
181 * @param GCPhys The GC physical address.
182 * @param ppPage Where to store the pointer to the PGMPAGE structure.
183 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
184 */
185DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
186{
187 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
188 RTGCPHYS off;
189 if ( !pRam
190 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
191 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
192
193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
194 *ppRam = pRam;
195 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
196 return VINF_SUCCESS;
197}
198
199
200/**
201 * Convert GC Phys to HC Phys.
202 *
203 * @returns VBox status code.
204 * @param pVM The cross context VM structure.
205 * @param GCPhys The GC physical address.
206 * @param pHCPhys Where to store the corresponding HC physical address.
207 *
208 * @deprecated Doesn't deal with zero, shared or write monitored pages.
209 * Avoid when writing new code!
210 */
211DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
212{
213 PPGMPAGE pPage;
214 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
215 if (RT_FAILURE(rc))
216 return rc;
217 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
218 return VINF_SUCCESS;
219}
220
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
222
223/**
224 * Inlined version of the ring-0 version of the host page mapping code
225 * that optimizes access to pages already in the set.
226 *
227 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
228 * @param pVCpu The cross context virtual CPU structure.
229 * @param HCPhys The physical address of the page.
230 * @param ppv Where to store the mapping address.
231 * @param SRC_POS The source location of the caller.
232 */
233DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPUCC pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
234{
235 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
236
237 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
238 Assert(!(HCPhys & PAGE_OFFSET_MASK));
239 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
240
241 unsigned iHash = PGMMAPSET_HASH(HCPhys);
242 unsigned iEntry = pSet->aiHashTable[iHash];
243 if ( iEntry < pSet->cEntries
244 && pSet->aEntries[iEntry].HCPhys == HCPhys
245 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
246 {
247 pSet->aEntries[iEntry].cInlinedRefs++;
248 *ppv = pSet->aEntries[iEntry].pvPage;
249 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
250 }
251 else
252 {
253 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
254 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
255 }
256
257 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
258 return VINF_SUCCESS;
259}
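
/* Illustrative usage sketch, not part of the original file and only relevant
 * to builds with VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0: mapping a page aligned
 * host physical address, mirroring the call pattern used by the pool mapping
 * helpers further down in this header.
 *
 *     void *pv;
 *     pgmRZDynMapHCPageInlined(pVCpu, HCPhys, &pv RTLOG_COMMA_SRC_POS);
 *     // pv now points to the start of the mapped page; per the doc comment
 *     // above, failure is handled by bailing out to ring-3.
 */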
260
261
262/**
263 * Inlined version of the guest page mapping code that optimizes access to pages
264 * already in the set.
265 *
266 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
267 * @param pVM The cross context VM structure.
268 * @param pVCpu The cross context virtual CPU structure.
269 * @param GCPhys The guest physical address of the page.
270 * @param ppv Where to store the mapping address.
271 * @param SRC_POS The source location of the caller.
272 */
273DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
274{
275 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
276 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
277
278 /*
279 * Get the ram range.
280 */
281 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
282 RTGCPHYS off;
283 if ( !pRam
284 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
285 /** @todo || page state stuff */
286 )
287 {
288 /* This case is not counted into StatRZDynMapGCPageInl. */
289 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
290 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
291 }
292
293 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
294 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
295
296 /*
297 * pgmRZDynMapHCPageInlined without the stats.
298 */
299 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
300 Assert(!(HCPhys & PAGE_OFFSET_MASK));
301 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
302
303 unsigned iHash = PGMMAPSET_HASH(HCPhys);
304 unsigned iEntry = pSet->aiHashTable[iHash];
305 if ( iEntry < pSet->cEntries
306 && pSet->aEntries[iEntry].HCPhys == HCPhys
307 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
308 {
309 pSet->aEntries[iEntry].cInlinedRefs++;
310 *ppv = pSet->aEntries[iEntry].pvPage;
311 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
312 }
313 else
314 {
315 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
316 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
317 }
318
319 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
320 return VINF_SUCCESS;
321}
322
323
324/**
325 * Inlined version of the ring-0 version of guest page mapping that optimizes
326 * access to pages already in the set.
327 *
328 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
329 * @param pVCpu The cross context virtual CPU structure.
330 * @param GCPhys The guest physical address of the page.
331 * @param ppv Where to store the mapping address.
332 * @param SRC_POS The source location of the caller.
333 */
334DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
335{
336 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
337}
338
339
340/**
341 * Inlined version of the ring-0 version of the guest byte mapping code
342 * that optimizes access to pages already in the set.
343 *
344 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
345 * @param pVCpu The cross context virtual CPU structure.
346 * @param GCPhys The guest physical address of the page.
347 * @param ppv Where to store the mapping address. The offset is
348 * preserved.
349 * @param SRC_POS The source location of the caller.
350 */
351DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
352{
353 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
354
355 /*
356 * Get the ram range.
357 */
358 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
359 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
360 RTGCPHYS off;
361 if ( !pRam
362 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
363 /** @todo || page state stuff */
364 )
365 {
366 /* This case is not counted into StatRZDynMapGCPageInl. */
367 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
368 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
369 }
370
371 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
372 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
373
374 /*
375 * pgmRZDynMapHCPageInlined without the stats.
376 */
377 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
378 Assert(!(HCPhys & PAGE_OFFSET_MASK));
379 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
380
381 unsigned iHash = PGMMAPSET_HASH(HCPhys);
382 unsigned iEntry = pSet->aiHashTable[iHash];
383 if ( iEntry < pSet->cEntries
384 && pSet->aEntries[iEntry].HCPhys == HCPhys
385 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
386 {
387 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
388 pSet->aEntries[iEntry].cInlinedRefs++;
389 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
390 }
391 else
392 {
393 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
394 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
395 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
396 }
397
398 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
399 return VINF_SUCCESS;
400}
401
402
403/**
405 * Maps the page into the current context (RC and maybe R0).
405 *
406 * @returns pointer to the mapping.
407 * @param pVM The cross context VM structure.
408 * @param pPage The page.
409 * @param SRC_POS The source location of the caller.
410 */
411DECLINLINE(void *) pgmPoolMapPageInlined(PVMCC pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
412{
413 if (pPage->idx >= PGMPOOL_IDX_FIRST)
414 {
415 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
416 void *pv;
417 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
418 return pv;
419 }
420 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
421}
422
423
424/**
426 * Maps the page into the current context (RC and maybe R0).
426 *
427 * @returns pointer to the mapping.
428 * @param pVM The cross context VM structure.
429 * @param pVCpu The cross context virtual CPU structure.
430 * @param pPage The page.
431 * @param SRC_POS The source location of the caller.
432 */
433DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
434{
435 if (pPage->idx >= PGMPOOL_IDX_FIRST)
436 {
437 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
438 void *pv;
439 Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
440 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
441 return pv;
442 }
443 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
444}
445
446#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
447
448/**
449 * Queries the Physical TLB entry for a physical guest page,
450 * attempting to load the TLB entry if necessary.
451 *
452 * @returns VBox status code.
453 * @retval VINF_SUCCESS on success
454 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
455 *
456 * @param pVM The cross context VM structure.
457 * @param GCPhys The address of the guest page.
458 * @param ppTlbe Where to store the pointer to the TLB entry.
459 */
460DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
461{
462 int rc;
463 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
464 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
465 {
466 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
467 rc = VINF_SUCCESS;
468 }
469 else
470 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
471 *ppTlbe = pTlbe;
472 return rc;
473}
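
/* Illustrative usage sketch, not part of the original file: querying the
 * physical TLB and using the cached mapping, assuming the caller holds the
 * PGM lock and that the load path filled in the entry on a miss.
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pvPage = pTlbe->pv;
 *         // ... access the guest page through pvPage ...
 *     }
 */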
474
475
476/**
477 * Queries the Physical TLB entry for a physical guest page,
478 * attempting to load the TLB entry if necessary.
479 *
480 * @returns VBox status code.
481 * @retval VINF_SUCCESS on success
482 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
483 *
484 * @param pVM The cross context VM structure.
485 * @param pPage Pointer to the PGMPAGE structure corresponding to
486 * GCPhys.
487 * @param GCPhys The address of the guest page.
488 * @param ppTlbe Where to store the pointer to the TLB entry.
489 */
490DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
491{
492 int rc;
493 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
494 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
495 {
496 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
497 rc = VINF_SUCCESS;
498#if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
499# ifdef IN_RING3
500 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
501# else
502 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
503# endif
504 pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
505#endif
506 AssertPtr(pTlbe->pv);
507#if defined(IN_RING3) || (!defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_RAM_IN_KERNEL))
508 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
509#endif
510 }
511 else
512 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
513 *ppTlbe = pTlbe;
514 return rc;
515}
516
517
518/**
519 * Calculates NEM page protection flags.
520 */
521DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
522{
523 /*
524 * Deal with potentially writable pages first.
525 */
526 if (PGMPAGETYPE_IS_RWX(enmType))
527 {
528 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
529 {
530 if (PGM_PAGE_IS_ALLOCATED(pPage))
531 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
532 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
533 }
534 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
535 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
536 }
537 /*
538 * Potentially readable & executable pages.
539 */
540 else if ( PGMPAGETYPE_IS_ROX(enmType)
541 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
542 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
543
544 /*
545 * The rest needs special access handling.
546 */
547 return NEM_PAGE_PROT_NONE;
548}
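
/* Informal summary of the calculation above, derived from the code:
 *   RWX page type, no active handlers, allocated page  -> READ | WRITE | EXECUTE
 *   RWX page type, no active handlers, not allocated   -> READ | EXECUTE
 *   RWX page type, write handlers only (not "all")     -> READ | EXECUTE
 *   ROX page type, no "all" access handlers            -> READ | EXECUTE
 *   everything else (incl. "all" access handlers)      -> NONE
 */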
549
550
551/**
552 * Enables write monitoring for an allocated page.
553 *
554 * The caller is responsible for updating the shadow page tables.
555 *
556 * @param pVM The cross context VM structure.
557 * @param pPage The page to write monitor.
558 * @param GCPhysPage The address of the page.
559 */
560DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
561{
562 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
563 PGM_LOCK_ASSERT_OWNER(pVM);
564
565 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
566 pVM->pgm.s.cMonitoredPages++;
567
568 /* Large pages must be disabled. */
569 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
570 {
571 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
572 AssertFatal(pFirstPage);
573 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
574 {
575 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
576 pVM->pgm.s.cLargePagesDisabled++;
577 }
578 else
579 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
580 }
581
582 /* Tell NEM. */
583 if (VM_IS_NEM_ENABLED(pVM))
584 {
585 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
586 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
587 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
588 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
589 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
590 }
591}
592
593
594/**
595 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
596 *
597 * Only used when the guest is in PAE or long mode. This is inlined so that we
598 * can perform consistency checks in debug builds.
599 *
600 * @returns true if it is, false if it isn't.
601 * @param pVCpu The cross context virtual CPU structure.
602 */
603DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
604{
605 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
606 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
607 return pVCpu->pgm.s.fNoExecuteEnabled;
608}
609
610
611/**
612 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
613 *
614 * Only used when the guest is in paged 32-bit mode. This is inlined so that
615 * we can perform consistency checks in debug builds.
616 *
617 * @returns true if it is, false if it isn't.
618 * @param pVCpu The cross context virtual CPU structure.
619 */
620DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
621{
622 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
623 Assert(!CPUMIsGuestInPAEMode(pVCpu));
624 Assert(!CPUMIsGuestInLongMode(pVCpu));
625 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
626}
627
628
629/**
630 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
631 * Takes PSE-36 into account.
632 *
633 * @returns guest physical address
634 * @param pVM The cross context VM structure.
635 * @param Pde Guest Pde
636 */
637DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
638{
639 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
640 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
641
642 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
643}
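
/* Worked example, not part of the original file: with PSE-36 the PDE field
 * u8PageNoHigh supplies physical address bits 32..39. A PDE whose 4 MB base
 * is 0x00400000 with u8PageNoHigh = 0x01 therefore yields
 *     0x00400000 | ((RTGCPHYS)0x01 << 32) = 0x100400000,
 * which GCPhys4MBPSEMask then clips to what the emulated CPU can address.
 */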
644
645
646/**
647 * Gets the address of the guest page directory (32-bit paging).
648 *
649 * @returns VBox status code.
650 * @param pVCpu The cross context virtual CPU structure.
651 * @param ppPd Where to return the mapping. This is always set.
652 */
653DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
654{
655#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
656 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
657 if (RT_FAILURE(rc))
658 {
659 *ppPd = NULL;
660 return rc;
661 }
662#else
663 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
664 if (RT_UNLIKELY(!*ppPd))
665 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
666#endif
667 return VINF_SUCCESS;
668}
669
670
671/**
672 * Gets the address of the guest page directory (32-bit paging).
673 *
674 * @returns Pointer to the page directory entry in question.
675 * @param pVCpu The cross context virtual CPU structure.
676 */
677DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
678{
679#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
680 PX86PD pGuestPD = NULL;
681 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
682 if (RT_FAILURE(rc))
683 {
684 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
685 return NULL;
686 }
687#else
688 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
689 if (RT_UNLIKELY(!pGuestPD))
690 {
691 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
692 if (RT_FAILURE(rc))
693 return NULL;
694 }
695#endif
696 return pGuestPD;
697}
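
/* Illustrative usage sketch, not part of the original file: fetching a guest
 * PDE for a 32-bit linear address (GCPtr) via the pointer returned above,
 * using the same index calculation as the shadow accessors below.
 *
 *     PX86PD pGuestPD = pgmGstGet32bitPDPtr(pVCpu);
 *     if (pGuestPD)
 *     {
 *         X86PDE Pde = pGuestPD->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
 *         // ... examine Pde (present bit, large page bit, ...) ...
 *     }
 */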
698
699
700/**
701 * Gets the guest page directory pointer table.
702 *
703 * @returns VBox status code.
704 * @param pVCpu The cross context virtual CPU structure.
705 * @param ppPdpt Where to return the mapping. This is always set.
706 */
707DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
708{
709#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
710 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
711 if (RT_FAILURE(rc))
712 {
713 *ppPdpt = NULL;
714 return rc;
715 }
716#else
717 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
718 if (RT_UNLIKELY(!*ppPdpt))
719 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
720#endif
721 return VINF_SUCCESS;
722}
723
724
725/**
726 * Gets the guest page directory pointer table.
727 *
728 * @returns Pointer to the page directory in question.
729 * @returns NULL if the page directory is not present or on an invalid page.
730 * @param pVCpu The cross context virtual CPU structure.
731 */
732DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
733{
734 PX86PDPT pGuestPdpt;
735 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
736 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
737 return pGuestPdpt;
738}
739
740
741/**
742 * Gets the guest page directory pointer table entry for the specified address.
743 *
744 * @returns Pointer to the page directory in question.
745 * @returns NULL if the page directory is not present or on an invalid page.
746 * @param pVCpu The cross context virtual CPU structure.
747 * @param GCPtr The address.
748 */
749DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
750{
751 AssertGCPtr32(GCPtr);
752
753#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
754 PX86PDPT pGuestPDPT = NULL;
755 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
756 AssertRCReturn(rc, NULL);
757#else
758 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
759 if (RT_UNLIKELY(!pGuestPDPT))
760 {
761 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
762 if (RT_FAILURE(rc))
763 return NULL;
764 }
765#endif
766 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
767}
768
769
770/**
771 * Gets the page directory entry for the specified address.
772 *
773 * @returns The page directory entry in question.
774 * @returns A non-present entry if the page directory is not present or on an invalid page.
775 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
776 * @param GCPtr The address.
777 */
778DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
779{
780 AssertGCPtr32(GCPtr);
781 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
782 if (RT_LIKELY(pGuestPDPT))
783 {
784 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
785 if ( pGuestPDPT->a[iPdpt].n.u1Present
786 && !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
787 {
788 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
789#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
790 PX86PDPAE pGuestPD = NULL;
791 int rc = pgmRZDynMapGCPageInlined(pVCpu,
792 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
793 (void **)&pGuestPD
794 RTLOG_COMMA_SRC_POS);
795 if (RT_SUCCESS(rc))
796 return pGuestPD->a[iPD];
797 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
798#else
799 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
800 if ( !pGuestPD
801 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
802 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
803 if (pGuestPD)
804 return pGuestPD->a[iPD];
805#endif
806 }
807 }
808
809 X86PDEPAE ZeroPde = {0};
810 return ZeroPde;
811}
812
813
814/**
815 * Gets the page directory pointer table entry for the specified address
816 * and returns the index into the page directory.
817 *
818 * @returns Pointer to the page directory in question.
819 * @returns NULL if the page directory is not present or on an invalid page.
820 * @param pVCpu The cross context virtual CPU structure.
821 * @param GCPtr The address.
822 * @param piPD Receives the index into the returned page directory
823 * @param pPdpe Receives the page directory pointer entry. Optional.
824 */
825DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
826{
827 AssertGCPtr32(GCPtr);
828
829 /* The PDPE. */
830 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
831 if (RT_UNLIKELY(!pGuestPDPT))
832 return NULL;
833 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
834 if (pPdpe)
835 *pPdpe = pGuestPDPT->a[iPdpt];
836 if (!pGuestPDPT->a[iPdpt].n.u1Present)
837 return NULL;
838 if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
839 return NULL;
840
841 /* The PDE. */
842#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
843 PX86PDPAE pGuestPD = NULL;
844 int rc = pgmRZDynMapGCPageInlined(pVCpu,
845 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
846 (void **)&pGuestPD
847 RTLOG_COMMA_SRC_POS);
848 if (RT_FAILURE(rc))
849 {
850 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
851 return NULL;
852 }
853#else
854 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
855 if ( !pGuestPD
856 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
857 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
858#endif
859
860 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
861 return pGuestPD;
862}
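
/* Illustrative usage sketch, not part of the original file: looking up a
 * guest PAE PDE through the helper above; iPD and Pdpe are filled in by the
 * call, GCPtr stands in for the caller's linear address.
 *
 *     unsigned  iPD = 0;
 *     X86PDPE   Pdpe;
 *     PX86PDPAE pGuestPD = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPD, &Pdpe);
 *     if (pGuestPD)
 *     {
 *         X86PDEPAE Pde = pGuestPD->a[iPD];
 *         // ... Pdpe and Pde describe the mapping of GCPtr ...
 *     }
 */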
863
864
865/**
866 * Gets the page map level-4 pointer for the guest.
867 *
868 * @returns VBox status code.
869 * @param pVCpu The cross context virtual CPU structure.
870 * @param ppPml4 Where to return the mapping. Always set.
871 */
872DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
873{
874#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
875 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
876 if (RT_FAILURE(rc))
877 {
878 *ppPml4 = NULL;
879 return rc;
880 }
881#else
882 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
883 if (RT_UNLIKELY(!*ppPml4))
884 return pgmGstLazyMapPml4(pVCpu, ppPml4);
885#endif
886 return VINF_SUCCESS;
887}
888
889
890/**
891 * Gets the page map level-4 pointer for the guest.
892 *
893 * @returns Pointer to the PML4 page.
894 * @param pVCpu The cross context virtual CPU structure.
895 */
896DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
897{
898 PX86PML4 pGuestPml4;
899 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
900 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
901 return pGuestPml4;
902}
903
904
905/**
906 * Gets the pointer to a page map level-4 entry.
907 *
908 * @returns Pointer to the PML4 entry.
909 * @param pVCpu The cross context virtual CPU structure.
910 * @param iPml4 The index.
911 * @remarks Only used by AssertCR3.
912 */
913DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
914{
915#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
916 PX86PML4 pGuestPml4;
917 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
918 AssertRCReturn(rc, NULL);
919#else
920 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
921 if (RT_UNLIKELY(!pGuestPml4))
922 {
923 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
924 AssertRCReturn(rc, NULL);
925 }
926#endif
927 return &pGuestPml4->a[iPml4];
928}
929
930
931/**
932 * Gets the page directory entry for the specified address.
933 *
934 * @returns The page directory entry in question.
935 * @returns A non-present entry if the page directory is not present or on an invalid page.
936 * @param pVCpu The cross context virtual CPU structure.
937 * @param GCPtr The address.
938 */
939DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
940{
941 /*
942 * Note! To keep things simple, ASSUME invalid physical addresses will
943 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
944 * supporting 52-bit wide physical guest addresses.
945 */
946 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
947 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
948 if ( RT_LIKELY(pGuestPml4)
949 && pGuestPml4->a[iPml4].n.u1Present
950 && !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
951 {
952 PCX86PDPT pPdptTemp;
953 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
954 if (RT_SUCCESS(rc))
955 {
956 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
957 if ( pPdptTemp->a[iPdpt].n.u1Present
958 && !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
959 {
960 PCX86PDPAE pPD;
961 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
962 if (RT_SUCCESS(rc))
963 {
964 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
965 return pPD->a[iPD];
966 }
967 }
968 }
969 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
970 }
971
972 X86PDEPAE ZeroPde = {0};
973 return ZeroPde;
974}
975
976
977/**
978 * Gets the GUEST page directory pointer for the specified address.
979 *
980 * @returns The page directory in question.
981 * @returns NULL if the page directory is not present or on an invalid page.
982 * @param pVCpu The cross context virtual CPU structure.
983 * @param GCPtr The address.
984 * @param ppPml4e Page Map Level-4 Entry (out)
985 * @param pPdpe Page directory pointer table entry (out)
986 * @param piPD Receives the index into the returned page directory
987 */
988DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
989{
990 /* The PML4E. */
991 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
992 if (RT_UNLIKELY(!pGuestPml4))
993 return NULL;
994 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
995 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
996 if (!pPml4e->n.u1Present)
997 return NULL;
998 if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
999 return NULL;
1000
1001 /* The PDPE. */
1002 PCX86PDPT pPdptTemp;
1003 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
1004 if (RT_FAILURE(rc))
1005 {
1006 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1007 return NULL;
1008 }
1009 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1010 *pPdpe = pPdptTemp->a[iPdpt];
1011 if (!pPdpe->n.u1Present)
1012 return NULL;
1013 if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
1014 return NULL;
1015
1016 /* The PDE. */
1017 PX86PDPAE pPD;
1018 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
1019 if (RT_FAILURE(rc))
1020 {
1021 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1022 return NULL;
1023 }
1024
1025 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1026 return pPD;
1027}
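
/* Illustrative usage sketch, not part of the original file: walking a
 * long-mode guest address down to its PDE with the helper above; GCPtr
 * stands in for the caller's 64-bit linear address.
 *
 *     PX86PML4E pPml4e = NULL;
 *     X86PDPE   Pdpe;
 *     unsigned  iPD    = 0;
 *     PX86PDPAE pPD    = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4e, &Pdpe, &iPD);
 *     if (pPD)
 *     {
 *         X86PDEPAE Pde = pPD->a[iPD];
 *         // ... pPml4e, Pdpe and Pde give the full translation chain ...
 *     }
 */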
1028
1029
1030/**
1031 * Gets the shadow page directory, 32-bit.
1032 *
1033 * @returns Pointer to the shadow 32-bit PD.
1034 * @param pVCpu The cross context virtual CPU structure.
1035 */
1036DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
1037{
1038 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1039}
1040
1041
1042/**
1043 * Gets the shadow page directory entry for the specified address, 32-bit.
1044 *
1045 * @returns Shadow 32-bit PDE.
1046 * @param pVCpu The cross context virtual CPU structure.
1047 * @param GCPtr The address.
1048 */
1049DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1050{
1051 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1052 if (!pShwPde)
1053 {
1054 X86PDE ZeroPde = {0};
1055 return ZeroPde;
1056 }
1057 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1058}
1059
1060
1061/**
1062 * Gets the pointer to the shadow page directory entry for the specified
1063 * address, 32-bit.
1064 *
1065 * @returns Pointer to the shadow 32-bit PDE.
1066 * @param pVCpu The cross context virtual CPU structure.
1067 * @param GCPtr The address.
1068 */
1069DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1070{
1071 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1072 AssertReturn(pPde, NULL);
1073 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1074}
1075
1076
1077/**
1078 * Gets the shadow page directory pointer table, PAE.
1079 *
1080 * @returns Pointer to the shadow PAE PDPT.
1081 * @param pVCpu The cross context virtual CPU structure.
1082 */
1083DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
1084{
1085 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1086}
1087
1088
1089/**
1090 * Gets the shadow page directory for the specified address, PAE.
1091 *
1092 * @returns Pointer to the shadow PD.
1093 * @param pVCpu The cross context virtual CPU structure.
1094 * @param GCPtr The address.
1095 */
1096DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1097{
1098 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1099 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1100
1101 if (!pPdpt->a[iPdpt].n.u1Present)
1102 return NULL;
1103
1104 /* Fetch the pgm pool shadow descriptor. */
1105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1106 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1107 AssertReturn(pShwPde, NULL);
1108
1109 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1110}
1111
1112
1113/**
1114 * Gets the shadow page directory for the specified address, PAE.
1115 *
1116 * @returns Pointer to the shadow PD.
1117 * @param pVCpu The cross context virtual CPU structure.
1118 * @param pPdpt Pointer to the page directory pointer table.
1119 * @param GCPtr The address.
1120 */
1121DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1122{
1123 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1124
1125 if (!pPdpt->a[iPdpt].n.u1Present)
1126 return NULL;
1127
1128 /* Fetch the pgm pool shadow descriptor. */
1129 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1130 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1131 AssertReturn(pShwPde, NULL);
1132
1133 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1134}
1135
1136
1137/**
1138 * Gets the shadow page directory entry, PAE.
1139 *
1140 * @returns PDE.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param GCPtr The address.
1143 */
1144DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1145{
1146 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1147
1148 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1149 if (!pShwPde)
1150 {
1151 X86PDEPAE ZeroPde = {0};
1152 return ZeroPde;
1153 }
1154 return pShwPde->a[iPd];
1155}
1156
1157
1158/**
1159 * Gets the pointer to the shadow page directory entry for an address, PAE.
1160 *
1161 * @returns Pointer to the PDE.
1162 * @param pVCpu The cross context virtual CPU structure.
1163 * @param GCPtr The address.
1164 * @remarks Only used by AssertCR3.
1165 */
1166DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1167{
1168 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1169
1170 PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1171 AssertReturn(pPde, NULL);
1172 return &pPde->a[iPd];
1173}
1174
1175
1176/**
1177 * Gets the shadow page map level-4 pointer.
1178 *
1179 * @returns Pointer to the shadow PML4.
1180 * @param pVCpu The cross context virtual CPU structure.
1181 */
1182DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
1183{
1184 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1185}
1186
1187
1188/**
1189 * Gets the shadow page map level-4 entry for the specified address.
1190 *
1191 * @returns The entry.
1192 * @param pVCpu The cross context virtual CPU structure.
1193 * @param GCPtr The address.
1194 */
1195DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1196{
1197 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1198 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1199
1200 if (!pShwPml4)
1201 {
1202 X86PML4E ZeroPml4e = {0};
1203 return ZeroPml4e;
1204 }
1205 return pShwPml4->a[iPml4];
1206}
1207
1208
1209/**
1210 * Gets the pointer to the specified shadow page map level-4 entry.
1211 *
1212 * @returns Pointer to the entry, or NULL if the PML4 is not present.
1213 * @param pVCpu The cross context virtual CPU structure.
1214 * @param iPml4 The PML4 index.
1215 */
1216DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
1217{
1218 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1219 if (!pShwPml4)
1220 return NULL;
1221 return &pShwPml4->a[iPml4];
1222}
1223
1224
1225/**
1226 * Cached physical handler lookup.
1227 *
1228 * @returns Physical handler covering @a GCPhys.
1229 * @param pVM The cross context VM structure.
1230 * @param GCPhys The lookup address.
1231 */
1232DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys)
1233{
1234 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1235 if ( pHandler
1236 && GCPhys >= pHandler->Core.Key
1237 && GCPhys < pHandler->Core.KeyLast)
1238 {
1239 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1240 return pHandler;
1241 }
1242
1243 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1244 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1245 if (pHandler)
1246 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1247 return pHandler;
1248}
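
/* Illustrative usage sketch, not part of the original file: the single entry
 * pLastPhysHandler cache above makes repeated lookups in the same handler
 * range cheap, so callers typically just test the result.
 *
 *     PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhys);
 *     if (pHandler)
 *     {
 *         // GCPhys is covered by a registered physical access handler
 *         // (Core.Key .. Core.KeyLast).
 *     }
 */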
1249
1250
1251/**
1252 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
1253 *
1254 * @returns Pointer to the shadow page structure.
1255 * @param pPool The pool.
1256 * @param idx The pool page index.
1257 */
1258DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1259{
1260 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1261 return &pPool->aPages[idx];
1262}
1263
1264
1265/**
1266 * Clear references to guest physical memory.
1267 *
1268 * @param pPool The pool.
1269 * @param pPoolPage The pool page.
1270 * @param pPhysPage The physical guest page tracking structure.
1271 * @param iPte Shadow PTE index
1272 */
1273DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1274{
1275 /*
1276 * Just deal with the simple case here.
1277 */
1278#ifdef VBOX_STRICT
1279 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1280#endif
1281#ifdef LOG_ENABLED
1282 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1283#endif
1284 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1285 if (cRefs == 1)
1286 {
1287 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1288 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1289 /* Invalidate the tracking data. */
1290 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1291 }
1292 else
1293 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1294 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1295}
1296
1297
1298/**
1299 * Moves the page to the head of the age list.
1300 *
1301 * This is done when the cached page is used in one way or another.
1302 *
1303 * @param pPool The pool.
1304 * @param pPage The cached page.
1305 */
1306DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1307{
1308 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1309
1310 /*
1311 * Move to the head of the age list.
1312 */
1313 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1314 {
1315 /* unlink */
1316 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1317 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1318 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1319 else
1320 pPool->iAgeTail = pPage->iAgePrev;
1321
1322 /* insert at head */
1323 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1324 pPage->iAgeNext = pPool->iAgeHead;
1325 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1326 pPool->iAgeHead = pPage->idx;
1327 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1328 }
1329}
1330
1331
1332/**
1333 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1334 *
1335 * @param pPool The pool.
1336 * @param pPage PGM pool page
1337 */
1338DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1339{
1340 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1341 ASMAtomicIncU32(&pPage->cLocked);
1342}
1343
1344
1345/**
1346 * Unlocks a page to allow flushing again.
1347 *
1348 * @param pPool The pool.
1349 * @param pPage PGM pool page
1350 */
1351DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1352{
1353 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1354 Assert(pPage->cLocked);
1355 ASMAtomicDecU32(&pPage->cLocked);
1356}
1357
1358
1359/**
1360 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
1361 *
1362 * @returns true if the page is locked, false if not.
1363 * @param pPage PGM pool page
1364 */
1365DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1366{
1367 if (pPage->cLocked)
1368 {
1369 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1370 if (pPage->cModifications)
1371 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1372 return true;
1373 }
1374 return false;
1375}
1376
1377
1378/**
1379 * Checks if the specified page is dirty (not write monitored).
1380 *
1381 * @returns true if dirty, false if not.
1382 * @param pVM The cross context VM structure.
1383 * @param GCPhys Guest physical address.
1384 */
1385DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1386{
1387 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1388 PGM_LOCK_ASSERT_OWNER(pVM);
1389 if (!pPool->cDirtyPages)
1390 return false;
1391 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1392}
1393
1394
1395/**
1396 * Tells if mappings are to be put into the shadow page table or not.
1397 *
1398 * @returns boolean result
1399 * @param pVM The cross context VM structure.
1400 */
1401DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVMCC pVM)
1402{
1403#ifdef PGM_WITHOUT_MAPPINGS
1404 /* Only raw-mode has mappings. */
1405 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1406 return false;
1407#else
1408 Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1409 return VM_IS_RAW_MODE_ENABLED(pVM);
1410#endif
1411}
1412
1413
1414/**
1415 * Checks if the mappings are floating and enabled.
1416 *
1417 * @returns true / false.
1418 * @param pVM The cross context VM structure.
1419 */
1420DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVMCC pVM)
1421{
1422#ifdef PGM_WITHOUT_MAPPINGS
1423 /* Only raw-mode has mappings. */
1424 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1425 return false;
1426#else
1427 return !pVM->pgm.s.fMappingsFixed
1428 && pgmMapAreMappingsEnabled(pVM);
1429#endif
1430}
1431
1432/** @} */
1433
1434#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1435