VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGM-armv8.cpp@107044

Last change on this file since 107044 was 106670, checked in by vboxsync, 4 weeks ago:

VMM/ARM: Workaround for the UEFI accessing MMIO space with an instruction which doesn't produce a valid instruction syndrome, bugref:10732 [build fix]

/* $Id: PGM-armv8.cpp 106670 2024-10-24 17:05:00Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, ARMv8 variant. (Mixing stuff here, not good?)
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/** @page pg_pgm_armv8 PGM - The Page Manager and Monitor (ARMv8 variant)
 *
 * For now this is just a stub for bringing up the ARMv8 hypervisor. We'll see
 * how much we really need here later on and whether it makes sense to merge
 * this with the original PGM.cpp (avoiding \#ifdef hell with it, as I'm not
 * confident enough to fiddle around with PGM too much at this point).
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/cpum-armv8.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/hm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/uvm.h>
#include "PGMInline.h"

#include <VBox/dbg.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/env.h>
#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/rand.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifndef VBOX_WITH_PGM_NEM_MODE
# error "This requires VBOX_WITH_PGM_NEM_MODE to be set at all times!"
#endif

/**
 * Interface that NEM uses to switch PGM into simplified memory management mode.
 *
 * This call occurs before PGMR3Init.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM)
{
    AssertFatal(!PDMCritSectIsInitialized(&pVM->pgm.s.CritSectX));
    pVM->pgm.s.fNemMode = true;
}


/**
 * Checks whether the simplified memory management mode for NEM is enabled.
 *
 * @returns true if enabled, false if not.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(bool) PGMR3IsNemModeEnabled(PVM pVM)
{
    return pVM->pgm.s.fNemMode;
}


/**
 * Initiates the paging of the VM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Init(PVM pVM)
{
    LogFlow(("PGMR3Init:\n"));
    PCFGMNODE pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");
    int rc;

    /*
     * Assert alignment and sizes.
     */
    AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    AssertCompile(sizeof(pVM->apCpusR3[0]->pgm.s) <= sizeof(pVM->apCpusR3[0]->pgm.padding));
    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));

    /*
     * If we're in driverless mode we have to use the simplified memory mode.
     */
    bool const fDriverless = SUPR3IsDriverless();
    AssertReturn(fDriverless, VERR_NOT_SUPPORTED);
    if (!pVM->pgm.s.fNemMode)
        pVM->pgm.s.fNemMode = true;

    /*
     * Init the structure.
     */
    /*pVM->pgm.s.fRestoreRomPagesAtReset = false;*/

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aHandyPages[i].fZeroed      = false;
        pVM->pgm.s.aHandyPages[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
    }

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLargeHandyPage); i++)
    {
        pVM->pgm.s.aLargeHandyPage[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aLargeHandyPage[i].fZeroed      = false;
        pVM->pgm.s.aLargeHandyPage[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aLargeHandyPage[i].idSharedPage = NIL_GMM_PAGEID;
    }

    AssertReleaseReturn(pVM->pgm.s.cPhysHandlerTypes == 0, VERR_WRONG_ORDER);
    for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
    {
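        /* Driverless mode has no ring-0 to hand out handler type handles, so
           construct them here: the array index in the low bits and random bits
           in everything outside PGMPHYSHANDLERTYPE_IDX_MASK. */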
        if (fDriverless)
            pVM->pgm.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
        pVM->pgm.s.aPhysHandlerTypes[i].enmKind    = PGMPHYSHANDLERKIND_INVALID;
        pVM->pgm.s.aPhysHandlerTypes[i].pfnHandler = pgmR3HandlerPhysicalHandlerInvalid;
    }

#if 0
    /* Init the per-CPU part. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU  pVCpu = pVM->apCpusR3[idCpu];
        PPGMCPU pPGM  = &pVCpu->pgm.s;
    }
#endif

    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
                            true
#else
                            false
#endif
                            );
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;

    /*
     * Get the configured RAM size - to estimate saved state size.
     */
    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    else if (RT_SUCCESS(rc))
    {
        if (cbRam < GUEST_PAGE_SIZE)
            cbRam = 0;
        cbRam = RT_ALIGN_64(cbRam, GUEST_PAGE_SIZE);
    }
    else
    {
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
        return rc;
    }

    /** @cfgm{/PGM/ZeroRamPagesOnReset, boolean, true}
     * Whether to clear RAM pages on (hard) reset. */
    rc = CFGMR3QueryBoolDef(pCfgPGM, "ZeroRamPagesOnReset", &pVM->pgm.s.fZeroRamPagesOnReset, true);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Register callbacks, string formatters and the saved state data unit.
     */
#ifdef VBOX_STRICT
    VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif
    PGMRegisterStringFormatTypes();

    rc = pgmR3InitSavedState(pVM, cbRam);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the PGM critical section and flush the phys TLBs.
     */
    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
    AssertRCReturn(rc, rc);

    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */

    /*
     * For the time being we sport a full set of handy pages in addition to the base
     * memory to simplify things.
     */
    rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    AssertRCReturn(rc, rc);

    /*
     * Setup the zero page (HCPhysZeroPg is set by ring-0).
     */
    RT_ZERO(pVM->pgm.s.abZeroPg); /* paranoia */
    if (fDriverless)
        pVM->pgm.s.HCPhysZeroPg = _4G - GUEST_PAGE_SIZE * 2 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != 0);

    /*
     * Setup the invalid MMIO page (HCPhysMmioPg is set by ring-0).
     * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
     */
    ASMMemFill32(pVM->pgm.s.abMmioPg, sizeof(pVM->pgm.s.abMmioPg), 0xfeedface);
    if (fDriverless)
        pVM->pgm.s.HCPhysMmioPg = _4G - GUEST_PAGE_SIZE * 3 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != 0);
    pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;

    /*
     * Initialize physical access handlers.
     */
    /** @cfgm{/PGM/MaxPhysicalAccessHandlers, uint32_t, 32, 65536, 6144}
     * Number of physical access handlers allowed (subject to rounding). This is
     * managed as a one-time allocation during initialization. The default is
     * lower for a driverless setup. */
    /** @todo can lower it for nested paging too, at least when there is no
     *        nested guest involved. */
    uint32_t cAccessHandlers = 0;
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxPhysicalAccessHandlers", &cAccessHandlers, !fDriverless ? 6144 : 640);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgStmt(cAccessHandlers >= 32, ("cAccessHandlers=%#x, min 32\n", cAccessHandlers), cAccessHandlers = 32);
    AssertLogRelMsgStmt(cAccessHandlers <= _64K, ("cAccessHandlers=%#x, max 65536\n", cAccessHandlers), cAccessHandlers = _64K);
    if (!fDriverless)
    {
        rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_HANDLER_INIT, cAccessHandlers, NULL);
        AssertRCReturn(rc, rc);
        AssertPtr(pVM->pgm.s.pPhysHandlerTree);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_paNodes);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc);
    }
    else
    {
        uint32_t       cbTreeAndBitmap = 0;
        uint32_t const cbTotalAligned  = pgmHandlerPhysicalCalcTableSizes(&cAccessHandlers, &cbTreeAndBitmap);
        uint8_t       *pb = NULL;
        rc = SUPR3PageAlloc(cbTotalAligned >> HOST_PAGE_SHIFT, 0, (void **)&pb);
        AssertLogRelRCReturn(rc, rc);

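        /* Single contiguous allocation: the lookup tree structure first, then the
           allocation bitmap, and finally the handler node array handed to the slab
           allocator (hence the two offsets into pb below). */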
        pVM->pgm.s.PhysHandlerAllocator.initSlabAllocator(cAccessHandlers, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
                                                          (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
        pVM->pgm.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
        pVM->pgm.s.pPhysHandlerTree->initWithAllocator(&pVM->pgm.s.PhysHandlerAllocator);
    }

    /*
     * Register the physical access handler protecting ROMs.
     */
    if (RT_SUCCESS(rc))
        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/, pgmPhysRomWriteHandler,
                                              "ROM write protection", &pVM->pgm.s.hRomPhysHandlerType);

    /*
     * Register the physical access handler doing dirty MMIO2 tracing.
     */
    if (RT_SUCCESS(rc))
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
                                              pgmPhysMmio2WriteHandler, "MMIO2 dirty page tracing",
                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Almost no cleanup necessary, MM frees all memory. */
    PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);

    return rc;
}


/**
 * Ring-3 init finalizing.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
{
    /*
     * Allocate memory if we're supposed to do that.
     */
    int rc = VINF_SUCCESS;
    if (pVM->pgm.s.fRamPreAlloc)
        rc = pgmR3PhysRamPreAllocate(pVM);

    //pgmLogState(pVM);
    LogRel(("PGM: PGMR3InitFinalize: 4 MB PSE mask %RGp -> %Rrc\n", pVM->pgm.s.GCPhys4MBPSEMask, rc));
    return rc;
}


/**
 * Init phase completed callback.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     What has been completed.
 * @thread  EMT(0)
 */
VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    switch (enmWhat)
    {
        case VMINITCOMPLETED_HM:
            AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_PGM_PCI_PASSTHRU_MISCONFIG);
            break;

        default:
            /* shut up gcc */
            break;
    }

    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this component.
 *
 * This function will be called at init and whenever the VMM needs to relocate
 * itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("PGMR3Relocate: offDelta=%RGv\n", offDelta));
    RT_NOREF(pVM, offDelta);
}


/**
 * Resets a virtual CPU when unplugged.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
}


/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors
 * need to be removed.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
{
    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    PGM_LOCK_VOID(pVM);

#ifdef DEBUG
    DBGFR3_INFO_LOG_SAFE(pVM, "mappings", NULL);
    DBGFR3_INFO_LOG_SAFE(pVM, "handlers", "all nostat");
#endif

    //pgmLogState(pVM);
    PGM_UNLOCK(pVM);
}


/**
 * Memory setup after VM construction or reset.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fAtReset    Indicates the context, after reset if @c true or after
 *                      construction if @c false.
 */
VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
{
    if (fAtReset)
    {
        PGM_LOCK_VOID(pVM);

        int rc = pgmR3PhysRamZeroAll(pVM);
        AssertReleaseRC(rc);

        rc = pgmR3PhysRomReset(pVM);
        AssertReleaseRC(rc);

        PGM_UNLOCK(pVM);
    }
}


#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
                                                         VMSTATE enmOldState, void *pvUser)
{
    if (   enmState == VMSTATE_RUNNING
        || enmState == VMSTATE_RESUMING)
        pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
    RT_NOREF(pVMM, enmOldState, pvUser);
}
#endif

/**
 * Private API to reset fNoMorePhysWrites.
 */
VMMR3_INT_DECL(void) PGMR3ResetNoMorePhysWritesFlag(PVM pVM)
{
    pVM->pgm.s.fNoMorePhysWrites = false;
}

/**
 * Terminates the PGM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Term(PVM pVM)
{
    /* Must free shared pages here. */
    PGM_LOCK_VOID(pVM);
    pgmR3PhysRamTerm(pVM);
    pgmR3PhysRomTerm(pVM);
    PGM_UNLOCK(pVM);

    PGMDeregisterStringFormatTypes();
    return PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
}


/**
 * Perform an integrity check on the PGM component.
 *
 * @returns VINF_SUCCESS if everything is fine.
 * @returns VBox error status after asserting on integrity breach.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
{
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}


VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
{
    return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
}


VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
{
    return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
}


VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pVM->pgm.s.fUseLargePages = fUseLargePages;
    return VINF_SUCCESS;
}


#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
#else
int pgmLock(PVMCC pVM, bool fVoid)
#endif
{
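    /* Strict builds pass the caller's return address and source position along so
       lock-order validation and assertions can point at the actual caller. */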
#if defined(VBOX_STRICT)
    int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
#else
    int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
#endif
    if (RT_SUCCESS(rc))
        return rc;
    if (fVoid)
        PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
    else
        AssertRC(rc);
    return rc;
}


void pgmUnlock(PVMCC pVM)
{
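    /* Stash and clear the deprecated page lock count; if the leave below turns out
       to be nested (VINF_SEM_NESTED) the section is still held and the count must
       be restored. */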
    uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
    pVM->pgm.s.cDeprecatedPageLocks = 0;
    int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
    if (rc == VINF_SEM_NESTED)
        pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
}


#if !defined(IN_R0) || defined(LOG_ENABLED)

/** Format handler for PGMPAGE.
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                     const char *pszType, void const *pvValue,
                                                     int cchWidth, int cchPrecision, unsigned fFlags,
                                                     void *pvUser)
{
    size_t    cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

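        /* A part at level (lvl) is included when no precision was given, when the
           precision equals (lvl), or when the precision is at least (lvl)+10. */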
# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
            szTmp[cch++] = s_achHandlerStates[  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
                                              | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
# undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}


/** Format handler for PGMRAMRANGE.
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                         const char *pszType, void const *pvValue,
                                                         int cchWidth, int cchPrecision, unsigned fFlags,
                                                         void *pvUser)
{
    size_t             cch;
    PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
    if (RT_VALID_PTR(pRam))
    {
        char szTmp[80];
        cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
    return cch;
}

/** Format type handlers to be registered/deregistered. */
static const struct
{
    char                szType[24];
    PFNRTSTRFORMATTYPE  pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage",        pgmFormatTypeHandlerPage },
    { "pgmramrange",    pgmFormatTypeHandlerRamRange }
};
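/* Once registered (see PGMRegisterStringFormatTypes below), these make the custom
   format types available to the IPRT formatter, e.g. Log(("%pgmpage\n", pPage)) or
   Log(("%pgmramrange\n", pRam)). */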

#endif /* !IN_R0 || LOG_ENABLED */


VMMDECL(int) PGMRegisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    int      rc = VINF_SUCCESS;
    unsigned i;
    for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
    {
        rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
# ifdef IN_RING0
        if (rc == VERR_ALREADY_EXISTS)
        {
            /* in case of cleanup failure in ring-0 */
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
            rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
        }
# endif
    }
    if (RT_FAILURE(rc))
        while (i-- > 0)
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);

    return rc;
#else
    return VINF_SUCCESS;
#endif
}


VMMDECL(void) PGMDeregisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
        RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
#endif
}


VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    bool fMmuEnabled = CPUMGetGuestMmuEnabled(pVCpu);
    if (!fMmuEnabled)
        return PGMMODE_NONE;

    CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
    return   enmCpuMode == CPUMMODE_ARMV8_AARCH64
           ? PGMMODE_VMSA_V8_64
           : PGMMODE_VMSA_V8_32;
}


VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
    return PGMMODE_NONE; /* NEM doesn't need any shadow paging. */
}


DECLINLINE(int) pgmGstWalkReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = uLevel;
    pWalk->fFailed     = PGM_WALKFAIL_NOT_PRESENT
                       | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) pgmGstWalkReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = uLevel;
    pWalk->fFailed      = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
                        | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = uLevel;
    pWalk->fFailed    = PGM_WALKFAIL_RESERVED_BITS
                      | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(pWalk);

    pWalk->fSucceeded = false;

    RTGCPHYS GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
    if (GCPhysPt == RTGCPHYS_MAX) /* MMU disabled? */
    {
        pWalk->GCPtr      = GCPtr;
        pWalk->fSucceeded = true;
        pWalk->GCPhys     = GCPtr;
        return VINF_SUCCESS;
    }

    /* Do the translation. */
    /** @todo This is just a sketch to get something working for debugging, assumes 4KiB granules and 48-bit output address.
     *        Needs to be moved to PGMAllGst like on x86 and implemented for 16KiB and 64KiB granule sizes. */
    uint64_t u64TcrEl1 = CPUMGetTcrEl1(pVCpu);
    uint8_t  u8TxSz = (GCPtr & RT_BIT_64(55))
                    ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(u64TcrEl1)
                    : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(u64TcrEl1);
    uint8_t  uLookupLvl;
    RTGCPHYS fLookupMask;

    /*
     * From: https://github.com/codingbelief/arm-architecture-reference-manual-for-armv8-a/blob/master/en/chapter_d4/d42_2_controlling_address_translation_stages.md
     *
     * For all translation stages:
     * The maximum TxSZ value is 39. If TxSZ is programmed to a value larger than 39 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field is programmed to 39 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a Level 0 Translation fault for the stage of translation at which TxSZ is used.
     *
     * For a stage 1 translation:
     * The minimum TxSZ value is 16. If TxSZ is programmed to a value smaller than 16 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field were programmed to 16 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a stage 1 Level 0 Translation fault.
     *
     * We currently choose the former for both.
     */
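    /* TxSZ is the number of unmapped top address bits, i.e. the VA space is
       64 - TxSZ bits wide. With 4KiB granules each table level resolves 9 bits, so
       TxSZ determines both the level the walk starts at and the index width of that
       top-level table. Example: TxSZ=25 gives a 39-bit VA space, which starts at
       level 1 with a full 9-bit (33 - 25 + 1) index. */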
    if (/*u8TxSz >= 16 &&*/ u8TxSz <= 24)
    {
        uLookupLvl  = 0;
        fLookupMask = RT_BIT_64(24 - u8TxSz + 1) - 1;
    }
    else if (u8TxSz >= 25 && u8TxSz <= 33)
    {
        uLookupLvl  = 1;
        fLookupMask = RT_BIT_64(33 - u8TxSz + 1) - 1;
    }
    else /*if (u8TxSz >= 34 && u8TxSz <= 39)*/
    {
        uLookupLvl  = 2;
        fLookupMask = RT_BIT_64(39 - u8TxSz + 1) - 1;
    }
    /*else
        return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);*/ /** @todo Better status (Invalid TCR config). */

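    /* With 4KiB granules a 48-bit VA decodes as: bits [47:39] level 0 index,
       [38:30] level 1, [29:21] level 2, [20:12] level 3, [11:0] page offset;
       hence the shifts by 39, 30, 21 and 12 below. */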
    uint64_t *pu64Pt = NULL;
    uint64_t  uPt;
    int rc;
    if (uLookupLvl == 0)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

        uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 0);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 1)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

        uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 1);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (1G page). */
            pWalk->GCPtr       = GCPtr;
            pWalk->fSucceeded  = true;
            pWalk->GCPhys      = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
            pWalk->fGigantPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 2)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

        uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 2);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (2M page). */
            pWalk->GCPtr      = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
            pWalk->fBigPage   = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    Assert(uLookupLvl <= 3);

    /* Next level. */
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
    if (RT_SUCCESS(rc)) { /* probable */ }
    else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

    uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
    if (uPt & RT_BIT_64(0)) { /* probable */ }
    else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 3);

    if (uPt & RT_BIT_64(1)) { /* probable */ }
    else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 3); /* No block descriptors at level 3. */

    pWalk->GCPtr      = GCPtr;
    pWalk->fSucceeded = true;
    pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
    return VINF_SUCCESS;
}


VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
{
    //AssertReleaseFailed(); /** @todo Called by the PGM saved state code. */
    RT_NOREF(pVM, pVCpu, enmGuestMode, fForce);
    return VINF_SUCCESS;
}


VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, pfFlags, pHCPhys);
    return VERR_NOT_SUPPORTED;
}


int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return VINF_SUCCESS;
}


void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
}


int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    RT_NOREF(pGstWalk);
    return PGMGstGetPage(pVCpu, GCPtr, pWalk);
}


int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk); /** @todo Always do full walk for now. */
}