VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs-armv8.cpp@ 107044

Last change on this file since 107044 was 106381, checked in by vboxsync, 5 weeks ago

VMM/CPUMAllRegs-armv8.cpp: Some helpers to get at the TCR_EL1 of the guest, the effective TTBR for a given virtual address, and to strip the PAC or any reserved bits from a given virtual address, bugref:10388

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.2 KB
Line 
1/* $Id: CPUMAllRegs-armv8.cpp 106381 2024-10-16 13:50:09Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters, ARMv8 variant.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal-armv8.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48
49#include <iprt/armv8.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#ifdef IN_RING3
53# include <iprt/thread.h>
54#endif
55
56
57/*********************************************************************************************************************************
58* Defined Constants And Macros *
59*********************************************************************************************************************************/
60/**
61 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
62 *
63 * @returns Pointer to the Virtual CPU.
64 * @param a_pGuestCtx Pointer to the guest context.
65 */
66#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
67
68/** @def CPUM_INT_ASSERT_NOT_EXTRN
69 * Macro for asserting that @a a_fNotExtrn are present.
70 *
71 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
72 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
73 */
74#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
75 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
76 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
77
78
79/**
80 * Queries the pointer to the internal CPUMCTX structure.
81 *
82 * @returns The CPUMCTX pointer.
83 * @param pVCpu The cross context virtual CPU structure.
84 */
85VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
86{
87 return &pVCpu->cpum.s.Guest;
88}
89
90
91VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
92{
93 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PC);
94 return pVCpu->cpum.s.Guest.Pc.u64;
95}
96
97
98VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
99{
100 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SP);
101 AssertReleaseFailed(); /** @todo Exception level. */
102 return pVCpu->cpum.s.Guest.aSpReg[0].u64;
103}
104
105
106/**
107 * Returns whether IRQs are currently masked.
108 *
109 * @returns true if IRQs are masked as indicated by the PState value.
110 * @param pVCpu The cross context virtual CPU structure.
111 */
112VMMDECL(bool) CPUMGetGuestIrqMasked(PVMCPUCC pVCpu)
113{
114 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
115 return RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_I);
116}
117
118
119/**
120 * Returns whether FIQs are currently masked.
121 *
122 * @returns true if FIQs are masked as indicated by the PState value.
123 * @param pVCpu The cross context virtual CPU structure.
124 */
125VMMDECL(bool) CPUMGetGuestFiqMasked(PVMCPUCC pVCpu)
126{
127 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
128 return RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_F);
129}
130
131
132/**
133 * Gets the host CPU vendor.
134 *
135 * @returns CPU vendor.
136 * @param pVM The cross context VM structure.
137 */
138VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
139{
140 RT_NOREF(pVM);
141 //AssertReleaseFailed();
142 return CPUMCPUVENDOR_UNKNOWN;
143}
144
145
146/**
147 * Gets the host CPU microarchitecture.
148 *
149 * @returns CPU microarchitecture.
150 * @param pVM The cross context VM structure.
151 */
152VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
153{
154 RT_NOREF(pVM);
155 AssertReleaseFailed();
156 return kCpumMicroarch_Unknown;
157}
158
159
160/**
161 * Gets the guest CPU vendor.
162 *
163 * @returns CPU vendor.
164 * @param pVM The cross context VM structure.
165 */
166VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
167{
168 RT_NOREF(pVM);
169 //AssertReleaseFailed();
170 return CPUMCPUVENDOR_UNKNOWN;
171}
172
173
174/**
175 * Gets the guest CPU architecture.
176 *
177 * @returns CPU architecture.
178 * @param pVM The cross context VM structure.
179 */
180VMMDECL(CPUMARCH) CPUMGetGuestArch(PCVM pVM)
181{
182 RT_NOREF(pVM);
183 return kCpumArch_Arm; /* Static as we are in the ARM VMM module here. */
184}
185
186
187/**
188 * Gets the guest CPU microarchitecture.
189 *
190 * @returns CPU microarchitecture.
191 * @param pVM The cross context VM structure.
192 */
193VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
194{
195 RT_NOREF(pVM);
196 AssertReleaseFailed();
197 return kCpumMicroarch_Unknown;
198}
199
200
201/**
202 * Gets the maximum number of physical and linear address bits supported by the
203 * guest.
204 *
205 * @param pVM The cross context VM structure.
206 * @param pcPhysAddrWidth Where to store the physical address width.
207 * @param pcLinearAddrWidth Where to store the linear address width.
208 */
209VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
210{
211 AssertPtr(pVM);
212 AssertReturnVoid(pcPhysAddrWidth);
213 AssertReturnVoid(pcLinearAddrWidth);
214 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
215 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
216}
217
218
219/**
220 * Tests if the guest has the paging enabled (PG).
221 *
222 * @returns true if in real mode, otherwise false.
223 * @param pVCpu The cross context virtual CPU structure.
224 */
225VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
226{
227 RT_NOREF(pVCpu);
228 AssertReleaseFailed();
229 return false;
230}
231
232
233/**
234 * Tests if the guest is running in 64 bits mode or not.
235 *
236 * @returns true if in 64 bits protected mode, otherwise false.
237 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
238 */
239VMMDECL(bool) CPUMIsGuestIn64BitCode(PCVMCPU pVCpu)
240{
241 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
242 return !RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4);
243}
244
245
246/**
247 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
248 * registers.
249 *
250 * @returns true if in 64 bits protected mode, otherwise false.
251 * @param pCtx Pointer to the current guest CPU context.
252 */
253VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCCPUMCTX pCtx)
254{
255 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
256}
257
258
259/**
260 * Sets the specified changed flags (CPUM_CHANGED_*).
261 *
262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
263 * @param fChangedAdd The changed flags to add.
264 */
265VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
266{
267 pVCpu->cpum.s.fChanged |= fChangedAdd;
268}
269
270
271/**
272 * Checks if the guest debug state is active.
273 *
274 * @returns boolean
275 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
276 */
277VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
278{
279 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
280}
281
282
283/**
284 * Checks if the hyper debug state is active.
285 *
286 * @returns boolean
287 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
288 */
289VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
290{
291 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
292}
293
294
295/**
296 * Mark the guest's debug state as inactive.
297 *
298 * @returns boolean
299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
300 * @todo This API doesn't make sense any more.
301 */
302VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
303{
304 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER)));
305 NOREF(pVCpu);
306}
307
308
309/**
310 * Get the current exception level of the guest.
311 *
312 * @returns Exception Level 0 - 3
313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
314 */
315VMM_INT_DECL(uint8_t) CPUMGetGuestEL(PVMCPU pVCpu)
316{
317 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
318 return ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
319}
320
321
322/**
323 * Returns whether the guest has the MMU enabled for address translation.
324 *
325 * @returns true if address translation is enabled, false if not.
326 */
327VMM_INT_DECL(bool) CPUMGetGuestMmuEnabled(PVMCPUCC pVCpu)
328{
329 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE | CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
330 uint8_t bEl = ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
331 if (bEl == ARMV8_AARCH64_EL_2)
332 {
333 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_EL2);
334 return RT_BOOL(pVCpu->cpum.s.Guest.SctlrEl2.u64 & ARMV8_SCTLR_EL2_M);
335 }
336
337 Assert(bEl == ARMV8_AARCH64_EL_0 || bEl == ARMV8_AARCH64_EL_1);
338 return RT_BOOL(pVCpu->cpum.s.Guest.Sctlr.u64 & ARMV8_SCTLR_EL1_M);
339}
340
341
342/**
343 * Returns the effective TTBR value for the given guest context pointer.
344 *
345 * @returns Physical base address of the translation table being used, or RTGCPHYS_MAX
346 * if MMU is disabled.
347 */
348VMM_INT_DECL(RTGCPHYS) CPUMGetEffectiveTtbr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
349{
350 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE | CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
351
352 uint8_t bEl = ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
353 if (bEl == ARMV8_AARCH64_EL_2)
354 {
355 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_EL2);
356 if (pVCpu->cpum.s.Guest.SctlrEl2.u64 & ARMV8_SCTLR_EL2_M)
357 return (GCPtr & RT_BIT_64(55))
358 ? ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr1El2.u64)
359 : ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr0El2.u64);
360 }
361 else
362 {
363 Assert(bEl == ARMV8_AARCH64_EL_0 || bEl == ARMV8_AARCH64_EL_1);
364 if (pVCpu->cpum.s.Guest.Sctlr.u64 & ARMV8_SCTLR_EL1_M)
365 return (GCPtr & RT_BIT_64(55))
366 ? ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr1.u64)
367 : ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr0.u64);
368 }
369
370 return RTGCPHYS_MAX;
371}
372
373
374/**
375 * Returns the current TCR_EL1 system register value for the given vCPU.
376 *
377 * @returns TCR_EL1 value
378 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
379 */
380VMM_INT_DECL(uint64_t) CPUMGetTcrEl1(PVMCPUCC pVCpu)
381{
382 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
383 return pVCpu->cpum.s.Guest.Tcr.u64;
384}
385
386
387/**
388 * Returns the virtual address given in the input stripped from any potential
389 * pointer authentication code if enabled for the given vCPU.
390 *
391 * @returns Virtual address given in GCPtr stripped from any PAC (or reserved bits).
392 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
393 */
394VMM_INT_DECL(RTGCPTR) CPUMGetGCPtrPacStripped(PVMCPUCC pVCpu, RTGCPTR GCPtr)
395{
396 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
397
398 /** @todo MTE support. */
399 bool fUpper = RT_BOOL(GCPtr & RT_BIT_64(55)); /* Save the determinator for upper lower range. */
400 uint8_t u8TxSz = fUpper
401 ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(pVCpu->cpum.s.Guest.Tcr.u64)
402 : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(pVCpu->cpum.s.Guest.Tcr.u64);
403 RTGCPTR fNonPacMask = RT_BIT_64(64 - u8TxSz) - 1; /* Get mask of non PAC bits. */
404 RTGCPTR fSign = fUpper
405 ? ~fNonPacMask
406 : 0;
407
408 return (GCPtr & fNonPacMask)
409 | fSign;
410}
411
412
413/**
414 * Gets the current guest CPU mode.
415 *
416 * If paging mode is what you need, check out PGMGetGuestMode().
417 *
418 * @returns The CPU mode.
419 * @param pVCpu The cross context virtual CPU structure.
420 */
421VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
422{
423 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
424 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
425 return CPUMMODE_ARMV8_AARCH32;
426
427 return CPUMMODE_ARMV8_AARCH64;
428}
429
430
431/**
432 * Figure whether the CPU is currently executing 32 or 64 bit code.
433 *
434 * @returns 32 or 64.
435 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
436 */
437VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
438{
439 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
440 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
441 return 32;
442
443 return 64;
444}
445
446
447VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
448{
449 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
450 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
451 {
452 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_T)
453 return DISCPUMODE_ARMV8_T32;
454
455 return DISCPUMODE_ARMV8_A32;
456 }
457
458 return DISCPUMODE_ARMV8_A64;
459}
460
461
462/**
463 * Used to dynamically imports state residing in NEM or HM.
464 *
465 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
466 *
467 * @returns VBox status code.
468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
469 * @param fExtrnImport The fields to import.
470 * @thread EMT(pVCpu)
471 */
472VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
473{
474 VMCPU_ASSERT_EMT(pVCpu);
475 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
476 {
477 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
478 {
479 case CPUMCTX_EXTRN_KEEPER_NEM:
480 {
481 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
482 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
483 return rc;
484 }
485
486 default:
487 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
488 }
489 }
490 return VINF_SUCCESS;
491}
492
493
494/**
495 * Translates a microarchitecture enum value to the corresponding string
496 * constant.
497 *
498 * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
499 * NULL if the value is invalid.
500 *
501 * @param enmMicroarch The enum value to convert.
502 *
503 * @todo Doesn't really belong here but for now there is no other Armv8 CPUM source file.
504 */
505VMMDECL(const char *) CPUMMicroarchName(CPUMMICROARCH enmMicroarch)
506{
507 switch (enmMicroarch)
508 {
509#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
510 CASE_RET_STR(kCpumMicroarch_Apple_M1);
511#undef CASE_RET_STR
512 default:
513 break;
514 }
515
516 return NULL;
517}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette