VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp @107044

Last change on this file since 107044 was 107026, checked in by vboxsync, 8 days ago:

VMM/NEM/ARM: Loading a 32-bit value into a register clears the upper half of the register on real hardware; this works around an ldp instruction accessing the TPM MMIO area, issued by tpm.sys in a Windows 11/ARM guest. bugref:10777
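
A minimal illustrative sketch (not from the file below) of the AArch64 behavior this change emulates — writing a 32-bit W register zero-extends into the full 64-bit X register, which plain C integer conversion mirrors:

    uint64_t x0 = UINT64_C(0xDEADBEEF00000000);
    uint32_t w0 = 0x12345678;
    x0 = w0;    /* like "mov w0, #imm" on real hardware: the upper 32 bits of X0 are cleared */
    /* x0 == UINT64_C(0x0000000012345678) */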

/* $Id: NEMR3Native-darwin-armv8.cpp 107026 2024-11-18 14:21:58Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dis.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* Since 15.0+ */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER  = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER   = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER  = 26,
    HV_GIC_INT_MAINTENANCE         = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

#endif

typedef hv_vm_config_t FN_HV_VM_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_CREATE *g_pfnHvVmConfigCreate = NULL; /* Since 13.0 */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL; /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL; /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL; /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void **ppfn; /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigCreate, hv_vm_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};


/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_create g_pfnHvVmConfigCreate
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif
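
/*
 * Illustrative note (not part of the original file): with the aliases above,
 * an ordinary-looking framework call compiles into an indirect call through
 * the resolved import, e.g.:
 *     hv_gic_config_t hCfg = hv_gic_config_create();  // really g_pfnHvGicConfigCreate()
 * so such calls may only be made after nemR3DarwinLoadHv() has run and the
 * corresponding pointer was found to be non-NULL.
 */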


/** The general registers. */
static const struct
{
    hv_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
};
/** EL2 support system registers. */
static const struct
{
    uint16_t idSysReg;
    uint32_t offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2, RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2, RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2, RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2, RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2, RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2, RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2, RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2, RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2, RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2, RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2, RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2, RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2, RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2, RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2, RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2, RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2, RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t enmHvReg;
    uint32_t offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0) }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param hrc The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR: return VERR_INVALID_STATE;
        case HV_BUSY: return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE: return VERR_NOT_FOUND;
        case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}
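
/*
 * Usage sketch (illustrative, not part of the original file):
 *     hv_return_t hrc = hv_vm_protect(GCPhys, cb, HV_MEMORY_READ);
 *     int rc = nemR3DarwinHvSts2Rc(hrc);  // e.g. HV_BAD_ARGUMENT -> VERR_INVALID_PARAMETER
 */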


/** Puts a name to a hypervisor framework status code. */
static const char *nemR3DarwinHvStatusName(hv_return_t hrc)
{
    switch (hrc)
    {
        RT_CASE_RET_STR(HV_SUCCESS);
        RT_CASE_RET_STR(HV_ERROR);
        RT_CASE_RET_STR(HV_BUSY);
        RT_CASE_RET_STR(HV_BAD_ARGUMENT);
        RT_CASE_RET_STR(HV_ILLEGAL_GUEST_STATE);
        RT_CASE_RET_STR(HV_NO_RESOURCES);
        RT_CASE_RET_STR(HV_NO_DEVICE);
        RT_CASE_RET_STR(HV_DENIED);
        RT_CASE_RET_STR(HV_UNSUPPORTED);
    }
    return "";
}


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param u32Ec The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param fPageProt The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}
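
/*
 * Illustrative mapping (not part of the original file) of the cases above:
 *     nemR3DarwinPageStateFromProt(NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE) == NEM_DARWIN_PAGE_STATE_RW
 *     nemR3DarwinPageStateFromProt(NEM_PAGE_PROT_NONE)                       == NEM_DARWIN_PAGE_STATE_UNMAPPED
 */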


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhys The guest physical address to start unmapping at.
 * @param cb The size of the range to unmap in bytes.
 * @param pu2State Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhys The guest physical address to start mapping.
 * @param pvRam The R3 pointer of the memory to back the range with.
 * @param cb The size of the range, page aligned.
 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param pu2State Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}
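
/*
 * Usage sketch (illustrative, not part of the original file): backing one page
 * of guest RAM with host memory as read/write; the page size constant is an
 * assumption here:
 *     uint8_t u2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
 *     int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, HOST_PAGE_SIZE,
 *                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, &u2State);
 */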


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param GCPhys The guest physical address to start mapping.
 * @param cb The size of the range, page aligned.
 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param pu2State Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        if (pVM->nem.s.fEl2Enabled)
        {
            Log3(("%s\n", szRegs));
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
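
/*
 * Note (illustrative, not part of the original file): the fExtrn mask tracks
 * which parts of the guest context still live only in Hypervisor.framework.
 * A typical caller imports just what it needs, e.g.:
 *     int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
 * after which those bits are cleared from pVCpu->cpum.GstCtx.fExtrn.
 */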


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure of the
 *        calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param pErrInfo Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n", g_aImports[i].pszName));
            else
            {
                *g_aImports[i].ppfn = NULL;
                LogRel(("NEM: info: Optional import Hypervisor!%s not found: %Rrc\n", g_aImports[i].pszName, rc2));
            }
        }
        Assert(RT_SUCCESS(rc) && !RTErrInfoIsSet(pErrInfo));
        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}
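
/*
 * Usage sketch (illustrative, not part of the original file):
 *     RTERRINFOSTATIC ErrInfo;
 *     int rc = nemR3DarwinLoadHv(RTErrInfoInitStatic(&ErrInfo));
 *     if (RT_FAILURE(rc))
 *         LogRel(("NEM: %s\n", ErrInfo.Core.pszMsg));
 * Imports that are missing on older macOS versions are simply left NULL.
 */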


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


/**
 * Sets the given SPI inside the in-kernel Hypervisor.framework GIC.
 *
 * @returns VBox status code.
 * @param pVM The VM instance.
 * @param uIntId The SPI ID to update.
 * @param fAsserted Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVM);
    Assert(hv_gic_set_spi);

    hv_return_t hrc = hv_gic_set_spi(uIntId + GIC_INTID_RANGE_SPI_START, fAsserted);
    return nemR3DarwinHvSts2Rc(hrc);
}
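
/*
 * Usage sketch (illustrative, not part of the original file): asserting SPI 5,
 * which the function translates to INTID GIC_INTID_RANGE_SPI_START + 5 for
 * Hypervisor.framework:
 *     int rc = GICR3NemSpiSet(pVM, 5, true /\*fAsserted*\/);
 */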


/**
 * Sets the given PPI inside the in-kernel Hypervisor.framework GIC.
 *
 * @returns VBox status code.
 * @param pVCpu The vCPU for which the PPI state is updated.
 * @param uIntId The PPI ID to update.
 * @param fAsserted Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVCpu, uIntId, fAsserted);

    /* Should never be called as the PPIs are handled entirely in Hypervisor.framework/AppleHV. */
    AssertFailed();
    return VERR_NEM_IPE_9;
}


static int nemR3DarwinGicCreate(PVM pVM)
{
    nemR3DarwinDumpGicInfo();

    //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
    AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);

    hv_gic_config_t hGicCfg = hv_gic_config_create();

    /*
     * Query the MMIO ranges.
     */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_create(hGicCfg);
    os_release(hGicCfg);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    /* Make sure the device is not instantiated as Hypervisor.framework provides it. */
    //CFGMR3RemoveNode(pGicDev);
    return rc;
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
 *        the latter we'll fail if we cannot initialize.
 * @param fForced Whether the HMForced flag is set and we should
 *        fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /* Resolve optional imports */
    int rc = nemR3DarwinLoadHv(pErrInfo);
    if (RT_FAILURE(rc))
    {
        if ((fForced || !fFallback) && RTErrInfoIsSet(pErrInfo))
            return VMSetError(pVM, rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
        return rc;
    }

    /*
     * Need to enable nested virt here if supported and reset the CFGM value to false
     * if not supported. This ASSUMES that NEM is initialized before CPUM.
     */
    PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");

    hv_vm_config_t hVmCfg = NULL;
    if (   hv_vm_config_create
        && hv_vm_config_get_el2_supported)
    {
        hVmCfg = hv_vm_config_create();

        bool fHvEl2Supported = false;
        hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
        if (   hrc == HV_SUCCESS
            && fHvEl2Supported)
        {
            /** @cfgm{/CPUM/NestedHWVirt, bool, false}
             * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
             * The default is false. Only supported on M3 and later and macOS 15.0+ (Sequoia).
             */
            bool fNestedHWVirt = false;
            rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
            AssertLogRelRCReturn(rc, rc);
            if (fNestedHWVirt)
            {
                hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
                if (hrc != HV_SUCCESS)
                    return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
                                      "Cannot enable nested virtualization: hrc=%#x %s!\n", hrc, nemR3DarwinHvStatusName(hrc));
                pVM->nem.s.fEl2Enabled = true;
                LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
            }
        }
        else
        {
            /* Ensure nested virt is not set. */
            rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");

1300 LogRel(("NEM: The host doesn't supported nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
1301 hrc, fHvEl2Supported));
        }
    }
    else
    {
        /* Ensure nested virt is not set. */
        rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
1308 LogRel(("NEM: Hypervisor.framework doesn't supported nested virtualization!\n"));
    }

    hv_return_t hrc = hv_vm_create(hVmCfg);
    os_release(hVmCfg);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
        return VINF_SUCCESS;
    }

    rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "hv_vm_create() failed: %#x %s", hrc, nemR3DarwinHvStatusName(hrc));

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param pVM The VM handle.
 * @param pVCpu The vCPU handle.
 * @param idCpu ID of the CPU to create.
 */
static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
{
    if (idCpu == 0)
    {
        Assert(pVM->nem.s.hVCpuCfg == NULL);

        /* Create a new vCPU config and query the ID registers. */
        pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
        if (!pVM->nem.s.hVCpuCfg)
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);

        /* Query ID registers and hand them to CPUM. */
        CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
            hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
            if (hrc != HV_SUCCESS)
                return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1371 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1372 }
1373
1374 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1375 if (RT_FAILURE(rc))
1376 return rc;
1377 }
1378
1379 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1380 if (hrc != HV_SUCCESS)
1381 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1382 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1383
1384 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1385 if (hrc != HV_SUCCESS)
1386 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1387 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1388
1389 return VINF_SUCCESS;
1390}
1391
1392
1393/**
1394 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1395 *
1396 * @returns VBox status code.
1397 * @param pVM The VM handle.
1398 * @param pVCpu The vCPU handle.
1399 */
1400static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1401{
1402 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1403 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1404
1405 if (pVCpu->idCpu == 0)
1406 {
1407 os_release(pVM->nem.s.hVCpuCfg);
1408 pVM->nem.s.hVCpuCfg = NULL;
1409 }
1410 return VINF_SUCCESS;
1411}
1412
1413
1414/**
1415 * This is called after CPUMR3Init is done.
1416 *
1417 * @returns VBox status code.
1418 * @param pVM The VM handle.
1419 */
1420int nemR3NativeInitAfterCPUM(PVM pVM)
1421{
1422 /*
1423 * Validate sanity.
1424 */
1425 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1426 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1427
1428 /*
1429 * Need to create the GIC here if the NEM variant is configured, as
1430 * according to the Apple docs it must be done before any vCPU is created.
1431 */
1432 if ( hv_gic_create
1433 && CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0"))
1434 {
1435 int rc = nemR3DarwinGicCreate(pVM);
1436 if (RT_FAILURE(rc))
1437 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1438 }
1439
1440 /*
1441 * Setup the EMTs.
1442 */
1443 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1444 {
1445 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1446
1447 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1448 if (RT_FAILURE(rc))
1449 {
1450 /* Rollback. */
1451 while (idCpu--)
1452 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
1453
1454 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1455 }
1456 }
1457
1458 pVM->nem.s.fCreatedEmts = true;
1459 return VINF_SUCCESS;
1460}
1461
1462
1463int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1464{
1465 RT_NOREF(pVM, enmWhat);
1466 return VINF_SUCCESS;
1467}
1468
1469
1470int nemR3NativeTerm(PVM pVM)
1471{
1472 /*
1473 * Delete the VM.
1474 */
1475
1476 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1477 {
1478 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1479
1480 /*
1481 * Apple's documentation states that the vCPU should be destroyed
1482 * on the thread running the vCPU but as all the other EMTs are gone
1483 * at this point, destroying the VM would hang.
1484 *
1485 * We seem to be in luck here though, as destroying apparently works
1486 * from EMT(0) as well.
1487 */
1488 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1489 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1490 }
1491
1492 pVM->nem.s.fCreatedEmts = false;
1493 if (pVM->nem.s.fCreatedVm)
1494 {
1495 hv_return_t hrc = hv_vm_destroy();
1496 if (hrc != HV_SUCCESS)
1497 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1498
1499 pVM->nem.s.fCreatedVm = false;
1500 }
1501 return VINF_SUCCESS;
1502}
1503
1504
1505/**
1506 * VM reset notification.
1507 *
1508 * @param pVM The cross context VM structure.
1509 */
1510void nemR3NativeReset(PVM pVM)
1511{
1512 RT_NOREF(pVM);
1513}
1514
1515
1516/**
1517 * Reset CPU due to INIT IPI or hot (un)plugging.
1518 *
1519 * @param pVCpu The cross context virtual CPU structure of the CPU being
1520 * reset.
1521 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1522 */
1523void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1524{
1525 RT_NOREF(pVCpu, fInitIpi);
1526}
1527
1528
1529/**
1530 * Returns the byte size from the given access SAS value.
1531 *
1532 * @returns Number of bytes to transfer.
1533 * @param uSas The SAS value to convert.
1534 */
1535DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1536{
1537 switch (uSas)
1538 {
1539 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1540 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1541 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1542 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1543 default:
1544 AssertReleaseFailed();
1545 }
1546
1547 return 0;
1548}
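/*
 * Worked example (standard ESR_EL2 ISS encoding, for illustration): a
 * faulting 32-bit load such as "ldr w8, [x0]" reports SAS=0b10 (word),
 * so this helper returns sizeof(uint32_t) for the MMIO access handling.
 */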
1549
1550
1551/**
1552 * Sets the given general purpose register to the given value.
1553 *
1554 * @param pVCpu The cross context virtual CPU structure of the
1555 * calling EMT.
1556 * @param uReg The register index.
1557 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1558 * @param fSignExtend Flag whether to sign extend the value.
1559 * @param u64Val The value.
1560 */
1561DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1562{
1563 AssertReturnVoid(uReg < 31);
1564
1565 if (f64BitReg)
1566 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1567 else
1568 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
1569
1570 /* Mark the register as not extern anymore. */
1571 switch (uReg)
1572 {
1573 case 0:
1574 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1575 break;
1576 case 1:
1577 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1578 break;
1579 case 2:
1580 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1581 break;
1582 case 3:
1583 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1584 break;
1585 default:
1586 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1587 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1588 }
1589}
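/*
 * Note: the 32-bit path above zero extends into the full 64-bit register
 * unless sign extension is requested; loading a 32-bit value into a Wn
 * register clears the upper half on real hardware, which the ldp workaround
 * for the TPM MMIO area below relies on (see bugref:10777).
 */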
1590
1591
1592/**
1593 * Gets the given general purpose register and returns the value.
1594 *
1595 * @returns Value from the given register.
1596 * @param pVCpu The cross context virtual CPU structure of the
1597 * calling EMT.
1598 * @param uReg The register index.
1599 */
1600DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1601{
1602 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1603
1604 if (uReg == ARMV8_AARCH64_REG_ZR)
1605 return 0;
1606
1607 /** @todo Import the register if extern. */
1608 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1609
1610 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1611}
1612
1613
1614/**
1615 * Works on the data abort exception (which will be a MMIO access most of the time).
1616 *
1617 * @returns VBox strict status code.
1618 * @param pVM The cross context VM structure.
1619 * @param pVCpu The cross context virtual CPU structure of the
1620 * calling EMT.
1621 * @param uIss The instruction specific syndrome value.
1622 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1623 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1624 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1625 */
1626static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1627 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1628{
1629 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1630 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1631 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1632 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1633 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1634 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1635 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1636 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1637 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1638 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1639
1640 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1641
1642 if (fWrite)
1643 {
1644 /*
1645 * Check whether this is one of the dirty tracked regions, mark it as dirty
1646 * and enable write support for this region again.
1647 *
1648 * This is required for proper VRAM tracking, otherwise the display might not get updated.
1649 * It is impossible to use the generic PGM facility as it operates on guest page sizes
1650 * while setting protection flags with Hypervisor.framework works only on host page sized regions,
1651 * so we have to cook our own. Additionally the VRAM region is marked as prefetchable (write-back),
1652 * which doesn't produce a valid instruction syndrome, requiring the instruction to be restarted
1653 * after write access is enabled again (due to a missing interpreter right now).
1654 */
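/*
 * Rough life cycle of a tracked region (a sketch of the code paths in this file):
 * 1. NEMR3NotifyPhysMmioExMapEarly() registers the region read/execute only.
 * 2. A guest write faults here; the slot is marked dirty and remapped writable.
 * 3. NEMR3PhysMmio2QueryAndResetDirtyBitmap() reports the dirty state and
 *    flips the region back to read/execute only for the next round.
 */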
1655 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1656 {
1657 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1658
1659 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1660 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1661 {
1662 pMmio2Region->fDirty = true;
1663
1664 uint8_t u2State;
1665 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1666 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1667
1668 /* Restart the instruction if there is no instruction syndrome available. */
1669 if (RT_FAILURE(rc) || !fIsv)
1670 return rc;
1671 }
1672 }
1673 }
1674
1675 VBOXSTRICTRC rcStrict;
1676 if (fIsv)
1677 {
1678 EMHistoryAddExit(pVCpu,
1679 fWrite
1680 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1681 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1682 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1683
1684 uint64_t u64Val = 0;
1685 if (fWrite)
1686 {
1687 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1688 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1689 Log4(("MmioExit/%u: %08RX64: WRITE %#RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1690 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1691 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1692 }
1693 else
1694 {
1695 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1696 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1697 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1698 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1699 if (rcStrict == VINF_SUCCESS)
1700 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1701 }
1702 }
1703 else
1704 {
1705 /** @todo Our UEFI firmware accesses the flash region with the following instruction
1706 * when the NVRAM actually contains data:
1707 * ldrb w9, [x6, #-0x0001]!
1708 * This is too complicated for the hardware so the ISV bit is not set. Until there
1709 * is a proper IEM implementation we just handle this here for now to avoid annoying
1710 * users too much.
1711 */
1712 /* The following ASSUMES that the vCPU state is completely synced. */
1713
1714 /* Read instruction. */
1715 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
1716 const void *pvPageR3 = NULL;
1717 PGMPAGEMAPLOCK PageMapLock;
1718
1719 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
1720 if (rcStrict == VINF_SUCCESS)
1721 {
1722 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
1723 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
1724
1725 DISSTATE Dis;
1726 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
1727 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
1728 if (rcStrict == VINF_SUCCESS)
1729 {
1730 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
1731 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1732 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1733 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
1734 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
1735 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
1736 {
1737 /* The fault address is already the final address. */
1738 uint8_t bVal = 0;
1739 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &bVal, 1, PGMACCESSORIGIN_HM);
1740 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1741 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, sizeof(bVal), sizeof(bVal),
1742 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
1743 if (rcStrict == VINF_SUCCESS)
1744 {
1745 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
1746 /* Update the indexed register. */
1747 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
1748 }
1749 }
1750 /*
1751 * Seeing the following with the Windows 11/ARM TPM driver:
1752 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
1753 */
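/* Each 32-bit destination write below goes through nemR3DarwinSetGReg with
 * f64BitReg=false, clearing the upper register half like real hardware does. */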
1754 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
1755 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1756 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1757 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
1758 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1759 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
1760 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
1761 {
1762 /** @todo This is tricky to handle if the first register read returns something other than VINF_SUCCESS... */
1763 /* The fault address is already the final address. */
1764 uint32_t u32Val1 = 0;
1765 uint32_t u32Val2 = 0;
1766 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
1767 if (rcStrict == VINF_SUCCESS)
1768 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
1769 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
1770 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, 2 * sizeof(uint32_t), sizeof(u32Val1),
1771 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
1772 if (rcStrict == VINF_SUCCESS)
1773 {
1774 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
1775 nemR3DarwinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
1776 }
1777 }
1778 else
1779 AssertFailedReturn(VERR_NOT_SUPPORTED);
1780 }
1781 }
1782 }
1783
1784 if (rcStrict == VINF_SUCCESS)
1785 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1786
1787 return rcStrict;
1788}
1789
1790
1791/**
1792 * Works on the trapped MRS, MSR and system instruction exception.
1793 *
1794 * @returns VBox strict status code.
1795 * @param pVM The cross context VM structure.
1796 * @param pVCpu The cross context virtual CPU structure of the
1797 * calling EMT.
1798 * @param uIss The instruction specific syndrome value.
1799 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1800 */
1801static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1802{
1803 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1804 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1805 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1806 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1807 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1808 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1809 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1810 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1811 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1812 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
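/*
 * Example (generic ARMv8 system register encoding, for illustration):
 * "mrs x0, CNTVCT_EL0" traps with op0=3, op1=3, CRn=14, CRm=0, op2=2,
 * which ARMV8_AARCH64_SYSREG_ID_CREATE packs into the single idSysReg
 * value handed to the CPUM accessors below.
 */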
1813
1814 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1815 EMHistoryAddExit(pVCpu,
1816 fRead
1817 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1818 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1819 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1820
1821 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1822 uint64_t u64Val = 0;
1823 if (fRead)
1824 {
1825 RT_NOREF(pVM);
1826 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1827 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1828 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1829 VBOXSTRICTRC_VAL(rcStrict) ));
1830 if (rcStrict == VINF_SUCCESS)
1831 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1832 }
1833 else
1834 {
1835 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1836 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1837 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1838 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1839 VBOXSTRICTRC_VAL(rcStrict) ));
1840 }
1841
1842 if (rcStrict == VINF_SUCCESS)
1843 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1844
1845 return rcStrict;
1846}
1847
1848
1849/**
1850 * Works on the trapped HVC instruction exception.
1851 *
1852 * @returns VBox strict status code.
1853 * @param pVM The cross context VM structure.
1854 * @param pVCpu The cross context virtual CPU structure of the
1855 * calling EMT.
1856 * @param uIss The instruction specific syndrome value.
1857 * @param fAdvancePc Flag whether to advance the guest program counter.
1858 */
1859static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1860{
1861 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1862 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1863
1864#if 0 /** @todo For later */
1865 EMHistoryAddExit(pVCpu,
1866 fRead
1867 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1868 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1869 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1870#endif
1871
1872 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1873 if (u16Imm == 0)
1874 {
1875 /** @todo Raise exception to EL1 if PSCI not configured. */
1876 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
1877 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1878 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1879 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1880 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
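/*
 * SMCCC function ID layout (per the Arm SMC Calling Convention spec):
 * bit 31 = fast call, bit 30 = SMC64/HVC64, bits 29:24 = owning entity,
 * bits 15:0 = function number. E.g. PSCI_VERSION is 0x84000000: fast,
 * 32-bit, entity 4 (standard secure service), function 0.
 */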
1881 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1882 {
1883 switch (uFunNum)
1884 {
1885 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1886 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1887 break;
1888 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1889 rcStrict = VMR3PowerOff(pVM->pUVM);
1890 break;
1891 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1892 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1893 {
1894 bool fHaltOnReset;
1895 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1896 if (RT_SUCCESS(rc) && fHaltOnReset)
1897 {
1898 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1899 rc = VINF_EM_HALT;
1900 }
1901 else
1902 {
1903 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1904 VM_FF_SET(pVM, VM_FF_RESET);
1905 rc = VINF_EM_RESET;
1906 }
1907 break;
1908 }
1909 case ARM_PSCI_FUNC_ID_CPU_ON:
1910 {
1911 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1912 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1913 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1914 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1915 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1916 break;
1917 }
1918 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1919 {
1920 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1921 switch (u32FunNum)
1922 {
1923 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1924 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1925 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1926 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1927 case ARM_PSCI_FUNC_ID_CPU_ON:
1928 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1929 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1930 false /*f64BitReg*/, false /*fSignExtend*/,
1931 (uint64_t)ARM_PSCI_STS_SUCCESS);
1932 break;
1933 default:
1934 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1935 false /*f64BitReg*/, false /*fSignExtend*/,
1936 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1937 }
1938 break;
1939 }
1940 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1941 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
1942 break;
1943 default:
1944 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1945 }
1946 }
1947 else
1948 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1949 }
1950
1951 /** @todo What to do if immediate is != 0? */
1952
1953 if ( rcStrict == VINF_SUCCESS
1954 && fAdvancePc)
1955 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
1956
1957 return rcStrict;
1958}
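/*
 * Guest-side usage sketch (illustrative, not taken from a specific guest):
 * a PSCI-aware kernel issues "mov w0, #0x84000000; hvc #0" and receives the
 * version in x0, here ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2) == 0x00010002.
 */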
1959
1960
1961/**
1962 * Handles an exception VM exit.
1963 *
1964 * @returns VBox strict status code.
1965 * @param pVM The cross context VM structure.
1966 * @param pVCpu The cross context virtual CPU structure of the
1967 * calling EMT.
1968 * @param pExit Pointer to the exit information.
1969 */
1970static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1971{
1972 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1973 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1974 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1975
1976 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1977 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1978
1979 switch (uEc)
1980 {
1981 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1982 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1983 pExit->exception.physical_address);
1984 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1985 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1986 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1987 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1988 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
1989 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true);
1990 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1991 {
1992 /* No need to halt if there is an interrupt pending already. */
1993 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1994 {
1995 LogFlowFunc(("IRQ | FIQ set => VINF_SUCCESS\n"));
1996 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1997 return VINF_SUCCESS;
1998 }
1999
2000 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
2001 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
2002 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
2003 {
2004 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
2005
2006 /* Check whether it expired and start executing guest code. */
2007 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
2008 {
2009 LogFlowFunc(("Guest timer expired (cTicksVTimer=%RU64 CntvCValEl0=%RU64) => VINF_SUCCESS\n",
2010 cTicksVTimer, pVCpu->cpum.GstCtx.CntvCValEl0));
2011 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2012 return VINF_SUCCESS;
2013 }
2014
2015 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
2016 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
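/* Worked example (assuming the typical 24 MHz counter on Apple silicon):
 * 48000 remaining ticks * 1e9 / 24e6 = 2000000 ns, i.e. right at the 2ms
 * threshold checked below. */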
2017
2018 /*
2019 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
2020 * + scheduling overhead which would increase the wakeup latency.
2021 * So only halt when the threshold is exceeded (needs more experimentation, but the 2ms used below turned
2022 * out to be a good compromise between CPU load when the guest is idle and performance).
2023 */
2024 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
2025 {
2026 LogFlowFunc(("Guest timer expiration < 2ms (cNanoSecsVTimerToExpire=%RU64) => VINF_SUCCESS\n",
2027 cNanoSecsVTimerToExpire));
2028 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2029 return VINF_SUCCESS;
2030 }
2031
2032 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
2033 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
2034 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
2035 }
2036 else
2037 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2038
2039 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2040 return VINF_EM_HALT;
2041 }
2042 case ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN:
2043 {
2044 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
2045 /** @todo Forward genuine guest traps to the guest by either single stepping instruction with debug exception trapping turned off
2046 * or create instruction interpreter and inject exception ourselves. */
2047 Assert(rcStrict == VINF_EM_DBG_BREAKPOINT);
2048 return rcStrict;
2049 }
2050 case ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL:
2051 return VINF_EM_DBG_STEPPED;
2052 case ARMV8_ESR_EL2_EC_UNKNOWN:
2053 default:
2054 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2055 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2056 AssertReleaseFailed();
2057 return VERR_NOT_IMPLEMENTED;
2058 }
2059
2060 return VINF_SUCCESS;
2061}
2062
2063
2064/**
2065 * Handles an exit from hv_vcpu_run().
2066 *
2067 * @returns VBox strict status code.
2068 * @param pVM The cross context VM structure.
2069 * @param pVCpu The cross context virtual CPU structure of the
2070 * calling EMT.
2071 */
2072static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
2073{
2074 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2075 if (RT_FAILURE(rc))
2076 return rc;
2077
2078#ifdef LOG_ENABLED
2079 if (LogIs3Enabled())
2080 nemR3DarwinLogState(pVM, pVCpu);
2081#endif
2082
2083 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
2084 switch (pExit->reason)
2085 {
2086 case HV_EXIT_REASON_CANCELED:
2087 return VINF_EM_RAW_INTERRUPT;
2088 case HV_EXIT_REASON_EXCEPTION:
2089 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
2090 case HV_EXIT_REASON_VTIMER_ACTIVATED:
2091 {
2092 LogFlowFunc(("vTimer got activated\n"));
2093 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2094 pVCpu->nem.s.fVTimerActivated = true;
2095 return GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
2096 }
2097 default:
2098 AssertReleaseFailed();
2099 break;
2100 }
2101
2102 return VERR_INVALID_STATE;
2103}
2104
2105
2106/**
2107 * Runs the guest once until an exit occurs.
2108 *
2109 * @returns HV status code.
2110 * @param pVM The cross context VM structure.
2111 * @param pVCpu The cross context virtual CPU structure.
2112 */
2113static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
2114{
2115 TMNotifyStartOfExecution(pVM, pVCpu);
2116
2117 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
2118
2119 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2120
2121 return hrc;
2122}
2123
2124
2125/**
2126 * Prepares the VM to run the guest.
2127 *
2128 * @returns Strict VBox status code.
2129 * @param pVM The cross context VM structure.
2130 * @param pVCpu The cross context virtual CPU structure.
2131 * @param fSingleStepping Flag whether we run in single stepping mode.
2132 */
2133static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2134{
2135#ifdef LOG_ENABLED
2136 bool fIrq = false;
2137 bool fFiq = false;
2138
2139 if (LogIs3Enabled())
2140 nemR3DarwinLogState(pVM, pVCpu);
2141#endif
2142
2143 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2144 AssertRCReturn(rc, rc);
2145
2146 /* In single stepping mode we will re-read SPSR and MDSCR and enable the software step bits. */
2147 if (fSingleStepping)
2148 {
2149 uint64_t u64Tmp;
2150 hv_return_t hrc = hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
2151 if (hrc == HV_SUCCESS)
2152 {
2153 u64Tmp |= ARMV8_SPSR_EL2_AARCH64_SS;
2154 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, u64Tmp);
2155 }
2156
2157 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, &u64Tmp);
2158 if (hrc == HV_SUCCESS)
2159 {
2160 u64Tmp |= ARMV8_MDSCR_EL1_AARCH64_SS;
2161 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, u64Tmp);
2162 }
2163
2164 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2165 }
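/* With both PSTATE.SS and MDSCR_EL1.SS set, the CPU executes exactly one
 * instruction and then takes a software step exception, which surfaces in
 * nemR3DarwinHandleExitException() as ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL
 * and is translated to VINF_EM_DBG_STEPPED. */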
2166
2167 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
2168 if (pVCpu->nem.s.fVTimerActivated)
2169 {
2170 /* Read the CNTV_CTL_EL0 register. */
2171 uint64_t u64CntvCtl = 0;
2172
2173 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2174 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2175
2176 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2177 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2178 {
2179 /* Clear the interrupt. */
2180 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2181
2182 pVCpu->nem.s.fVTimerActivated = false;
2183 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2184 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2185 }
2186 }
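/* CNTV_CTL_EL0 bits (ARMv8): bit 0 ENABLE, bit 1 IMASK, bit 2 ISTATUS (RO).
 * The check above deasserts the vTimer PPI once the guest timer is no longer
 * in the enabled+unmasked+pending state, i.e. the interrupt got handled. */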
2187
2188 /* Set the pending interrupt state. */
2189 hv_return_t hrc = HV_SUCCESS;
2190 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2191 {
2192 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2193 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2194#ifdef LOG_ENABLED
2195 fIrq = true;
2196#endif
2197 }
2198 else
2199 {
2200 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2201 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2202 }
2203
2204 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2205 {
2206 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2207 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2208#ifdef LOG_ENABLED
2209 fFiq = true;
2210#endif
2211 }
2212 else
2213 {
2214 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2215 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2216 }
2217
2218 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2219 pVCpu->nem.s.fEventPending = false;
2220 return VINF_SUCCESS;
2221}
2222
2223
2224/**
2225 * The normal runloop (no debugging features enabled).
2226 *
2227 * @returns Strict VBox status code.
2228 * @param pVM The cross context VM structure.
2229 * @param pVCpu The cross context virtual CPU structure.
2230 */
2231static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2232{
2233 /*
2234 * The run loop.
2235 *
2236 * Current approach to state updating to use the sledgehammer and sync
2237 * everything every time. This will be optimized later.
2238 */
2239
2240 /* Update the vTimer offset after resuming if instructed. */
2241 if (pVCpu->nem.s.fVTimerOffUpdate)
2242 {
2243 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2244 if (hrc != HV_SUCCESS)
2245 return nemR3DarwinHvSts2Rc(hrc);
2246
2247 pVCpu->nem.s.fVTimerOffUpdate = false;
2248
2249 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2250 if (hrc == HV_SUCCESS)
2251 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2252 if (hrc != HV_SUCCESS)
2253 return nemR3DarwinHvSts2Rc(hrc);
2254 }
2255
2256 /*
2257 * Poll timers and run for a bit.
2258 */
2259 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2260 * the whole polling job when timers have changed... */
2261 uint64_t offDeltaIgnored;
2262 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2263 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2264 for (unsigned iLoop = 0;; iLoop++)
2265 {
2266 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2267 if (rcStrict != VINF_SUCCESS)
2268 break;
2269
2270 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2271 if (hrc == HV_SUCCESS)
2272 {
2273 /*
2274 * Deal with the message.
2275 */
2276 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2277 if (rcStrict == VINF_SUCCESS)
2278 { /* hopefully likely */ }
2279 else
2280 {
2281 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2282 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2283 break;
2284 }
2285 }
2286 else
2287 {
2288 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2289 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2290 }
2291 } /* the run loop */
2292
2293 return rcStrict;
2294}
2295
2296
2297/**
2298 * The debug runloop.
2299 *
2300 * @returns Strict VBox status code.
2301 * @param pVM The cross context VM structure.
2302 * @param pVCpu The cross context virtual CPU structure.
2303 */
2304static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
2305{
2306 /*
2307 * The run loop.
2308 *
2309 * Current approach to state updating to use the sledgehammer and sync
2310 * everything every time. This will be optimized later.
2311 */
2312
2313 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
2314 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
2315 pVCpu->nem.s.fUsingDebugLoop = true;
2316
2317 /* Trap any debug exceptions. */
2318 hv_return_t hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, true);
2319 if (hrc != HV_SUCCESS)
2320 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2321 "Trapping debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2322
2323 /* Update the vTimer offset after resuming if instructed. */
2324 if (pVCpu->nem.s.fVTimerOffUpdate)
2325 {
2326 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2327 if (hrc != HV_SUCCESS)
2328 return nemR3DarwinHvSts2Rc(hrc);
2329
2330 pVCpu->nem.s.fVTimerOffUpdate = false;
2331
2332 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2333 if (hrc == HV_SUCCESS)
2334 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2335 if (hrc != HV_SUCCESS)
2336 return nemR3DarwinHvSts2Rc(hrc);
2337 }
2338
2339 /* Save the guest MDSCR_EL1 */
2340 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2341 uint64_t u64RegMdscrEl1 = pVCpu->cpum.GstCtx.Mdscr.u64;
2342
2343 /*
2344 * Poll timers and run for a bit.
2345 */
2346 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2347 * the whole polling job when timers have changed... */
2348 uint64_t offDeltaIgnored;
2349 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2350 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2351 for (unsigned iLoop = 0;; iLoop++)
2352 {
2353 bool const fStepping = pVCpu->nem.s.fSingleInstruction;
2354
2355 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, fStepping);
2356 if (rcStrict != VINF_SUCCESS)
2357 break;
2358
2359 hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2360 if (hrc == HV_SUCCESS)
2361 {
2362 /*
2363 * Deal with the message.
2364 */
2365 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2366 if (rcStrict == VINF_SUCCESS)
2367 { /* hopefully likely */ }
2368 else
2369 {
2370 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2371 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2372 break;
2373 }
2374 }
2375 else
2376 {
2377 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2378 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2379 }
2380 } /* the run loop */
2381
2382 /* Restore single stepping state. */
2383 if (pVCpu->nem.s.fSingleInstruction)
2384 {
2385 /** @todo This ASSUMES that guest code being single stepped is not modifying the MDSCR_EL1 register. */
2386 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2387 Assert(pVCpu->cpum.GstCtx.Mdscr.u64 & ARMV8_MDSCR_EL1_AARCH64_SS);
2388
2389 pVCpu->cpum.GstCtx.Mdscr.u64 = u64RegMdscrEl1;
2390 }
2391
2392 /* Restore debug exceptions trapping. */
2393 hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, false);
2394 if (hrc != HV_SUCCESS)
2395 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2396 "Clearing trapping of debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2397
2398 pVCpu->nem.s.fUsingDebugLoop = false;
2399 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
2400
2401 return rcStrict;
2402
2403}
2404
2405
2406VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2407{
2408#ifdef LOG_ENABLED
2409 if (LogIs3Enabled())
2410 nemR3DarwinLogState(pVM, pVCpu);
2411#endif
2412
2413 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2414
2415 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2416 {
2417 /*
2418 * Sync the guest ID registers once per VM (they are readonly and stay constant during the VM's lifetime).
2419 * Need to do it here and not during init because loading a saved state might change the ID registers from what
2420 * was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2421 */
2422 static const struct
2423 {
2424 const char *pszIdReg;
2425 hv_sys_reg_t enmHvReg;
2426 uint32_t offIdStruct;
2427 } s_aSysIdRegs[] =
2428 {
2429#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
2430 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2431 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2432 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2433 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2434 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2435 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2436 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2437 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2438 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2439#undef ID_SYS_REG_CREATE
2440 };
2441
2442 PCCPUMIDREGS pIdRegsGst = NULL;
2443 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2444 AssertRCReturn(rc, rc);
2445
2446 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2447 {
2448 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2449 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2450 if (hrc != HV_SUCCESS)
2451 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2452 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2453 }
2454
2455 pVCpu->nem.s.fIdRegsSynced = true;
2456 }
2457
2458 /*
2459 * Try switch to NEM runloop state.
2460 */
2461 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2462 { /* likely */ }
2463 else
2464 {
2465 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2466 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2467 return VINF_SUCCESS;
2468 }
2469
2470 VBOXSTRICTRC rcStrict;
2471 if ( !pVCpu->nem.s.fUseDebugLoop
2472 /*&& !nemR3DarwinAnyExpensiveProbesEnabled()*/
2473 && !DBGFIsStepping(pVCpu)
2474 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledSwBreakpoints)
2475 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2476 else
2477 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2478
2479 if (rcStrict == VINF_EM_RAW_TO_R3)
2480 rcStrict = VINF_SUCCESS;
2481
2482 /*
2483 * Convert any pending HM events back to TRPM due to premature exits.
2484 *
2485 * This is because execution may continue from IEM and we would need to inject
2486 * the event from there (hence place it back in TRPM).
2487 */
2488 if (pVCpu->nem.s.fEventPending)
2489 {
2490 /** @todo */
2491 }
2492
2493
2494 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2495 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2496
2497 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2498 {
2499 /* Try anticipate what we might need. */
2500 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2501 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2502 || RT_FAILURE(rcStrict))
2503 fImport = CPUMCTX_EXTRN_ALL;
2504 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2505 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2506 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2507
2508 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2509 {
2510 /* Only import what is external currently. */
2511 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2512 if (RT_SUCCESS(rc2))
2513 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2514 else if (RT_SUCCESS(rcStrict))
2515 rcStrict = rc2;
2516 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2517 pVCpu->cpum.GstCtx.fExtrn = 0;
2518 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2519 }
2520 else
2521 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2522 }
2523 else
2524 {
2525 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2526 pVCpu->cpum.GstCtx.fExtrn = 0;
2527 }
2528
2529 return rcStrict;
2530}
2531
2532
2533VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2534{
2535 RT_NOREF(pVM, pVCpu);
2536 return true; /** @todo Are there any cases where we have to emulate? */
2537}
2538
2539
2540bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2541{
2542 VMCPU_ASSERT_EMT(pVCpu);
2543 bool fOld = pVCpu->nem.s.fSingleInstruction;
2544 pVCpu->nem.s.fSingleInstruction = fEnable;
2545 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2546 return fOld;
2547}
2548
2549
2550void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2551{
2552 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2553
2554 RT_NOREF(pVM, fFlags);
2555
2556 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2557 if (hrc != HV_SUCCESS)
2558 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2559}
2560
2561
2562DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2563{
2564 RT_NOREF(pVM, fUseDebugLoop);
2565 //AssertReleaseFailed();
2566 return false;
2567}
2568
2569
2570DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2571{
2572 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2573 return fUseDebugLoop;
2574}
2575
2576
2577VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2578 uint8_t *pu2State, uint32_t *puNemRange)
2579{
2580 RT_NOREF(pVM, puNemRange);
2581
2582 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2583#if defined(VBOX_WITH_PGM_NEM_MODE)
2584 if (pvR3)
2585 {
2586 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2587 if (RT_FAILURE(rc))
2588 {
2589 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2590 return VERR_NEM_MAP_PAGES_FAILED;
2591 }
2592 }
2593 return VINF_SUCCESS;
2594#else
2595 RT_NOREF(pVM, GCPhys, cb, pvR3);
2596 return VERR_NEM_MAP_PAGES_FAILED;
2597#endif
2598}
2599
2600
2601VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2602{
2603 RT_NOREF(pVM);
2604 return true;
2605}
2606
2607
2608VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2609 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2610{
2611 RT_NOREF(pvRam);
2612
2613 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2614 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2615
2616#if defined(VBOX_WITH_PGM_NEM_MODE)
2617 /*
2618 * Unmap the RAM we're replacing.
2619 */
2620 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2621 {
2622 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2623 if (RT_SUCCESS(rc))
2624 { /* likely */ }
2625 else if (pvMmio2)
2626 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
2627 GCPhys, cb, fFlags, rc));
2628 else
2629 {
2630 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2631 GCPhys, cb, fFlags, rc));
2632 return VERR_NEM_UNMAP_PAGES_FAILED;
2633 }
2634 }
2635
2636 /*
2637 * Map MMIO2 if any.
2638 */
2639 if (pvMmio2)
2640 {
2641 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2642
2643 /* We need to set up our own dirty tracking due to Hypervisor.framework only working on host page sized and aligned regions. */
2644 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2645 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2646 {
2647 /* Find a slot for dirty tracking. */
2648 PNEMHVMMIO2REGION pMmio2Region = NULL;
2649 uint32_t idSlot;
2650 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2651 {
2652 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2653 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2654 {
2655 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2656 break;
2657 }
2658 }
2659
2660 if (!pMmio2Region)
2661 {
2662 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2663 return VERR_NEM_MAP_PAGES_FAILED;
2664 }
2665
2666 pMmio2Region->GCPhysStart = GCPhys;
2667 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2668 pMmio2Region->fDirty = false;
2669 *puNemRange = idSlot;
2670 }
2671 else
2672 fProt |= NEM_PAGE_PROT_WRITE;
2673
2674 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2675 if (RT_FAILURE(rc))
2676 {
2677 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2678 GCPhys, cb, fFlags, pvMmio2, rc));
2679 return VERR_NEM_MAP_PAGES_FAILED;
2680 }
2681 }
2682 else
2683 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2684
2685#else
2686 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2687 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2688#endif
2689 return VINF_SUCCESS;
2690}
2691
2692
2693VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2694 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2695{
2696 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2697 return VINF_SUCCESS;
2698}
2699
2700
2701VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2702 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2703{
2704 RT_NOREF(pVM, puNemRange);
2705
2706 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2707 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2708
2709 int rc = VINF_SUCCESS;
2710#if defined(VBOX_WITH_PGM_NEM_MODE)
2711 /*
2712 * Unmap the MMIO2 pages.
2713 */
2714 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2715 * we may have more stuff to unmap even in case of pure MMIO... */
2716 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2717 {
2718 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2719 if (RT_FAILURE(rc))
2720 {
2721 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2722 GCPhys, cb, fFlags, rc));
2723 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2724 }
2725
2726 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2727 {
2728 /* Reset tracking structure. */
2729 uint32_t idSlot = *puNemRange;
2730 *puNemRange = UINT32_MAX;
2731
2732 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2733 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
2734 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
2735 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
2736 }
2737 }
2738
2739 /* Ensure the page is marked as unmapped if relevant. */
2740 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
2741
2742 /*
2743 * Restore the RAM we replaced.
2744 */
2745 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2746 {
2747 AssertPtr(pvRam);
2748 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2749 if (RT_SUCCESS(rc))
2750 { /* likely */ }
2751 else
2752 {
2753 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2754 rc = VERR_NEM_MAP_PAGES_FAILED;
2755 }
2756 }
2757
2758 RT_NOREF(pvMmio2);
2759#else
2760 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2761 if (pu2State)
2762 *pu2State = UINT8_MAX;
2763 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2764#endif
2765 return rc;
2766}
2767
2768
2769VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2770 void *pvBitmap, size_t cbBitmap)
2771{
2772 LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
2773 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2774
2775 /* Keep it simple for now and mark everything as dirty if it is. */
2776 int rc = VINF_SUCCESS;
2777 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
2778 {
2779 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
2780
2781 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
2782 /* Restore as RX only. */
2783 uint8_t u2State;
2784 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
2785 }
2786 else
2787 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
2788
2789 return rc;
2790}
2791
2792
2793VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2794 uint8_t *pu2State, uint32_t *puNemRange)
2795{
2796 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2797
2798 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2799 *pu2State = UINT8_MAX;
2800 *puNemRange = 0;
2801 return VINF_SUCCESS;
2802}
2803
2804
2805VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2806 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2807{
2808 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2809 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2810 *pu2State = UINT8_MAX;
2811
2812#if defined(VBOX_WITH_PGM_NEM_MODE)
2813 /*
2814 * (Re-)map readonly.
2815 */
2816 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2817
2818 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2819 AssertRC(rc);
2820
2821 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
2822 if (RT_FAILURE(rc))
2823 {
2824 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2825 GCPhys, cb, pvPages, fFlags, rc));
2826 return VERR_NEM_MAP_PAGES_FAILED;
2827 }
2828 RT_NOREF(fFlags, puNemRange);
2829 return VINF_SUCCESS;
2830#else
2831 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2832 return VERR_NEM_MAP_PAGES_FAILED;
2833#endif
2834}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        /* Unregister what was there before. */
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        AssertRC(rc);

        rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);

    AssertFailed();
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

    AssertFailed();
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}
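
/*
 * In other words, the guest's virtual counter is simply the host timebase
 * shifted by a per-VM constant. A self-contained sketch of the relation
 * (the function name is illustrative, not part of the backend):
 *
 *     #include <mach/mach_time.h>
 *     #include <stdint.h>
 *
 *     static uint64_t exampleGuestVTimerNow(uint64_t u64VTimerOff)
 *     {
 *         return mach_absolute_time() - u64VTimerOff;  // host ticks minus the VM's offset
 *     }
 */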


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the current TSC value using the old vTimer offset,
     * then grow the offset by the time spent paused so the guest does not notice the pause.
     */
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}
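
/*
 * Worked example of the offset adjustment above, with illustrative numbers:
 * say u64VTimerOff = 100000 and the VM was paused when the guest counter read
 * uPausedTscValue = 600000. If mach_absolute_time() returns 1000000 on resume,
 * then u64TscNew = 1000000 - 100000 = 900000, and the offset grows by
 * 900000 - 600000 = 300000 to 400000. The guest counter therefore resumes at
 * 1000000 - 400000 = 600000, i.e. exactly where it stopped:
 *
 *     uint64_t const u64TscNew = mach_absolute_time() - u64VTimerOff;
 *     u64VTimerOff += u64TscNew - uPausedTscValue;
 *     // invariant at the instant of resuming:
 *     //     mach_absolute_time() - u64VTimerOff == uPausedTscValue
 */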


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is only available on CPUs supporting nested paging
     * and unrestricted guest execution, so we can safely return these flags here always.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */