VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@102869

Last change on this file since 102869 was 101496, checked in by vboxsync, 14 months ago

VMM,Main: Don't hardcode the vTimer interrupt number in the NEM backend but let the configuration constructor decide the value so it matches the FDT, bugref:10390 bugref:10528

/* $Id: NEMR3Native-darwin-armv8.cpp 101496 2023-10-18 11:27:55Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
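/* The tables below drive generic copy loops between CPUMCTX and
   Hypervisor.framework: each entry pairs an HV register enum with the matching
   CPUMCTX member offset (and, where applicable, its CPUMCTX_EXTRN_* flag). */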
/** The general registers. */
static const struct
{
    hv_reg_t    enmHvReg;
    uint32_t    fCpumExtrn;
    uint32_t    offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28,     RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64)       },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr)         },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr)         }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t    enmHvReg;
    uint32_t            offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64)  },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64)  },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64)  },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64)  },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64)  },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        fCpumExtrn;
    uint32_t        offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,         CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64)  },
    { HV_SYS_REG_SP_EL1,         CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64)  },
    { HV_SYS_REG_SPSR_EL1,       CPUMCTX_EXTRN_SPSR,           RT_UOFFSETOF(CPUMCTX, Spsr.u64)       },
    { HV_SYS_REG_ELR_EL1,        CPUMCTX_EXTRN_ELR,            RT_UOFFSETOF(CPUMCTX, Elr.u64)        },
    { HV_SYS_REG_SCTLR_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64)      },
    { HV_SYS_REG_TCR_EL1,        CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64)        },
    { HV_SYS_REG_TTBR0_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64)      },
    { HV_SYS_REG_TTBR1_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64)      },
    { HV_SYS_REG_VBAR_EL1,       CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, VBar.u64)       },
    { HV_SYS_REG_AFSR0_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Afsr0.u64)      },
    { HV_SYS_REG_AFSR1_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Afsr1.u64)      },
    { HV_SYS_REG_AMAIR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Amair.u64)      },
    { HV_SYS_REG_CNTKCTL_EL1,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, CntKCtl.u64)    },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Cpacr.u64)      },
    { HV_SYS_REG_CSSELR_EL1,     CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Csselr.u64)     },
    { HV_SYS_REG_ESR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Esr.u64)        },
    { HV_SYS_REG_FAR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Far.u64)        },
    { HV_SYS_REG_MAIR_EL1,       CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Mair.u64)       },
    { HV_SYS_REG_PAR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Par.u64)        },
    { HV_SYS_REG_TPIDRRO_EL0,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64)  },
    { HV_SYS_REG_TPIDR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64)  },
    { HV_SYS_REG_MDCCINT_EL1,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, MDccInt.u64)    }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t    enmHvReg;
    uint32_t            offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1)  },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1)  },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1)  },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1)  },
    { HV_FEATURE_REG_CLIDR_EL1,        RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1)       },
    { HV_FEATURE_REG_CTR_EL0,          RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0)         },
    { HV_FEATURE_REG_DCZID_EL0,        RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0)       }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}
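
/* Illustrative sketch, not part of the original file and compiled out: shows the
   intended call-site pattern for nemR3DarwinHvSts2Rc(). hv_vm_map() is a real
   Hypervisor.framework API; exampleMapOnePage() is a hypothetical helper. */
#if 0
static int exampleMapOnePage(void *pv, RTGCPHYS GCPhys)
{
    hv_return_t hrc = hv_vm_map(pv, GCPhys, _4K, HV_MEMORY_READ | HV_MEMORY_WRITE);
    return hrc == HV_SUCCESS ? VINF_SUCCESS : nemR3DarwinHvSts2Rc(hrc);
}
#endif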


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

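    /* Note: HV_SUCCESS is 0, so OR-ing the individual hv_vcpu_get_* return codes
       into hrc in the loops below lets them run to completion while still
       capturing any failure. */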
    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

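    /* A clear CPUMCTX_EXTRN_* bit means the register is current in CPUMCTX
       rather than in HV, so exactly those registers are written back below. */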
    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

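    /* All state now lives in HV until the next import; mark the whole context as externally kept. */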
    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    int rc = VINF_SUCCESS;
    hv_return_t hrc = hv_vm_create(NULL);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm  = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVM     The VM handle.
 * @param   pVCpu   The vCPU handle.
 * @param   idCpu   ID of the CPU to create.
 */
static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
{
    if (idCpu == 0)
    {
        Assert(pVM->nem.s.hVCpuCfg == NULL);

        /* Create a new vCPU config and query the ID registers. */
        pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
        if (!pVM->nem.s.hVCpuCfg)
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);

        /* Query ID registers and hand them to CPUM. */
        CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
            hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
            if (hrc != HV_SUCCESS)
                return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
817 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
        }

        int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
        if (RT_FAILURE(rc))
            return rc;
    }

    hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

    hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

    return VINF_SUCCESS;
}


/**
 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The vCPU handle.
 */
static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
{
    hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
    Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);

    if (pVCpu->idCpu == 0)
    {
        os_release(pVM->nem.s.hVCpuCfg);
        pVM->nem.s.hVCpuCfg = NULL;
    }
    return VINF_SUCCESS;
}


/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
        if (RT_FAILURE(rc))
        {
            /* Rollback. */
            while (idCpu--)
                VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVCpu);

            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
        }
    }

    pVM->nem.s.fCreatedEmts = true;
    return VINF_SUCCESS;
}


int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    RT_NOREF(pVM, enmWhat);
    return VINF_SUCCESS;
}


int nemR3NativeTerm(PVM pVM)
{
    /*
     * Delete the VM.
     */

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        /*
         * Apple's documentation states that the vCPU should be destroyed
         * on the thread running the vCPU, but as all the other EMTs are gone
         * at this point, destroying the VM would hang.
         *
         * We seem to be in luck here though, as destroying apparently works
         * from EMT(0) as well.
         */
        hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    }

    pVM->nem.s.fCreatedEmts = false;
    if (pVM->nem.s.fCreatedVm)
    {
        hv_return_t hrc = hv_vm_destroy();
        if (hrc != HV_SUCCESS)
            LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));

        pVM->nem.s.fCreatedVm = false;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/**
 * Returns the byte size from the given access SAS value.
 *
 * @returns Number of bytes to transfer.
 * @param   uSas    The SAS value to convert.
 */
DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
{
    switch (uSas)
    {
        case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE:     return sizeof(uint8_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD:     return sizeof(uint32_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD:    return sizeof(uint64_t);
        default:
            AssertReleaseFailed();
    }

    return 0;
}


/**
 * Sets the given general purpose register to the given value.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uReg        The register index.
 * @param   f64BitReg   Flag whether to operate on a 64-bit or 32-bit register.
 * @param   fSignExtend Flag whether to sign extend the value.
 * @param   u64Val      The value.
 */
DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
{
    AssertReturnVoid(uReg < 31);

    if (f64BitReg)
        pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
    else
        pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */

    /* Mark the register as not extern anymore. */
    switch (uReg)
    {
        case 0:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
            break;
        case 1:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
            break;
        case 2:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
            break;
        case 3:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
            break;
        default:
            AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
            /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
    }
}


/**
 * Gets the given general purpose register and returns the value.
 *
 * @returns Value from the given register.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uReg    The register index.
 */
DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
{
    AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);

    if (uReg == ARMV8_AARCH64_REG_ZR)
        return 0;

    /** @todo Import the register if extern. */
    AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));

    return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
}


/**
 * Works on the data abort exception (which will be a MMIO access most of the time).
 *
 * @returns VBox strict status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   uIss            The instruction specific syndrome value.
 * @param   fInsn32Bit      Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 * @param   GCPtrDataAbrt   The virtual GC address causing the data abort.
 * @param   GCPhysDataAbrt  The physical GC address which caused the data abort.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
                                                            RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
{
    bool fIsv        = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault    = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite      = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg   = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg     = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc     = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc     = nemR3DarwinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));

    RT_NOREF(fL2Fault, GCPtrDataAbrt);

    if (fWrite)
    {
        /*
         * Check whether this is one of the dirty tracked regions, mark it as dirty
         * and enable write support for this region again.
         *
         * This is required for proper VRAM tracking or the display might not get updated.
         * It is impossible to use the generic PGM facility here as it operates on guest page sizes,
         * while setting protection flags with Hypervisor.framework works only on host-page-sized regions,
         * so we have to cook our own. Additionally, the VRAM region is marked as prefetchable (write-back),
         * which doesn't produce a valid instruction syndrome, requiring the instruction to be restarted
         * after enabling write access again (due to a missing interpreter right now).
         */
        for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
        {
            PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];

            if (   GCPhysDataAbrt >= pMmio2Region->GCPhysStart
                && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
            {
                pMmio2Region->fDirty = true;

                uint8_t u2State;
                int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
                                            NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);

                /* Restart the instruction if there is no instruction syndrome available. */
                if (RT_FAILURE(rc) || !fIsv)
                    return rc;
            }
        }
    }

    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

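    /* On success, step the PC over the trapped instruction (4 or 2 bytes, per the ESR_EL2.IL bit). */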
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped MRS, MSR and system instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uIss        The instruction specific syndrome value.
 * @param   fInsn32Bit  Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
{
    bool fRead   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
    uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
    uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
    uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
    uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
    uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
    uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
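    /* The (op0,op1,CRn,CRm,op2) tuple uniquely identifies the system register, mirroring the MRS/MSR instruction encoding. */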
    uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
    LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
                 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));

    /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fRead)
    {
        RT_NOREF(pVM);
        rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
        Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
    }
    else
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
        Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped HVC instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uIss    The instruction specific syndrome value.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
{
    uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
    LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));

#if 0 /** @todo For later */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
#endif

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (u16Imm == 0)
    {
        /** @todo Raise exception to EL1 if PSCI not configured. */
        /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
        uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
        bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
        uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
        uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
        if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
        {
            switch (uFunNum)
            {
                case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
                    break;
                case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                    rcStrict = VMR3PowerOff(pVM->pUVM);
                    break;
                case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
                case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
                {
                    bool fHaltOnReset;
                    int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
                    if (RT_SUCCESS(rc) && fHaltOnReset)
                    {
                        Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
                        rcStrict = VINF_EM_HALT; /* Must go into rcStrict, not the local rc, to actually take effect. */
                    }
                    else
                    {
                        /** @todo pVM->pdm.s.fResetFlags = fFlags; */
                        VM_FF_SET(pVM, VM_FF_RESET);
                        rcStrict = VINF_EM_RESET;
                    }
                    break;
                }
                case ARM_PSCI_FUNC_ID_CPU_ON:
                {
                    uint64_t u64TgtCpu      = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
                    RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
                    uint64_t u64CtxId       = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
                    VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
                    break;
                }
                case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
                {
                    uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
                    switch (u32FunNum)
                    {
                        case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                        case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                        case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
                        case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
                        case ARM_PSCI_FUNC_ID_CPU_ON:
                            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                               false /*f64BitReg*/, false /*fSignExtend*/,
                                               (uint64_t)ARM_PSCI_STS_SUCCESS);
                            break;
                        default:
                            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                               false /*f64BitReg*/, false /*fSignExtend*/,
                                               (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
                    }
                    break;
                }
                default:
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
            }
        }
        else
            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
    }
    /** @todo What to do if immediate is != 0? */

    return rcStrict;
}


/**
 * Handles an exception VM exit.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   pExit   Pointer to the exit information.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
{
    uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
    uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
    bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);

    LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));

    switch (uEc)
    {
        case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
            return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
                                                           pExit->exception.physical_address);
        case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
            return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
        case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
            return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
        case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
        {
            /* No need to halt if there is an interrupt pending already. */
            if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
                return VINF_SUCCESS;

            /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
            if (   (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
                && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
            {
                uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;

                /* Check whether it expired and start executing guest code. */
                if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
                    return VINF_SUCCESS;

                uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
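                /* ns = ticks * 10^9 / CNTFRQ_EL0; the 64x32/32 mul-div helper avoids intermediate overflow. */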
                uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);

                /*
                 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
                 * plus scheduling overhead which would increase the wakeup latency.
                 * So only halt when the expiry exceeds the threshold below (this needs more experimentation; the current
                 * 2ms compromise balances CPU load while the guest is idle against performance).
                 */
                if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
                    return VINF_SUCCESS;

                LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
                             cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
                TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
            }
            else
                TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);

            return VINF_EM_HALT;
        }
        case ARMV8_ESR_EL2_EC_UNKNOWN:
        default:
            LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                    uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
            AssertReleaseFailed();
            return VERR_NOT_IMPLEMENTED;
    }

    return VINF_SUCCESS;
}


/**
 * Handles an exit from hv_vcpu_run().
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
{
    int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
    switch (pExit->reason)
    {
        case HV_EXIT_REASON_CANCELED:
            return VINF_EM_RAW_INTERRUPT;
        case HV_EXIT_REASON_EXCEPTION:
            return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
        case HV_EXIT_REASON_VTIMER_ACTIVATED:
        {
            LogFlowFunc(("vTimer got activated\n"));
            TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
            pVCpu->nem.s.fVTimerActivated = true;
            return GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
        }
        default:
            AssertReleaseFailed();
            break;
    }

    return VERR_INVALID_STATE;
}


/**
 * Runs the guest once until an exit occurs.
 *
 * @returns HV status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
{
    TMNotifyStartOfExecution(pVM, pVCpu);

    hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);

    TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());

    return hrc;
}


/**
 * Prepares the VM to run the guest.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fSingleStepping Flag whether we run in single stepping mode.
 */
static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
{
#ifdef LOG_ENABLED
    bool fIrq = false;
    bool fFiq = false;

    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    /** @todo */ RT_NOREF(fSingleStepping);
    int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
    AssertRCReturn(rc, rc);

    /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
    if (pVCpu->nem.s.fVTimerActivated)
    {
        /* Read the CNTV_CTL_EL0 register. */
        uint64_t u64CntvCtl = 0;

        hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);

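        /* Deassert the PPI unless the timer still has an enabled, unmasked, pending
           condition (ENABLE=1, IMASK=0, ISTATUS=1). */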
        if (   (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
            != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
        {
            /* Clear the interrupt. */
            GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);

            pVCpu->nem.s.fVTimerActivated = false;
            hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
            AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
        }
    }

    /* Set the pending interrupt state. */
    hv_return_t hrc = HV_SUCCESS;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
        fIrq = true;
#endif
    }
    else
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
    }

    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
        fFiq = true;
#endif
    }
    else
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
    }

    LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
    pVCpu->nem.s.fEventPending = false;
    return VINF_SUCCESS;
}


/**
 * The normal runloop (no debugging features enabled).
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
{
1528 /*
1529 * The run loop.
1530 *
     * The current approach to state updating is to use the sledgehammer and
     * sync everything every time.  This will be optimized later.
     */

    /* Update the vTimer offset after resuming if instructed. */
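    /* fVTimerOffUpdate is set on every vCPU by NEMHCResumeCpuTickOnAll(); the
       offset can only be programmed from the EMT owning the vCPU, which is why
       the update is deferred to this point.  CNTV_CTL_EL0/CNTV_CVAL_EL0 are
       rewritten as well so the guest timer state stays consistent with the
       new offset. */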
    if (pVCpu->nem.s.fVTimerOffUpdate)
    {
        hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
        if (hrc != HV_SUCCESS)
            return nemR3DarwinHvSts2Rc(hrc);

        pVCpu->nem.s.fVTimerOffUpdate = false;

        hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
        if (hrc == HV_SUCCESS)
            hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
        if (hrc != HV_SUCCESS)
            return nemR3DarwinHvSts2Rc(hrc);
    }

    /*
     * Poll timers and run for a bit.
     */
    /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
     *        the whole polling job when timers have changed... */
    uint64_t       offDeltaIgnored;
    uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
        if (rcStrict != VINF_SUCCESS)
            break;

        hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
        if (hrc == HV_SUCCESS)
        {
            /*
             * Deal with the message.
             */
            rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
            if (rcStrict == VINF_SUCCESS)
            { /* hopefully likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }
        else
        {
            AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
                                         pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
        }
    } /* the run loop */

    return rcStrict;
}


VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
    {
        /*
         * Sync the guest ID registers once per VM (they are read-only and stay constant during the VM's lifetime).
         * This needs to be done here and not during init because loading a saved state might change the ID registers
         * from what was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
         */
        static const struct
        {
            const char   *pszIdReg;
            hv_sys_reg_t enmHvReg;
            uint32_t     offIdStruct;
        } s_aSysIdRegs[] =
        {
#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
            ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
            ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
            ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
            ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
            ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
            ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
            ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
            ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
            ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
#undef ID_SYS_REG_CREATE
        };
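        /* For illustration, the first table entry above expands to (a purely
           mechanical expansion of ID_SYS_REG_CREATE, not additional code):
               { "ID_AA64DFR0_EL1", HV_SYS_REG_ID_AA64DFR0_EL1,
                 RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) }
           i.e. each entry carries the register name for error reporting, the
           Hypervisor.framework register enum, and the byte offset of the
           corresponding value within CPUMIDREGS. */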

        PCCPUMIDREGS pIdRegsGst = NULL;
        int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
        AssertRCReturn(rc, rc);

        for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
            hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
            if (hrc != HV_SUCCESS)
                return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
                                  "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
        }

        pVCpu->nem.s.fIdRegsSynced = true;
    }

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try to anticipate what we might need. */
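        /* The heuristic: IEM usually only needs its standard subset; EM status
           codes and failures force a full import because the state may get
           inspected or the VM torn down; pending interrupt forces make the
           exception-related state interesting as well. */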
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
                                            | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}


VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return true; /** @todo Are there any cases where we have to emulate? */
}


bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->nem.s.fSingleInstruction;
    pVCpu->nem.s.fSingleInstruction = fEnable;
    pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
    return fOld;
}


void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));

    RT_NOREF(pVM, fFlags);

    hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
    if (hrc != HV_SUCCESS)
        LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
{
    RT_NOREF(pVM, fUseDebugLoop);
    //AssertReleaseFailed();
    return false;
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
{
    RT_NOREF(pVM, pVCpu, fUseDebugLoop);
    return fUseDebugLoop;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvR3);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return true;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pvRam);

    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else if (pvMmio2)
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
                    GCPhys, cb, fFlags, rc));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, rc));
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);

        /* We need to set up our own dirty tracking because Hypervisor.framework only works on regions aligned to the host page size. */
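        /* A sketch of how the dirty tracking fits together (the write-fault
           side lives in the exception handling earlier in this file): the
           region is mapped read/execute only and recorded in a free
           aMmio2DirtyTracking slot; the first guest write faults, the region
           is marked dirty and remapped writable, and querying the bitmap in
           NEMR3PhysMmio2QueryAndResetDirtyBitmap() restores the read-only
           protection to re-arm the cycle. */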
        uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
        {
            /* Find a slot for dirty tracking. */
            PNEMHVMMIO2REGION pMmio2Region = NULL;
            uint32_t idSlot;
            for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
            {
                if (   pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
                    && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
                {
                    pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
                    break;
                }
            }

            if (!pMmio2Region)
            {
                LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
                return VERR_NEM_MAP_PAGES_FAILED;
            }

            pMmio2Region->GCPhysStart = GCPhys;
            pMmio2Region->GCPhysLast = GCPhys + cb - 1;
            pMmio2Region->fDirty = false;
            *puNemRange = idSlot;
        }
        else
            fProt |= NEM_PAGE_PROT_WRITE;

        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, pvMmio2, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));

#else
    RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
    *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
#endif
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }

        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
        {
            /* Reset tracking structure. */
            uint32_t idSlot = *puNemRange;
            *puNemRange = UINT32_MAX;

            Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
    Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));

    /* Keep it simple for now and mark everything as dirty if it is. */
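    /* Tracking granularity is the whole range (a single fDirty flag), so the
       caller's bitmap is either fully set or fully cleared.  Restoring the
       read/execute-only protection below re-arms the write fault, making sure
       a future guest write marks the range dirty again. */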
    int rc = VINF_SUCCESS;
    if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
    {
        ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);

        pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
        /* Restore as RX only. */
        uint8_t u2State;
        rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
    }
    else
        ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);

    return rc;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map readonly.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);

    int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
    AssertRC(rc);

    rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        /* Unregister what was there before. */
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        AssertRC(rc);

        rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);

    AssertFailed();
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

    AssertFailed();
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
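    /* The guest sees its virtual counter as host counter - u64VTimerOff,
       matching the offset programmed via hv_vcpu_set_vtimer_offset() in the
       run loop, so the same arithmetic is used here to report the tick count. */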
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the new TSC value with the old vTimer offset,
     * then adjust the offset so the guest doesn't notice the pause.
     */
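    /* A worked example with made-up numbers: if the host counter stood at 1000
       with u64VTimerOff=100 when pausing (uPausedTscValue=900) and is at 1500
       on resume, then u64TscNew = 1500 - 100 = 1400 and the offset grows by
       1400 - 900 = 500 to 600, so the guest resumes at 1500 - 600 = 900,
       exactly where it was paused. */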
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported unless the CPU has nested paging
     * and unrestricted guest execution support, so we can always return these flags here.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
