VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@100101

Last change on this file since 100101 was 100076, checked in by vboxsync, 17 months ago

VMM/NEMR3Native-darwin-armv8: Fix setting the vTimer offset. This requires using mach_absolute_time() instead of reading CNTVCT_EL0 directly, otherwise the guest will read an underflowed counter value, program the timer completely wrong, and stall without any interrupt source. bugref:10390
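A minimal sketch of the idea behind this change (hypothetical helper, not the actual VMM code): the guest's virtual counter is computed as host counter minus offset, and every site must sample the host clock the same way Hypervisor.framework does, i.e. via mach_absolute_time():

#include <mach/mach_time.h>
#include <stdint.h>

/* Sketch: keep the guest's virtual counter continuous across a VM pause.
 * u64VTimerOff is the current host-to-guest counter offset; u64ValuePaused is
 * the guest counter value sampled when the VM was paused. */
static uint64_t updateVTimerOffset(uint64_t u64VTimerOff, uint64_t u64ValuePaused)
{
    /* Guest counter with the old offset still applied; sampling CNTVCT_EL0
     * directly instead can make this underflow, per the change note above. */
    uint64_t u64GuestNow = mach_absolute_time() - u64VTimerOff;
    /* Grow the offset by the time spent paused so the guest sees no jump. */
    return u64VTimerOff + (u64GuestNow - u64ValuePaused);
}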

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.4 KB
1/* $Id: NEMR3Native-darwin-armv8.cpp 100076 2023-06-05 16:41:14Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2023 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.virtualbox.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/gic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/dbgftrace.h>
45#include <VBox/vmm/gcm.h>
46#include "NEMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "dtrace/VBoxVMM.h"
49
50#include <iprt/armv8.h>
51#include <iprt/asm.h>
52#include <iprt/asm-arm.h>
53#include <iprt/asm-math.h>
54#include <iprt/ldr.h>
55#include <iprt/mem.h>
56#include <iprt/path.h>
57#include <iprt/string.h>
58#include <iprt/system.h>
59#include <iprt/utf16.h>
60
61#include <mach/mach_time.h>
62#include <mach/kern_return.h>
63
64#include <Hypervisor/Hypervisor.h>
65
66
67/*********************************************************************************************************************************
68* Defined Constants And Macros *
69*********************************************************************************************************************************/
70
71
72/** @todo The vTimer PPI for the virt platform, make it configurable. */
73#define NEM_DARWIN_VTIMER_GIC_PPI_IRQ 11
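/* Note: GIC PPIs occupy INTIDs 16..31, so PPI 11 is INTID 27, the architected EL1 virtual timer interrupt. */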
74
75
76/*********************************************************************************************************************************
77* Structures and Typedefs *
78*********************************************************************************************************************************/
79
80
81/*********************************************************************************************************************************
82* Global Variables *
83*********************************************************************************************************************************/
84/** The general registers. */
85static const struct
86{
87 hv_reg_t enmHvReg;
88 uint32_t fCpumExtrn;
89 uint32_t offCpumCtx;
90} s_aCpumRegs[] =
91{
92#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
93#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
94 CPUM_GREG_EMIT_X0_X3(0),
95 CPUM_GREG_EMIT_X0_X3(1),
96 CPUM_GREG_EMIT_X0_X3(2),
97 CPUM_GREG_EMIT_X0_X3(3),
98 CPUM_GREG_EMIT_X4_X28(4),
99 CPUM_GREG_EMIT_X4_X28(5),
100 CPUM_GREG_EMIT_X4_X28(6),
101 CPUM_GREG_EMIT_X4_X28(7),
102 CPUM_GREG_EMIT_X4_X28(8),
103 CPUM_GREG_EMIT_X4_X28(9),
104 CPUM_GREG_EMIT_X4_X28(10),
105 CPUM_GREG_EMIT_X4_X28(11),
106 CPUM_GREG_EMIT_X4_X28(12),
107 CPUM_GREG_EMIT_X4_X28(13),
108 CPUM_GREG_EMIT_X4_X28(14),
109 CPUM_GREG_EMIT_X4_X28(15),
110 CPUM_GREG_EMIT_X4_X28(16),
111 CPUM_GREG_EMIT_X4_X28(17),
112 CPUM_GREG_EMIT_X4_X28(18),
113 CPUM_GREG_EMIT_X4_X28(19),
114 CPUM_GREG_EMIT_X4_X28(20),
115 CPUM_GREG_EMIT_X4_X28(21),
116 CPUM_GREG_EMIT_X4_X28(22),
117 CPUM_GREG_EMIT_X4_X28(23),
118 CPUM_GREG_EMIT_X4_X28(24),
119 CPUM_GREG_EMIT_X4_X28(25),
120 CPUM_GREG_EMIT_X4_X28(26),
121 CPUM_GREG_EMIT_X4_X28(27),
122 CPUM_GREG_EMIT_X4_X28(28),
123 { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
124 { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
125 { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
126 { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
127 { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
128#undef CPUM_GREG_EMIT_X0_X3
129#undef CPUM_GREG_EMIT_X4_X28
130};
131/** SIMD/FP registers. */
132static const struct
133{
134 hv_simd_fp_reg_t enmHvReg;
135 uint32_t offCpumCtx;
136} s_aCpumFpRegs[] =
137{
138#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
139 CPUM_VREG_EMIT(0),
140 CPUM_VREG_EMIT(1),
141 CPUM_VREG_EMIT(2),
142 CPUM_VREG_EMIT(3),
143 CPUM_VREG_EMIT(4),
144 CPUM_VREG_EMIT(5),
145 CPUM_VREG_EMIT(6),
146 CPUM_VREG_EMIT(7),
147 CPUM_VREG_EMIT(8),
148 CPUM_VREG_EMIT(9),
149 CPUM_VREG_EMIT(10),
150 CPUM_VREG_EMIT(11),
151 CPUM_VREG_EMIT(12),
152 CPUM_VREG_EMIT(13),
153 CPUM_VREG_EMIT(14),
154 CPUM_VREG_EMIT(15),
155 CPUM_VREG_EMIT(16),
156 CPUM_VREG_EMIT(17),
157 CPUM_VREG_EMIT(18),
158 CPUM_VREG_EMIT(19),
159 CPUM_VREG_EMIT(20),
160 CPUM_VREG_EMIT(21),
161 CPUM_VREG_EMIT(22),
162 CPUM_VREG_EMIT(23),
163 CPUM_VREG_EMIT(24),
164 CPUM_VREG_EMIT(25),
165 CPUM_VREG_EMIT(26),
166 CPUM_VREG_EMIT(27),
167 CPUM_VREG_EMIT(28),
168 CPUM_VREG_EMIT(29),
169 CPUM_VREG_EMIT(30),
170 CPUM_VREG_EMIT(31)
171#undef CPUM_VREG_EMIT
172};
173/** System registers. */
174static const struct
175{
176 hv_sys_reg_t enmHvReg;
177 uint32_t fCpumExtrn;
178 uint32_t offCpumCtx;
179} s_aCpumSysRegs[] =
180{
181 { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
182 { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
183 { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
184 { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
185 { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
186 { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
187 { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
188 { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
189};
190
191
192/*********************************************************************************************************************************
193* Internal Functions *
194*********************************************************************************************************************************/
195
196
197/**
198 * Converts a HV return code to a VBox status code.
199 *
200 * @returns VBox status code.
201 * @param hrc The HV return code to convert.
202 */
203DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
204{
205 if (hrc == HV_SUCCESS)
206 return VINF_SUCCESS;
207
208 switch (hrc)
209 {
210 case HV_ERROR: return VERR_INVALID_STATE;
211 case HV_BUSY: return VERR_RESOURCE_BUSY;
212 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
213 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
214 case HV_NO_DEVICE: return VERR_NOT_FOUND;
215 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
216 }
217
218 return VERR_IPE_UNEXPECTED_STATUS;
219}
220
221
222/**
223 * Returns a human readable string of the given exception class.
224 *
225 * @returns Pointer to the string matching the given EC.
226 * @param u32Ec The exception class to return the string for.
227 */
228static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
229{
230 switch (u32Ec)
231 {
232#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
233 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
234 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
235 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
236 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
237 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
238 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
239 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
240 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
241 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
242 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
243 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
244 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
245 ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
246 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
247 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
248 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
249 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
250 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
251 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
252 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
253 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
254 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
255 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
256 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
257 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
258 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
259 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
260 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
261 ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
262 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
263 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
264 ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
265 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
266 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
267 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
268 ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
269 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
270 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
271 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
272 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
273 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
274 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
275 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
276 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
277 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
278#undef ARMV8_EC_CASE
279 default:
280 break;
281 }
282
283 return "<INVALID>";
284}
285
286
287/**
288 * Resolves a NEM page state from the given protection flags.
289 *
290 * @returns NEM page state.
291 * @param fPageProt The page protection flags.
292 */
293DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
294{
295 switch (fPageProt)
296 {
297 case NEM_PAGE_PROT_NONE:
298 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
299 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
300 return NEM_DARWIN_PAGE_STATE_RX;
301 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
302 return NEM_DARWIN_PAGE_STATE_RW;
303 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
304 return NEM_DARWIN_PAGE_STATE_RWX;
305 default:
306 break;
307 }
308
309 AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
310 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
311}
312
313
314/**
315 * Unmaps the given guest physical address range (page aligned).
316 *
317 * @returns VBox status code.
318 * @param pVM The cross context VM structure.
319 * @param GCPhys The guest physical address to start unmapping at.
320 * @param cb The size of the range to unmap in bytes.
321 * @param pu2State Where to store the new state of the unmapped page, optional.
322 */
323DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
324{
325 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
326 {
327 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
328 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
329 return VINF_SUCCESS;
330 }
331
332 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
333 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
334 if (RT_LIKELY(hrc == HV_SUCCESS))
335 {
336 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
337 if (pu2State)
338 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
339 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
340 return VINF_SUCCESS;
341 }
342
343 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
344 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
345 GCPhys, hrc));
346 return VERR_NEM_IPE_6;
347}
348
349
350/**
351 * Maps a given guest physical address range backed by the given memory with the given
352 * protection flags.
353 *
354 * @returns VBox status code.
355 * @param pVM The cross context VM structure.
356 * @param GCPhys The guest physical address to start mapping.
357 * @param pvRam The R3 pointer of the memory to back the range with.
358 * @param cb The size of the range, page aligned.
359 * @param fPageProt The page protection flags to use for this range, a combination of NEM_PAGE_PROT_XXX.
360 * @param pu2State Where to store the state for the new page, optional.
361 */
362DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
363{
364 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
365
366 Assert(fPageProt != NEM_PAGE_PROT_NONE);
367 RT_NOREF(pVM);
368
369 hv_memory_flags_t fHvMemProt = 0;
370 if (fPageProt & NEM_PAGE_PROT_READ)
371 fHvMemProt |= HV_MEMORY_READ;
372 if (fPageProt & NEM_PAGE_PROT_WRITE)
373 fHvMemProt |= HV_MEMORY_WRITE;
374 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
375 fHvMemProt |= HV_MEMORY_EXEC;
376
377 hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
378 if (hrc == HV_SUCCESS)
379 {
380 if (pu2State)
381 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
382 return VINF_SUCCESS;
383 }
384
385 return nemR3DarwinHvSts2Rc(hrc);
386}
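/* Usage sketch (hypothetical values): mapping 2 MiB of guest RAM at 1 GiB as RWX:
 *     uint8_t u2State;
 *     int rc = nemR3DarwinMap(pVM, UINT64_C(0x40000000), pvRam, _2M,
 *                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, &u2State);
 * Both GCPhys and cb must be page aligned for the underlying hv_vm_map() to succeed. */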
387
388#if 0 /* unused */
389DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
390{
391 hv_memory_flags_t fHvMemProt = 0;
392 if (fPageProt & NEM_PAGE_PROT_READ)
393 fHvMemProt |= HV_MEMORY_READ;
394 if (fPageProt & NEM_PAGE_PROT_WRITE)
395 fHvMemProt |= HV_MEMORY_WRITE;
396 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
397 fHvMemProt |= HV_MEMORY_EXEC;
398
399 hv_return_t hrc;
400 if (pVM->nem.s.fCreatedAsid)
401 hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
402 else
403 hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
404
405 return nemR3DarwinHvSts2Rc(hrc);
406}
407#endif
408
409#ifdef LOG_ENABLED
410/**
411 * Logs the current CPU state.
412 */
413static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
414{
415 if (LogIs3Enabled())
416 {
417 char szRegs[4096];
418 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
419 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
420 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
421 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
422 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
423 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
424 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
425 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
426 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
427 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
428 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
429 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
430 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
431 );
432 char szInstr[256]; RT_ZERO(szInstr);
433#if 0
434 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
435 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
436 szInstr, sizeof(szInstr), NULL);
437#endif
438 Log3(("%s%s\n", szRegs, szInstr));
439 }
440}
441#endif /* LOG_ENABLED */
442
443
444static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
445{
446 RT_NOREF(pVM);
447
448 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
449 if (hrc == HV_SUCCESS)
450 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
451
452 if ( hrc == HV_SUCCESS
453 && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
454 {
455 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
456 {
457 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
458 {
459 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
460 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
461 }
462 }
463 }
464
465 if ( hrc == HV_SUCCESS
466 && (fWhat & CPUMCTX_EXTRN_V0_V31))
467 {
468 /* SIMD/FP registers. */
469 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
470 {
471 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
472 hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
473 }
474 }
475
476 if ( hrc == HV_SUCCESS
477 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR)))
478 {
479 /* System registers. */
480 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
481 {
482 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
483 {
484 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
485 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
486 }
487 }
488 }
489
490 if ( hrc == HV_SUCCESS
491 && (fWhat & CPUMCTX_EXTRN_PSTATE))
492 {
493 uint64_t u64Tmp;
494 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
495 if (hrc == HV_SUCCESS)
496 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
497 }
498
499 /* Almost done, just update extern flags. */
500 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
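/* Once no CPUMCTX_EXTRN_ALL bits remain, drop the keeper bits too so the context reads as fully imported: */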
501 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
502 pVCpu->cpum.GstCtx.fExtrn = 0;
503
504 return nemR3DarwinHvSts2Rc(hrc);
505}
506
507
508/**
509 * Exports the guest state to HV for execution.
510 *
511 * @returns VBox status code.
512 * @param pVM The cross context VM structure.
513 * @param pVCpu The cross context virtual CPU structure of the
514 * calling EMT.
515 */
516static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
517{
518 RT_NOREF(pVM);
519 hv_return_t hrc = HV_SUCCESS;
520
521 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
522 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
523 {
524 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
525 {
526 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
527 {
528 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
529 hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
530 }
531 }
532 }
533
534 if ( hrc == HV_SUCCESS
535 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
536 {
537 /* SIMD/FP registers. */
538 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
539 {
540 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
541 hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
542 }
543 }
544
545 if ( hrc == HV_SUCCESS
546 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
547 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
548 {
549 /* System registers. */
550 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
551 {
552 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
553 {
554 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
555 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
556 }
557 }
558 }
559
560 if ( hrc == HV_SUCCESS
561 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
562 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);
563
564 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
565 return nemR3DarwinHvSts2Rc(hrc);
566}
567
568
569/**
570 * Try initialize the native API.
571 *
572 * This may only do part of the job, more can be done in
573 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
574 *
575 * @returns VBox status code.
576 * @param pVM The cross context VM structure.
577 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
578 * the latter we'll fail if we cannot initialize.
579 * @param fForced Whether the HMForced flag is set and we should
580 * fail if we cannot initialize.
581 */
582int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
583{
584 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
585
586 /*
587 * Some state init.
588 */
589 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
590 RT_NOREF(pCfgNem);
591
592 /*
593 * Error state.
594 * The error message will be non-empty on failure and 'rc' will be set too.
595 */
596 RTERRINFOSTATIC ErrInfo;
597 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
598
599 int rc = VINF_SUCCESS;
600 hv_return_t hrc = hv_vm_create(NULL);
601 if (hrc == HV_SUCCESS)
602 {
603 pVM->nem.s.fCreatedVm = true;
604 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
605 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
606 Log(("NEM: Marked active!\n"));
607 PGMR3EnableNemMode(pVM);
608 }
609 else
610 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
611 "hv_vm_create() failed: %#x", hrc);
612
613 /*
614 * We only fail if in forced mode, otherwise just log the complaint and return.
615 */
616 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
617 if ( (fForced || !fFallback)
618 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
619 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
620
621 if (RTErrInfoIsSet(pErrInfo))
622 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
623 return VINF_SUCCESS;
624}
625
626
627/**
628 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
629 *
630 * @returns VBox status code
631 * @param pVM The VM handle.
632 * @param pVCpu The vCPU handle.
633 * @param idCpu ID of the CPU to create.
634 */
635static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
636{
637 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
638 if (hrc != HV_SUCCESS)
639 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
640 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
641
642 /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
643 pVCpu->nem.s.u64VTimerOff = 0;
644
645 if (idCpu == 0)
646 {
647 /** @todo */
648 }
649
650 return VINF_SUCCESS;
651}
652
653
654/**
655 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
656 *
657 * @returns VBox status code
658 * @param pVCpu The vCPU handle.
659 */
660static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
661{
662 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
663 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
664 return VINF_SUCCESS;
665}
666
667
668/**
669 * This is called after CPUMR3Init is done.
670 *
671 * @returns VBox status code.
672 * @param pVM The VM handle.
673 */
674int nemR3NativeInitAfterCPUM(PVM pVM)
675{
676 /*
677 * Validate sanity.
678 */
679 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
680 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
681
682 /*
683 * Setup the EMTs.
684 */
685 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
686 {
687 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
688
689 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
690 if (RT_FAILURE(rc))
691 {
692 /* Rollback. */
693 while (idCpu--)
694 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);
695
696 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
697 }
698 }
699
700 pVM->nem.s.fCreatedEmts = true;
701 return VINF_SUCCESS;
702}
703
704
705int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
706{
707 RT_NOREF(pVM, enmWhat);
708 return VINF_SUCCESS;
709}
710
711
712int nemR3NativeTerm(PVM pVM)
713{
714 /*
715 * Delete the VM.
716 */
717
718 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
719 {
720 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
721
722 /*
723 * Apple's documentation states that the vCPU should be destroyed
724 * on the thread running the vCPU but as all the other EMTs are gone
725 * at this point, destroying the VM would hang.
726 *
727 * We seem to be in luck here though, as destroying apparently works
728 * from EMT(0) as well.
729 */
730 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
731 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
732 }
733
734 pVM->nem.s.fCreatedEmts = false;
735 if (pVM->nem.s.fCreatedVm)
736 {
737 hv_return_t hrc = hv_vm_destroy();
738 if (hrc != HV_SUCCESS)
739 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
740
741 pVM->nem.s.fCreatedVm = false;
742 }
743 return VINF_SUCCESS;
744}
745
746
747/**
748 * VM reset notification.
749 *
750 * @param pVM The cross context VM structure.
751 */
752void nemR3NativeReset(PVM pVM)
753{
754 RT_NOREF(pVM);
755}
756
757
758/**
759 * Reset CPU due to INIT IPI or hot (un)plugging.
760 *
761 * @param pVCpu The cross context virtual CPU structure of the CPU being
762 * reset.
763 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
764 */
765void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
766{
767 RT_NOREF(pVCpu, fInitIpi);
768}
769
770
771/**
772 * Returns the byte size from the given access SAS value.
773 *
774 * @returns Number of bytes to transfer.
775 * @param uSas The SAS value to convert.
776 */
777DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
778{
779 switch (uSas)
780 {
781 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
782 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
783 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
784 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
785 default:
786 AssertReleaseFailed();
787 }
788
789 return 0;
790}
791
792
793/**
794 * Sets the given general purpose register to the given value.
795 *
796 * @param pVCpu The cross context virtual CPU structure of the
797 * calling EMT.
798 * @param uReg The register index.
799 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
800 * @param fSignExtend Flag whether to sign extend the value.
801 * @param u64Val The value.
802 */
803DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
804{
805 AssertReturnVoid(uReg < 31);
806
807 if (f64BitReg)
808 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
809 else
810 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
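/* For reference: on real AArch64 hardware a W-register write zero-extends, i.e. it clears bits [63:32] of the X register. */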
811
812 /* Mark the register as not extern anymore. */
813 switch (uReg)
814 {
815 case 0:
816 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
817 break;
818 case 1:
819 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
820 break;
821 case 2:
822 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
823 break;
824 case 3:
825 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
826 break;
827 default:
828 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
829 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
830 }
831}
832
833
834/**
835 * Gets the given general purpose register and returns the value.
836 *
837 * @returns Value from the given register.
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling EMT.
840 * @param uReg The register index.
841 */
842DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
843{
844 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
845
846 if (uReg == ARMV8_AARCH64_REG_ZR)
847 return 0;
848
849 /** @todo Import the register if extern. */
850 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
851
852 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
853}
854
855
856/**
857 * Works on the data abort exception (which will be an MMIO access most of the time).
858 *
859 * @returns VBox strict status code.
860 * @param pVM The cross context VM structure.
861 * @param pVCpu The cross context virtual CPU structure of the
862 * calling EMT.
863 * @param uIss The instruction specific syndrome value.
864 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
865 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
866 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
867 */
868static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
869 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
870{
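/* Data abort ISS layout (ARM ARM, ESR_ELx): ISV[24], SAS[23:22], SSE[21], SRT[20:16], SF[15], AR[14], S1PTW[7], WnR[6], DFSC[5:0]. */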
871 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
872 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
873 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
874 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
875 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
876 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
877 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
878 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
879 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
880 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
881
882 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
883
884 EMHistoryAddExit(pVCpu,
885 fWrite
886 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
887 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
888 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
889
890 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
891 uint64_t u64Val = 0;
892 if (fWrite)
893 {
894 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
895 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
896 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
897 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
898 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
899 }
900 else
901 {
902 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
903 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
904 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
905 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
906 if (rcStrict == VINF_SUCCESS)
907 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
908 }
909
910 if (rcStrict == VINF_SUCCESS)
911 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
912
913 return rcStrict;
914}
915
916
917/**
918 * Works on the trapped MRS, MSR and system instruction exception.
919 *
920 * @returns VBox strict status code.
921 * @param pVM The cross context VM structure.
922 * @param pVCpu The cross context virtual CPU structure of the
923 * calling EMT.
924 * @param uIss The instruction specific syndrome value.
925 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
926 */
927static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
928{
929 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
930 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
931 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
932 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
933 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
934 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
935 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
936 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
937 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
938 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
939
940 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
941 EMHistoryAddExit(pVCpu,
942 fRead
943 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
944 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
945 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
946
947 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
948 uint64_t u64Val = 0;
949 if (fRead)
950 {
951 RT_NOREF(pVM);
952 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
953 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
954 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
955 VBOXSTRICTRC_VAL(rcStrict) ));
956 if (rcStrict == VINF_SUCCESS)
957 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
958 }
959 else
960 {
961 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
962 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
963 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
964 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
965 VBOXSTRICTRC_VAL(rcStrict) ));
966 }
967
968 if (rcStrict == VINF_SUCCESS)
969 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
970
971 return rcStrict;
972}
973
974
975/**
976 * Works on the trapped HVC instruction exception.
977 *
978 * @returns VBox strict status code.
979 * @param pVM The cross context VM structure.
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling EMT.
982 * @param uIss The instruction specific syndrome value.
983 */
984static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
985{
986 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
987 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
988
989#if 0 /** @todo For later */
990 EMHistoryAddExit(pVCpu,
991 fRead
992 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
993 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
994 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
995#endif
996
997 RT_NOREF(pVM);
998 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
999 /** @todo Raise exception to EL1 if PSCI not configured. */
1000 /** @todo Need a generic mechanism here to pass this on (GIM maybe?). Always return -1 for now (PSCI). */
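/* Note: -1 is the NOT_SUPPORTED error code in the Arm PSCI specification's return value encoding. */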
1001 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)-1);
1002
1003 return rcStrict;
1004}
1005
1006
1007/**
1008 * Handles an exception VM exit.
1009 *
1010 * @returns VBox strict status code.
1011 * @param pVM The cross context VM structure.
1012 * @param pVCpu The cross context virtual CPU structure of the
1013 * calling EMT.
1014 * @param pExit Pointer to the exit information.
1015 */
1016static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1017{
1018 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1019 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1020 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1021
1022 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1023 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1024
1025 switch (uEc)
1026 {
1027 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1028 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1029 pExit->exception.physical_address);
1030 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1031 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1032 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1033 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1034 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1035 {
1036 /* No need to halt if there is an interrupt pending already. */
1037 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1038 return VINF_SUCCESS;
1039
1040 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1041 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1042 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1043 {
1044 uint64_t cTicksVTimer = mach_absolute_time() - pVCpu->nem.s.u64VTimerOff; /* same clock as the offset, see the r100076 note above */
1045
1046 /* Check whether it expired and start executing guest code. */
1047 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1048 return VINF_SUCCESS;
1049
1050 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
1051 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
1052
1053 /*
1054 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
1055 * plus scheduling overhead which would increase the wakeup latency.
1056 * So only halt when the threshold is exceeded (needs more experimentation but 5ms turned out to be a good compromise
1057 * between CPU load when the guest is idle and performance).
1058 */
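/* For scale, assuming the 24 MHz counter frequency typical of Apple Silicon: 5 ms corresponds to 0.005 * 24000000 = 120000 vTimer ticks. */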
1059 if (cNanoSecsVTimerToExpire < 5 * RT_NS_1MS)
1060 return VINF_SUCCESS;
1061
1062 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1063 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVCpu->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1064 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1065 }
1066 else
1067 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1068
1069 return VINF_EM_HALT;
1070 }
1071 case ARMV8_ESR_EL2_EC_UNKNOWN:
1072 default:
1073 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1074 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1075 AssertReleaseFailed();
1076 return VERR_NOT_IMPLEMENTED;
1077 }
1078
1079 return VINF_SUCCESS;
1080}
1081
1082
1083/**
1084 * Handles an exit from hv_vcpu_run().
1085 *
1086 * @returns VBox strict status code.
1087 * @param pVM The cross context VM structure.
1088 * @param pVCpu The cross context virtual CPU structure of the
1089 * calling EMT.
1090 */
1091static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1092{
1093 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1094 if (RT_FAILURE(rc))
1095 return rc;
1096
1097#ifdef LOG_ENABLED
1098 if (LogIs3Enabled())
1099 nemR3DarwinLogState(pVM, pVCpu);
1100#endif
1101
1102 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1103 switch (pExit->reason)
1104 {
1105 case HV_EXIT_REASON_CANCELED:
1106 return VINF_EM_RAW_INTERRUPT;
1107 case HV_EXIT_REASON_EXCEPTION:
1108 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1109 case HV_EXIT_REASON_VTIMER_ACTIVATED:
1110 {
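/* Hypervisor.framework masks the vTimer when delivering this exit; it stays masked until hv_vcpu_set_vtimer_mask() clears the mask again, which nemR3DarwinPreRunGuest() does once the guest has handled the interrupt. */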
1111 LogFlowFunc(("vTimer got activated\n"));
1112 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1113 pVCpu->nem.s.fVTimerActivated = true;
1114 return GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, true /*fAsserted*/);
1115 }
1116 default:
1117 AssertReleaseFailed();
1118 break;
1119 }
1120
1121 return VERR_INVALID_STATE;
1122}
1123
1124
1125/**
1126 * Runs the guest once until an exit occurs.
1127 *
1128 * @returns HV status code.
1129 * @param pVM The cross context VM structure.
1130 * @param pVCpu The cross context virtual CPU structure.
1131 */
1132static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1133{
1134 TMNotifyStartOfExecution(pVM, pVCpu);
1135
1136 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1137
1138 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1139
1140 return hrc;
1141}
1142
1143
1144/**
1145 * Prepares the VM to run the guest.
1146 *
1147 * @returns Strict VBox status code.
1148 * @param pVM The cross context VM structure.
1149 * @param pVCpu The cross context virtual CPU structure.
1150 * @param fSingleStepping Flag whether we run in single stepping mode.
1151 */
1152static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
1153{
1154#ifdef LOG_ENABLED
1155 bool fIrq = false;
1156 bool fFiq = false;
1157
1158 if (LogIs3Enabled())
1159 nemR3DarwinLogState(pVM, pVCpu);
1160#endif
1161
1162 /** @todo */ RT_NOREF(fSingleStepping);
1163 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
1164 AssertRCReturn(rc, rc);
1165
1166 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
1167 if (pVCpu->nem.s.fVTimerActivated)
1168 {
1169 /* Read the CNTV_CTL_EL0 register. */
1170 uint64_t u64CntvCtl = 0;
1171
1172 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
1173 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9); /* hrc is a hv_return_t, not a VBox rc */
1174
1175 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1176 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1177 {
1178 /* Clear the interrupt. */
1179 GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, false /*fAsserted*/);
1180
1181 pVCpu->nem.s.fVTimerActivated = false;
1182 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
1183 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1184 }
1185 }
1186
1187 /* Set the pending interrupt state. */
1188 hv_return_t hrc = HV_SUCCESS;
1189 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
1190 {
1191 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
1192 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1193#ifdef LOG_ENABLED
1194 fIrq = true;
1195#endif
1196 }
1197 else
1198 {
1199 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
1200 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1201 }
1202
1203 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
1204 {
1205 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
1206 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1207#ifdef LOG_ENABLED
1208 fFiq = true;
1209#endif
1210 }
1211 else
1212 {
1213 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
1214 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1215 }
1216
1217 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
1218 pVCpu->nem.s.fEventPending = false;
1219 return VINF_SUCCESS;
1220}
1221
1222
1223/**
1224 * The normal runloop (no debugging features enabled).
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pVM The cross context VM structure.
1228 * @param pVCpu The cross context virtual CPU structure.
1229 */
1230static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
1231{
1232 /*
1233 * The run loop.
1234 *
1235 * Current approach to state updating to use the sledgehammer and sync
1236 * everything every time. This will be optimized later.
1237 */
1238
1239 /* Update the vTimer offset after resuming if instructed. */
1240 if (pVCpu->nem.s.fVTimerOffUpdate)
1241 {
1242 /*
1243 * Program the new offset: first get the current counter value with the old vTimer offset applied, then adjust
1244 * the new offset so the guest doesn't notice the pause.
1245 */
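/* In counter terms: guestTicks = hostTicks - u64VTimerOff, so growing the offset by the pause duration (u64TscNew - u64VTimerValuePaused) makes the guest counter resume exactly where it stopped. */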
1246 uint64_t u64TscNew = mach_absolute_time() - pVCpu->nem.s.u64VTimerOff;
1247 Assert(u64TscNew >= pVM->nem.s.u64VTimerValuePaused);
1248 LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
1249 pVCpu->nem.s.u64VTimerOff, u64TscNew, pVM->nem.s.u64VTimerValuePaused,
1250 pVCpu->nem.s.u64VTimerOff + (u64TscNew - pVM->nem.s.u64VTimerValuePaused)));
1251
1252 pVCpu->nem.s.u64VTimerOff += u64TscNew - pVM->nem.s.u64VTimerValuePaused;
1253 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVCpu->nem.s.u64VTimerOff);
1254 if (hrc != HV_SUCCESS)
1255 return nemR3DarwinHvSts2Rc(hrc);
1256
1257 pVCpu->nem.s.fVTimerOffUpdate = false;
1258 }
1259
1260 /*
1261 * Poll timers and run for a bit.
1262 */
1263 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1264 * the whole polling job when timers have changed... */
1265 uint64_t offDeltaIgnored;
1266 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1267 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1268 for (unsigned iLoop = 0;; iLoop++)
1269 {
1270 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
1271 if (rcStrict != VINF_SUCCESS)
1272 break;
1273
1274 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
1275 if (hrc == HV_SUCCESS)
1276 {
1277 /*
1278 * Deal with the message.
1279 */
1280 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1281 if (rcStrict == VINF_SUCCESS)
1282 { /* hopefully likely */ }
1283 else
1284 {
1285 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1286 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1287 break;
1288 }
1289 }
1290 else
1291 {
1292 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
1293 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
1294 }
1295 } /* the run loop */
1296
1297 return rcStrict;
1298}
1299
1300
1301VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1302{
1303#ifdef LOG_ENABLED
1304 if (LogIs3Enabled())
1305 nemR3DarwinLogState(pVM, pVCpu);
1306#endif
1307
1308 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
1309
1310 /*
1311 * Try switch to NEM runloop state.
1312 */
1313 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1314 { /* likely */ }
1315 else
1316 {
1317 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1318 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1319 return VINF_SUCCESS;
1320 }
1321
1322 VBOXSTRICTRC rcStrict;
1323#if 0
1324 if ( !pVCpu->nem.s.fUseDebugLoop
1325 && !nemR3DarwinAnyExpensiveProbesEnabled()
1326 && !DBGFIsStepping(pVCpu)
1327 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
1328#endif
1329 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
1330#if 0
1331 else
1332 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
1333#endif
1334
1335 if (rcStrict == VINF_EM_RAW_TO_R3)
1336 rcStrict = VINF_SUCCESS;
1337
1338 /*
1339 * Convert any pending HM events back to TRPM due to premature exits.
1340 *
1341 * This is because execution may continue from IEM and we would need to inject
1342 * the event from there (hence place it back in TRPM).
1343 */
1344 if (pVCpu->nem.s.fEventPending)
1345 {
1346 /** @todo */
1347 }
1348
1349
1350 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
1351 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1352
1353 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
1354 {
1355 /* Try anticipate what we might need. */
1356 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
1357 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
1358 || RT_FAILURE(rcStrict))
1359 fImport = CPUMCTX_EXTRN_ALL;
1360 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
1361 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
1362 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
1363
1364 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
1365 {
1366 /* Only import what is external currently. */
1367 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
1368 if (RT_SUCCESS(rc2))
1369 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
1370 else if (RT_SUCCESS(rcStrict))
1371 rcStrict = rc2;
1372 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1373 pVCpu->cpum.GstCtx.fExtrn = 0;
1374 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
1375 }
1376 else
1377 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1378 }
1379 else
1380 {
1381 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1382 pVCpu->cpum.GstCtx.fExtrn = 0;
1383 }
1384
1385 return rcStrict;
1386}
1387
1388
1389VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1390{
1391 RT_NOREF(pVM, pVCpu);
1392 return true; /** @todo Are there any cases where we have to emulate? */
1393}
1394
1395
1396bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1397{
1398 VMCPU_ASSERT_EMT(pVCpu);
1399 bool fOld = pVCpu->nem.s.fSingleInstruction;
1400 pVCpu->nem.s.fSingleInstruction = fEnable;
1401 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
1402 return fOld;
1403}
1404
1405
1406void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1407{
1408 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
1409
1410 RT_NOREF(pVM, fFlags);
1411
1412 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
1413 if (hrc != HV_SUCCESS)
1414 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
1415}
1416
1417
1418DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
1419{
1420 RT_NOREF(pVM, fUseDebugLoop);
1421 AssertReleaseFailed();
1422 return false;
1423}
1424
1425
1426DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
1427{
1428 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
1429 return fUseDebugLoop;
1430}
1431
1432
1433VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
1434 uint8_t *pu2State, uint32_t *puNemRange)
1435{
1436 RT_NOREF(pVM, puNemRange);
1437
1438 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
1439#if defined(VBOX_WITH_PGM_NEM_MODE)
1440 if (pvR3)
1441 {
1442 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1443 if (RT_FAILURE(rc))
1444 {
1445 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
1446 return VERR_NEM_MAP_PAGES_FAILED;
1447 }
1448 }
1449 return VINF_SUCCESS;
1450#else
1451 RT_NOREF(pVM, GCPhys, cb, pvR3);
1452 return VERR_NEM_MAP_PAGES_FAILED;
1453#endif
1454}
1455
1456
1457VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
1458{
1459 RT_NOREF(pVM);
1460 return false;
1461}
1462
1463
1464VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1465 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1466{
1467 RT_NOREF(pVM, puNemRange, pvRam, fFlags);
1468
1469 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
1470 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
1471
1472#if defined(VBOX_WITH_PGM_NEM_MODE)
1473 /*
1474 * Unmap the RAM we're replacing.
1475 */
1476 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1477 {
1478 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1479 if (RT_SUCCESS(rc))
1480 { /* likely */ }
1481 else if (pvMmio2)
1482 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
1483 GCPhys, cb, fFlags, rc));
1484 else
1485 {
1486 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1487 GCPhys, cb, fFlags, rc));
1488 return VERR_NEM_UNMAP_PAGES_FAILED;
1489 }
1490 }
1491
1492 /*
1493 * Map MMIO2 if any.
1494 */
1495 if (pvMmio2)
1496 {
1497 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
1498 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1499 if (RT_FAILURE(rc))
1500 {
1501 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
1502 GCPhys, cb, fFlags, pvMmio2, rc));
1503 return VERR_NEM_MAP_PAGES_FAILED;
1504 }
1505 }
1506 else
1507 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
1508
1509#else
1510 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
1511 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
1512#endif
1513 return VINF_SUCCESS;
1514}
1515
1516
1517VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1518 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
1519{
1520 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
1521 return VINF_SUCCESS;
1522}
1523
1524
1525VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
1526 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1527{
1528 RT_NOREF(pVM, puNemRange);
1529
1530 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
1531 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
1532
1533 int rc = VINF_SUCCESS;
1534#if defined(VBOX_WITH_PGM_NEM_MODE)
1535 /*
1536 * Unmap the MMIO2 pages.
1537 */
1538 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
1539 * we may have more stuff to unmap even in case of pure MMIO... */
1540 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
1541 {
1542 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1543 if (RT_FAILURE(rc))
1544 {
1545 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1546 GCPhys, cb, fFlags, rc));
1547 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1548 }
1549 }
1550
1551 /* Ensure the page is marked as unmapped if relevant. */
1552 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
1553
1554 /*
1555 * Restore the RAM we replaced.
1556 */
1557 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1558 {
1559 AssertPtr(pvRam);
1560 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1561 if (RT_SUCCESS(rc))
1562 { /* likely */ }
1563 else
1564 {
1565 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
1566 rc = VERR_NEM_MAP_PAGES_FAILED;
1567 }
1568 }
1569
1570 RT_NOREF(pvMmio2);
1571#else
1572 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
1573 if (pu2State)
1574 *pu2State = UINT8_MAX;
1575 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1576#endif
1577 return rc;
1578}
1579
1580
1581VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
1582 void *pvBitmap, size_t cbBitmap)
1583{
1584 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
1585 AssertReleaseFailed();
1586 return VERR_NOT_IMPLEMENTED;
1587}
1588
1589
1590VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
1591 uint8_t *pu2State, uint32_t *puNemRange)
1592{
1593 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1594
1595 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
1596 *pu2State = UINT8_MAX;
1597 *puNemRange = 0;
1598 return VINF_SUCCESS;
1599}
1600
1601
1602VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
1603 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
1604{
1605 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
1606 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
1607 *pu2State = UINT8_MAX;
1608
1609#if defined(VBOX_WITH_PGM_NEM_MODE)
1610 /*
1611 * (Re-)map readonly.
1612 */
1613 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
1614 int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
1615 if (RT_FAILURE(rc))
1616 {
1617 LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
1618 GCPhys, cb, pvPages, fFlags, rc));
1619 return VERR_NEM_MAP_PAGES_FAILED;
1620 }
1621 RT_NOREF(fFlags, puNemRange);
1622 return VINF_SUCCESS;
1623#else
1624 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1625 return VERR_NEM_MAP_PAGES_FAILED;
1626#endif
1627}
1628
1629
1630VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1631 RTR3PTR pvMemR3, uint8_t *pu2State)
1632{
1633 RT_NOREF(pVM);
1634
1635 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
1636 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
1637
1638 *pu2State = UINT8_MAX;
1639#if defined(VBOX_WITH_PGM_NEM_MODE)
1640 if (pvMemR3)
1641 {
1642 int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1643 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
1644 pvMemR3, GCPhys, cb, rc));
1645 }
1646 RT_NOREF(enmKind);
1647#else
1648 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
1649 AssertFailed();
1650#endif
1651}
1652
1653
1654VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
1655{
1656 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
1657 RT_NOREF(pVCpu, fEnabled);
1658}
1659
1660
1661void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1662{
1663 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1664 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
1665}
1666
1667
1668void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1669 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1670{
1671 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1672 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1673 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
1674}
1675
1676
1677int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
1678 PGMPAGETYPE enmType, uint8_t *pu2State)
1679{
1680 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1681 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1682 RT_NOREF(HCPhys, fPageProt, enmType);
1683
1684 return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1685}
1686
1687
1688VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
1689 PGMPAGETYPE enmType, uint8_t *pu2State)
1690{
1691 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1692 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1693 RT_NOREF(HCPhys, pvR3, fPageProt, enmType);
1694
1695 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1696}
1697
1698
1699VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
1700 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
1701{
1702 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1703 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
1704 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
1705
1706 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1707}
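/*
 * Editor's note: the three page notification callbacks above all just unmap
 * the affected guest page. The likely intent (an inference, not stated in
 * this file) is lazy remapping: the next guest access faults into PGM, which
 * re-establishes the mapping with the new backing page and protection.
 */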
1708
1709
1710/**
1711 * Interface for importing state on demand (used by IEM).
1712 *
1713 * @returns VBox status code.
1714 * @param pVCpu The cross context CPU structure.
1715 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1716 */
1717VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1718{
1719 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
1720 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1721
1722 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
1723}
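/*
 * Editor's sketch (not upstream code): a typical caller imports the bits of
 * guest state it needs before touching them, e.g. everything via the generic
 * CPUMCTX_EXTRN_ALL mask:
 */
#if 0 /* illustration only */
    int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);
#endif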
1724
1725
1726/**
1727 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1728 *
1729 * @returns VBox status code.
1730 * @param pVCpu The cross context CPU structure.
1731 * @param pcTicks Where to return the CPU tick count.
1732 * @param puAux Where to return the TSC_AUX register value (always zero here, ARM has no TSC_AUX).
1733 */
1734VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1735{
1736 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
1737 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1738
1739 if (puAux)
1740 *puAux = 0;
1741 *pcTicks = mach_absolute_time() - pVCpu->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
1742 return VINF_SUCCESS;
1743}
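/*
 * Editor's note, spelling out the arithmetic used above: the guest-visible
 * counter is
 *      CNTVCT_EL0 = mach_absolute_time() - u64VTimerOff
 * so restoring a paused counter value uPausedTscValue amounts to recomputing
 *      u64VTimerOff = mach_absolute_time() - uPausedTscValue
 * which is what the fVTimerOffUpdate handling requested by
 * NEMHCResumeCpuTickOnAll() below presumably does on the EMT.
 */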
1744
1745
1746/**
1747 * Resumes the CPU clock (the vTimer, the ARM counterpart to the TSC) on all virtual CPUs.
1748 *
1749 * This is called by TM when the VM is started, restored, resumed or similar.
1750 *
1751 * @returns VBox status code.
1752 * @param pVM The cross context VM structure.
1753 * @param pVCpu The cross context CPU structure of the calling EMT.
1754 * @param uPausedTscValue The (vTimer) counter value at the time of pausing.
1755 */
1756VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1757{
1758 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
1759 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1760 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1761
1762 pVM->nem.s.u64VTimerValuePaused = uPausedTscValue;
1763
1764 /*
1765 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
1766 * (needs to be done on the actual EMT).
1767 */
1768 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1769 {
1770 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1771 pVCpuDst->nem.s.fVTimerOffUpdate = true;
1772 }
1773
1774 return VINF_SUCCESS;
1775}
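/*
 * Editor's sketch (not upstream code): a minimal version of the deferred
 * offset update an EMT would perform when it finds fVTimerOffUpdate set.
 * The helper name and the hVCpu member are assumptions; the
 * hv_vcpu_set_vtimer_offset() API is the documented Hypervisor.framework
 * call for programming the vTimer offset.
 */
#if 0 /* illustration only */
static void nemR3DarwinVTimerOffUpdateSketch(PVM pVM, PVMCPU pVCpu)
{
    /* Rebase the vTimer so the guest resumes counting from the paused value. */
    uint64_t const u64VTimerOff = mach_absolute_time() - pVM->nem.s.u64VTimerValuePaused;
    pVCpu->nem.s.u64VTimerOff     = u64VTimerOff;
    pVCpu->nem.s.fVTimerOffUpdate = false;
    hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu /* assumed member */, u64VTimerOff);
    AssertLogRelMsg(hrc == HV_SUCCESS, ("hv_vcpu_set_vtimer_offset -> %#x\n", hrc));
}
#endif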
1776
1777
1778/**
1779 * Returns features supported by the NEM backend.
1780 *
1781 * @returns Flags of features supported by the native NEM backend.
1782 * @param pVM The cross context VM structure.
1783 */
1784VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
1785{
1786 RT_NOREF(pVM);
1787 /*
1788 * Apple's Hypervisor.framework is not supported unless the CPU supports nested paging
1789 * and unrestricted guest execution, so we can always return these flags here.
1790 */
1791 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
1792}
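/*
 * Editor's sketch (not upstream code): callers test individual feature bits,
 * e.g. to confirm second level address translation is available:
 */
#if 0 /* illustration only */
    if (NEMHCGetFeatures(pVM) & NEM_FEAT_F_NESTED_PAGING)
    { /* can rely on nested paging */ }
#endif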
1793
1794
1795/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
1796 *
1797 * @todo Add notes as the implementation progresses...
1798 */
1799