VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 106952

Last change on this file since 106952 was 106952, checked in by vboxsync, 3 months ago

VMM/ARM: Implement configuring the GICv3 emulated by Hyper-V. This API is available starting with Windows build 27744, but there is no public SDK for it yet, so we have to resort to defining these structures ourselves. bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 136.7 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 106952 2024-11-12 09:51:44Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/vmm/nem.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/pdm.h>
61#include <VBox/vmm/dbgftrace.h>
62#include "NEMInternal.h"
63#include <VBox/vmm/vmcc.h>
64
65#include <iprt/formats/arm-psci.h>
66
67#include <iprt/ldr.h>
68#include <iprt/path.h>
69#include <iprt/string.h>
70#include <iprt/system.h>
71#include <iprt/utf16.h>
72
73#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
74HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
75# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
76#endif
77
78
/*
 * The following definitions, which appeared in build 27744, allow configuring the base address of the
 * GICv3 controller emulated by Hyper-V (there is no official SDK for this yet).
 */
83/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
84#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
85/** No GIC present. */
86#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
87/** Hyper-V emulates a GICv3. */
88#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
89
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Hand-defined mirror of the (not yet publicly documented) partition property
 * payload introduced in Windows build 27744 -- the AssertCompileSize below
 * guards the expected 64 byte wire size.  Passed to WHvSetPartitionProperty
 * with WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS.
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;  /**< WHV_ARM64_IC_EMULATION_MODE_XXX value. */
    uint32_t u32Rsvd;           /**< Reserved / explicit alignment padding. */
    union
    {
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Guest physical base address of the GIC distributor (GICD). */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Guest physical base address of the ITS translater. */
            uint32_t u32Rsvd;                   /**< Reserved. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits. */
            uint32_t u32PpiCntvOverflw;         /**< Absolute PPI INTID of the virtual timer (see nemR3WinGicCreate). */
            uint32_t u32PpiPmu;                 /**< Absolute PPI INTID of the PMU. */
            uint32_t au32Rsvd[6];               /**< Reserved, pads the structure to 64 bytes. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
112
113
/**
 * The hypercall exit context.
 *
 * Locally defined layout (no SDK definition available to us); the
 * AssertCompileSize below pins the expected size.
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t                     Immediate; /**< Hypercall immediate value. */
    uint16_t                     u16Rsvd;   /**< Reserved. */
    uint32_t                     u32Rsvd;   /**< Reserved. */
    uint64_t                     X[18];     /**< X register values -- presumably x0..x17 at the time of the exit; TODO confirm once an SDK ships. */
} MY_WHV_HYPERCALL_CONTEXT;
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
127
128
/**
 * The exit reason context for arm64.
 *
 * The size differs from the one in the default SDK we build against, hence
 * this local definition (AssertCompileSize guards the expected 272 bytes).
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;  /**< Why WHvRunVirtualProcessor returned. */
    uint32_t               u32Rsvd;     /**< Reserved. */
    uint64_t               u64Rsvd;     /**< Reserved. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT           MemoryAccess;           /**< Memory access exit details. */
        WHV_RUN_VP_CANCELED_CONTEXT         CancelReason;           /**< Details when the run was canceled. */
        MY_WHV_HYPERCALL_CONTEXT            Hypercall;              /**< Hypercall exit details. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Unrecoverable exception exit details. */
        uint64_t                            au64Rsvd2[32];          /**< Pads the union to 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
149
150#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
151
152
153/*********************************************************************************************************************************
154* Defined Constants And Macros *
155*********************************************************************************************************************************/
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161/** @name APIs imported from WinHvPlatform.dll
162 * @{ */
163static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
164static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
165static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
166static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
167static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
168static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
169static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
170static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
171static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
172static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
173static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
174static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
175static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
176static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
177static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
178static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
179//static decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
180decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
181/** @} */
182
183/** The Windows build number. */
184static uint32_t g_uBuildNo = 17134;
185
186
187
/**
 * Import instructions.
 *
 * Drives the dynamic symbol resolution in nemR3WinInitProbeAndLoad(): each
 * entry names an API to resolve from the DLL selected by idxDll and the
 * g_pfnXxx variable that receives the resolved address.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional (load continues when missing). */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap), /* optional: not exported by older builds (see declaration above). */
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
//  NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
220
221
222/*
223 * Let the preprocessor alias the APIs to import variables for better autocompletion.
224 */
225#ifndef IN_SLICKEDIT
226# define WHvGetCapability g_pfnWHvGetCapability
227# define WHvCreatePartition g_pfnWHvCreatePartition
228# define WHvSetupPartition g_pfnWHvSetupPartition
229# define WHvDeletePartition g_pfnWHvDeletePartition
230# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
231# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
232# define WHvMapGpaRange g_pfnWHvMapGpaRange
233# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
234# define WHvTranslateGva g_pfnWHvTranslateGva
235# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
236# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
237# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
238# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
239# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
240# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
241# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
242# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
243//# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
244# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
245
246# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
247# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
248# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
249
250#endif
251
252#if 0 /* unused */
253/** WHV_MEMORY_ACCESS_TYPE names */
254static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
255#endif
256/** NEM_WIN_PAGE_STATE_XXX names. */
257NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
258#ifdef LOG_ENABLED
259/** HV_INTERCEPT_ACCESS_TYPE names. */
260static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
261#endif
262
263
264/*********************************************************************************************************************************
265* Internal Functions *
266*********************************************************************************************************************************/
267DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
268DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
269
270NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
271 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
272
273/**
274 * Worker for nemR3NativeInit that probes and load the native API.
275 *
276 * @returns VBox status code.
277 * @param fForced Whether the HMForced flag is set and we should
278 * fail if we cannot initialize.
279 * @param pErrInfo Where to always return error info.
280 */
281static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
282{
283 /*
284 * Check that the DLL files we need are present, but without loading them.
285 * We'd like to avoid loading them unnecessarily.
286 */
287 WCHAR wszPath[MAX_PATH + 64];
288 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
289 if (cwcPath >= MAX_PATH || cwcPath < 2)
290 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
291
292 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
293 wszPath[cwcPath++] = '\\';
294 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
295 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
296 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
297
298 /*
299 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
300 */
301 /** @todo */
302
303 /** @todo would be great if we could recognize a root partition from the
304 * CPUID info, but I currently don't dare do that. */
305
306 /*
307 * Now try load the DLLs and resolve the APIs.
308 */
309 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
310 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
311 int rc = VINF_SUCCESS;
312 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
313 {
314 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
315 if (RT_FAILURE(rc2))
316 {
317 if (!RTErrInfoIsSet(pErrInfo))
318 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
319 else
320 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
321 ahMods[i] = NIL_RTLDRMOD;
322 rc = VERR_NEM_INIT_FAILED;
323 }
324 }
325 if (RT_SUCCESS(rc))
326 {
327 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
328 {
329 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
330 if (RT_SUCCESS(rc2))
331 {
332 if (g_aImports[i].fOptional)
333 LogRel(("NEM: info: Found optional import %s!%s.\n",
334 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
335 }
336 else
337 {
338 *g_aImports[i].ppfn = NULL;
339
340 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
341 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
342 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
343 if (!g_aImports[i].fOptional)
344 {
345 if (RTErrInfoIsSet(pErrInfo))
346 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
347 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
348 else
349 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
350 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
351 Assert(RT_FAILURE(rc));
352 }
353 }
354 }
355 if (RT_SUCCESS(rc))
356 {
357 Assert(!RTErrInfoIsSet(pErrInfo));
358 }
359 }
360
361 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
362 RTLdrClose(ahMods[i]);
363 return rc;
364}
365
366
367/**
368 * Wrapper for different WHvGetCapability signatures.
369 */
370DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
371{
372 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
373}
374
375
376/**
377 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
378 *
379 * @returns VBox status code.
380 * @param pVM The cross context VM structure.
381 * @param pErrInfo Where to always return error info.
382 */
383static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
384{
385#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
386#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
387#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
388
389 /*
390 * Is the hypervisor present with the desired capability?
391 *
392 * In build 17083 this translates into:
393 * - CPUID[0x00000001].HVP is set
394 * - CPUID[0x40000000] == "Microsoft Hv"
395 * - CPUID[0x40000001].eax == "Hv#1"
396 * - CPUID[0x40000003].ebx[12] is set.
397 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
398 * a non-zero value.
399 */
400 /**
401 * @todo Someone at Microsoft please explain weird API design:
402 * 1. Pointless CapabilityCode duplication int the output;
403 * 2. No output size.
404 */
405 WHV_CAPABILITY Caps;
406 RT_ZERO(Caps);
407 SetLastError(0);
408 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
409 DWORD rcWin = GetLastError();
410 if (FAILED(hrc))
411 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
412 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
413 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
414 if (!Caps.HypervisorPresent)
415 {
416 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
417 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
418 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
419 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
420 }
421 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
422
423
424 /*
425 * Check what extended VM exits are supported.
426 */
427 RT_ZERO(Caps);
428 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
429 if (FAILED(hrc))
430 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
431 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
432 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
433 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
434 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
435 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
436 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
437 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
438 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
439 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
440 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
441
442 /*
443 * Check features in case they end up defining any.
444 */
445 RT_ZERO(Caps);
446 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
447 if (FAILED(hrc))
448 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
449 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
450 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
451 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
452 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
453 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
454
455 /*
456 * Check that the CPU vendor is supported.
457 */
458 RT_ZERO(Caps);
459 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
460 if (FAILED(hrc))
461 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
462 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
463 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
464 switch (Caps.ProcessorVendor)
465 {
466 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
467 case WHvProcessorVendorArm:
468 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
469 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
470 break;
471 default:
472 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
473 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
474 }
475
476 /*
477 * CPU features, guessing these are virtual CPU features?
478 */
479 RT_ZERO(Caps);
480 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
481 if (FAILED(hrc))
482 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
483 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
484 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
485 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
486#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
487 NEM_LOG_REL_CPU_FEATURE(Asid16);
488 NEM_LOG_REL_CPU_FEATURE(TGran16);
489 NEM_LOG_REL_CPU_FEATURE(TGran64);
490 NEM_LOG_REL_CPU_FEATURE(Haf);
491 NEM_LOG_REL_CPU_FEATURE(Hdbs);
492 NEM_LOG_REL_CPU_FEATURE(Pan);
493 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
494 NEM_LOG_REL_CPU_FEATURE(Uao);
495 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
496 NEM_LOG_REL_CPU_FEATURE(Fp);
497 NEM_LOG_REL_CPU_FEATURE(FpHp);
498 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
499 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
500 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
501 NEM_LOG_REL_CPU_FEATURE(GicV41);
502 NEM_LOG_REL_CPU_FEATURE(Ras);
503 NEM_LOG_REL_CPU_FEATURE(PmuV3);
504 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
505 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
506 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
507 NEM_LOG_REL_CPU_FEATURE(Aes);
508 NEM_LOG_REL_CPU_FEATURE(PolyMul);
509 NEM_LOG_REL_CPU_FEATURE(Sha1);
510 NEM_LOG_REL_CPU_FEATURE(Sha256);
511 NEM_LOG_REL_CPU_FEATURE(Sha512);
512 NEM_LOG_REL_CPU_FEATURE(Crc32);
513 NEM_LOG_REL_CPU_FEATURE(Atomic);
514 NEM_LOG_REL_CPU_FEATURE(Rdm);
515 NEM_LOG_REL_CPU_FEATURE(Sha3);
516 NEM_LOG_REL_CPU_FEATURE(Sm3);
517 NEM_LOG_REL_CPU_FEATURE(Sm4);
518 NEM_LOG_REL_CPU_FEATURE(Dp);
519 NEM_LOG_REL_CPU_FEATURE(Fhm);
520 NEM_LOG_REL_CPU_FEATURE(DcCvap);
521 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
522 NEM_LOG_REL_CPU_FEATURE(ApaBase);
523 NEM_LOG_REL_CPU_FEATURE(ApaEp);
524 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
525 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
526 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
527 NEM_LOG_REL_CPU_FEATURE(Jscvt);
528 NEM_LOG_REL_CPU_FEATURE(Fcma);
529 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
530 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
531 NEM_LOG_REL_CPU_FEATURE(Gpa);
532 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
533 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
534
535#undef NEM_LOG_REL_CPU_FEATURE
536 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
537 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
538 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
539 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
540
541 /*
542 * The cache line flush size.
543 */
544 RT_ZERO(Caps);
545 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
546 if (FAILED(hrc))
547 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
548 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
549 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
550 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
551 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
552 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
553 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
554
555 RT_ZERO(Caps);
556 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
557 if (FAILED(hrc))
558 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
559 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
560 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
561 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
562 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
563 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
564 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
565
566
567 /*
568 * See if they've added more properties that we're not aware of.
569 */
570 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
571 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
572 {
573 static const struct
574 {
575 uint32_t iMin, iMax; } s_aUnknowns[] =
576 {
577 { 0x0004, 0x000f },
578 { 0x1003, 0x100f },
579 { 0x2000, 0x200f },
580 { 0x3000, 0x300f },
581 { 0x4000, 0x400f },
582 };
583 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
584 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
585 {
586 RT_ZERO(Caps);
587 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
588 if (SUCCEEDED(hrc))
589 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
590 }
591 }
592
593 /*
594 * For proper operation, we require CPUID exits.
595 */
596 /** @todo Any? */
597
598#undef NEM_LOG_REL_CAP_EX
599#undef NEM_LOG_REL_CAP_SUB_EX
600#undef NEM_LOG_REL_CAP_SUB
601 return VINF_SUCCESS;
602}
603
604
605/**
606 * Initializes the GIC controller emulation provided by Hyper-V.
607 *
608 * @returns VBox status code.
609 * @param pVM The cross context VM structure.
610 *
611 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
612 */
613static int nemR3WinGicCreate(PVM pVM)
614{
615 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
616 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
617
618 /*
619 * Query the MMIO ranges.
620 */
621 RTGCPHYS GCPhysMmioBaseDist = 0;
622 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
623 if (RT_FAILURE(rc))
624 return VMSetError(pVM, rc, RT_SRC_POS,
625 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
626
627 RTGCPHYS GCPhysMmioBaseReDist = 0;
628 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
629 if (RT_FAILURE(rc))
630 return VMSetError(pVM, rc, RT_SRC_POS,
631 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
632
633 RTGCPHYS GCPhysMmioBaseIts = 0;
634 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
635 if (RT_FAILURE(rc))
636 return VMSetError(pVM, rc, RT_SRC_POS,
637 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
638
639 /*
640 * One can only set the GIC distributor base. The re-distributor regions for the individual
641 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
642 */
643 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
644
645 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
646
647 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
648 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
649 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
650 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
651 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
652 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
653 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
654 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
655 if (FAILED(hrc))
656 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
657 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
658 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
659
660 return rc;
661}
662
663
664/**
665 * Creates and sets up a Hyper-V (exo) partition.
666 *
667 * @returns VBox status code.
668 * @param pVM The cross context VM structure.
669 * @param pErrInfo Where to always return error info.
670 */
671static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
672{
673 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
674 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
675
676 /*
677 * Create the partition.
678 */
679 WHV_PARTITION_HANDLE hPartition;
680 HRESULT hrc = WHvCreatePartition(&hPartition);
681 if (FAILED(hrc))
682 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
683 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
684
685 int rc;
686
687 /*
688 * Set partition properties, most importantly the CPU count.
689 */
690 /**
691 * @todo Someone at Microsoft please explain another weird API:
692 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
693 * argument rather than as part of the struct. That is so weird if you've
694 * used any other NT or windows API, including WHvGetCapability().
695 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
696 * technically only need 9 bytes for setting/getting
697 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
698 WHV_PARTITION_PROPERTY Property;
699 RT_ZERO(Property);
700 Property.ProcessorCount = pVM->cCpus;
701 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
702 if (SUCCEEDED(hrc))
703 {
704 RT_ZERO(Property);
705 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
706 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
707 if (SUCCEEDED(hrc))
708 {
709 /*
710 * We'll continue setup in nemR3NativeInitAfterCPUM.
711 */
712 pVM->nem.s.fCreatedEmts = false;
713 pVM->nem.s.hPartition = hPartition;
714 LogRel(("NEM: Created partition %p.\n", hPartition));
715 return VINF_SUCCESS;
716 }
717
718 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
719 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
720 Property.ExtendedVmExits.AsUINT64, hrc);
721 }
722 else
723 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
724 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
725 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
726 WHvDeletePartition(hPartition);
727
728 Assert(!pVM->nem.s.hPartitionDevice);
729 Assert(!pVM->nem.s.hPartition);
730 return rc;
731}
732
733
/**
 * Finishes setting up the Hyper-V partition and creates the virtual processors.
 *
 * Applies the remaining partition properties (cache line flush size, processor
 * features), configures the emulated GIC, instantiates the partition via
 * WHvSetupPartition() and then creates one virtual processor per EMT.  While
 * creating vCPU 0 the ID registers are synthesized and fed to CPUM; every
 * vCPU additionally gets its GIC re-distributor base address programmed.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int nemR3NativeInitSetupVm(PVM pVM)
{
    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
    AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);

    /*
     * Continue setting up the partition now that we've got most of the CPUID feature stuff.
     */
    WHV_PARTITION_PROPERTY Property;
    HRESULT hrc;

#if 0
    /* Not sure if we really need to set the vendor.
       Update: Apparently we don't. WHvPartitionPropertyCodeProcessorVendor was removed in 17110. */
    RT_ZERO(Property);
    Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
    : WHvProcessorVendorIntel;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
                          Property.ProcessorVendor, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
#endif

    /* Not sure if we really need to set the cache line flush size. */
    RT_ZERO(Property);
    Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Sync CPU features with CPUM.
     */
    /** @todo sync CPU features with CPUM. */

    /* Set the partition property. */
    RT_ZERO(Property);
    Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /* Configure the GIC.  Must happen before WHvSetupPartition() instantiates the partition. */
    int rc = nemR3WinGicCreate(pVM);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Set up the partition.
     *
     * Seems like this is where the partition is actually instantiated and we get
     * a handle to it.
     */
    hrc = WHvSetupPartition(hPartition);
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
        if (FAILED(hrc))
        {
            /* Save the failure info before the rollback calls below can clobber it. */
            NTSTATUS const rcNtLast  = RTNtLastStatusValue();
            DWORD const    dwErrLast = RTNtLastErrorValue();
            /* Roll back: destroy all previously created vCPUs before reporting the error. */
            while (idCpu-- > 0)
            {
                HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
                AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
                                                  hPartition, idCpu, hrc2, RTNtLastStatusValue(),
                                                  RTNtLastErrorValue()));
            }
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
        }

        if (idCpu == 0)
        {
            /* Need to query the ID registers and populate CPUM. */
            CPUMIDREGS IdRegs; RT_ZERO(IdRegs);

#if 0
            /* Disabled: read the real ID registers from Hyper-V instead of synthesizing them. */
            WHV_REGISTER_NAME  aenmNames[12];
            WHV_REGISTER_VALUE aValues[12];
            RT_ZERO(aValues);

            aenmNames[0]  = WHvArm64RegisterIdAa64Dfr0El1;
            aenmNames[1]  = WHvArm64RegisterIdAa64Dfr1El1;
            aenmNames[2]  = WHvArm64RegisterIdAa64Isar0El1;
            aenmNames[3]  = WHvArm64RegisterIdAa64Isar1El1;
            aenmNames[4]  = WHvArm64RegisterIdAa64Isar2El1;
            aenmNames[5]  = WHvArm64RegisterIdAa64Mmfr0El1;
            aenmNames[6]  = WHvArm64RegisterIdAa64Mmfr1El1;
            aenmNames[7]  = WHvArm64RegisterIdAa64Mmfr2El1;
            aenmNames[8]  = WHvArm64RegisterIdAa64Pfr0El1;
            aenmNames[9]  = WHvArm64RegisterIdAa64Pfr1El1;
            aenmNames[10] = WHvArm64RegisterCtrEl0;
            aenmNames[11] = WHvArm64RegisterDczidEl0;

            hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
            AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                  ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                                   hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                  , VERR_NEM_GET_REGISTERS_FAILED);

            IdRegs.u64RegIdAa64Pfr0El1  = aValues[8].Reg64;
            IdRegs.u64RegIdAa64Pfr1El1  = aValues[9].Reg64;
            IdRegs.u64RegIdAa64Dfr0El1  = aValues[0].Reg64;
            IdRegs.u64RegIdAa64Dfr1El1  = aValues[1].Reg64;
            IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
            IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
            IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
            IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
            IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
            IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
            IdRegs.u64RegCtrEl0         = aValues[10].Reg64;
            IdRegs.u64RegDczidEl0       = aValues[11].Reg64;
#else
            /* Only the PARange field of ID_AA64MMFR0_EL1 is synthesized (from the probed
               physical address width); all other ID registers are left zero here.
               NOTE(review): presumably CPUMR3PopulateFeaturesByIdRegisters copes with
               zero ID registers — confirm against its implementation. */
            switch (pVM->nem.s.cPhysicalAddressWidth)
            {
                case 32: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_32BITS); break;
                case 36: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_36BITS); break;
                case 40: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_40BITS); break;
                case 42: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_42BITS); break;
                case 44: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_44BITS); break;
                case 48: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_48BITS); break;
                case 52: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_52BITS); break;
                default: AssertReleaseFailed(); break;
            }
#endif

            rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
            if (RT_FAILURE(rc))
                return rc;
        }

        /* Configure the GIC re-distributor region for the GIC: each vCPU gets a
           128 KiB frame at GCPhysMmioBaseReDist + idCpu * 128K.
           NOTE(review): only Reg64 of the 128-bit register value union is written;
           the upper half is presumably ignored for this register — confirm. */
        WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
        WHV_REGISTER_VALUE Value;
        Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;

        hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
                               hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_SET_REGISTERS_FAILED);
    }

    pVM->nem.s.fCreatedEmts = true;

    LogRel(("NEM: Successfully set up partition\n"));
    return VINF_SUCCESS;
}
898
899
900/**
901 * Try initialize the native API.
902 *
903 * This may only do part of the job, more can be done in
904 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
905 *
906 * @returns VBox status code.
907 * @param pVM The cross context VM structure.
908 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
909 * the latter we'll fail if we cannot initialize.
910 * @param fForced Whether the HMForced flag is set and we should
911 * fail if we cannot initialize.
912 */
913int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
914{
915 g_uBuildNo = RTSystemGetNtBuildNo();
916
917 /*
918 * Error state.
919 * The error message will be non-empty on failure and 'rc' will be set too.
920 */
921 RTERRINFOSTATIC ErrInfo;
922 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
923 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
924 if (RT_SUCCESS(rc))
925 {
926 /*
927 * Check the capabilties of the hypervisor, starting with whether it's present.
928 */
929 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
930 if (RT_SUCCESS(rc))
931 {
932 /*
933 * Create and initialize a partition.
934 */
935 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
936 if (RT_SUCCESS(rc))
937 {
938 rc = nemR3NativeInitSetupVm(pVM);
939 if (RT_SUCCESS(rc))
940 {
941 /*
942 * Set ourselves as the execution engine and make config adjustments.
943 */
944 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
945 Log(("NEM: Marked active!\n"));
946 PGMR3EnableNemMode(pVM);
947
948 /*
949 * Register release statistics
950 */
951 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
952 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
953 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
954 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
955 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
956 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
957 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
958 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
959 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
960 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
961 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
962 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
963 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
964 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
965 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
966 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
967 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
968 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
969
970 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
971 {
972 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
973 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
974 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
975 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
976 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
977 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
978 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
979 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
980 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
981 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
982 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
983 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
984 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
985 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
986 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
987 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
988 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
989 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
990 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
991 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
992 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
993 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
994 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
995 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
996 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
997 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
998 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
999 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1000 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1001 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1002 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1003 }
1004
1005 if (!SUPR3IsDriverless())
1006 {
1007 PUVM pUVM = pVM->pUVM;
1008 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1009 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1010 "/NEM/R0Stats/cPagesAvailable");
1011 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1012 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1013 "/NEM/R0Stats/cPagesInUse");
1014 }
1015 }
1016
1017 }
1018
1019 }
1020 }
1021
1022 /*
1023 * We only fail if in forced mode, otherwise just log the complaint and return.
1024 */
1025 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1026 if ( (fForced || !fFallback)
1027 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1028 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1029
1030 if (RTErrInfoIsSet(pErrInfo))
1031 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1032 return VINF_SUCCESS;
1033}
1034
1035
1036/**
1037 * This is called after CPUMR3Init is done.
1038 *
1039 * @returns VBox status code.
1040 * @param pVM The VM handle..
1041 */
1042int nemR3NativeInitAfterCPUM(PVM pVM)
1043{
1044 /*
1045 * Validate sanity.
1046 */
1047 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1048
1049 /** @todo */
1050
1051 /*
1052 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1053 */
1054 /** @todo stats */
1055
1056 /*
1057 * Adjust features.
1058 *
1059 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1060 * the first init call.
1061 */
1062
1063 return VINF_SUCCESS;
1064}
1065
1066
1067int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1068{
1069 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1070 //AssertLogRel(fRet);
1071
1072 NOREF(pVM); NOREF(enmWhat);
1073 return VINF_SUCCESS;
1074}
1075
1076
1077int nemR3NativeTerm(PVM pVM)
1078{
1079 /*
1080 * Delete the partition.
1081 */
1082 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1083 pVM->nem.s.hPartition = NULL;
1084 pVM->nem.s.hPartitionDevice = NULL;
1085 if (hPartition != NULL)
1086 {
1087 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1088 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1089 while (idCpu-- > 0)
1090 {
1091 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1092 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1093 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1094 RTNtLastErrorValue()));
1095 }
1096 WHvDeletePartition(hPartition);
1097 }
1098 pVM->nem.s.fCreatedEmts = false;
1099 return VINF_SUCCESS;
1100}
1101
1102
1103/**
1104 * VM reset notification.
1105 *
1106 * @param pVM The cross context VM structure.
1107 */
1108void nemR3NativeReset(PVM pVM)
1109{
1110 RT_NOREF(pVM);
1111}
1112
1113
1114/**
1115 * Reset CPU due to INIT IPI or hot (un)plugging.
1116 *
1117 * @param pVCpu The cross context virtual CPU structure of the CPU being
1118 * reset.
1119 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1120 */
1121void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1122{
1123 RT_NOREF(pVCpu, fInitIpi);
1124}
1125
1126
/**
 * Exports (writes) the guest CPU state owned by CPUM to Hyper-V.
 *
 * Builds parallel name/value arrays for every register whose CPUMCTX_EXTRN_*
 * bit is currently clear in fExtrn (i.e. the state CPUM holds and Hyper-V
 * doesn't) and pushes them in a single WHvSetVirtualProcessorRegisters call.
 * On success the whole context is marked as external (owned by NEM) again.
 *
 * @returns VBox status code: VINF_SUCCESS or VERR_INTERNAL_ERROR if the
 *          WHv call failed.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* Only the bits NOT marked external need syncing to Hyper-V. */
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
    if (!fWhat)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

/* Append a 64-bit register; value comes from a CPUM register wrapper (.x member). */
#define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue).x; \
        iReg++; \
    } while (0)
/* Append a 64-bit register from a raw uint64_t value. */
#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
/* Append a 128-bit (vector) register. */
#define ADD_REG128(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
        aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
        if (fWhat & CPUMCTX_EXTRN_X1)
            ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
        if (fWhat & CPUMCTX_EXTRN_X2)
            ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
        if (fWhat & CPUMCTX_EXTRN_X3)
            ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
            ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
            ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
            ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
            ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
            ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
            ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
            ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
            ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
            ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
            ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
            ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
            ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
            ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
            ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
            ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
            ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
            ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
            ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
            ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
            ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
            ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
            ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
            ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
            ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
        }
        /* Note: LR is x30 and FP is x29 in the AArch64 register file. */
        if (fWhat & CPUMCTX_EXTRN_LR)
            ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
        if (fWhat & CPUMCTX_EXTRN_FP)
            ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
    }

    /* PC & PSTATE */
    if (fWhat & CPUMCTX_EXTRN_PC)
        ADD_REG64_RAW(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc.u64);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
        ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
        ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
        ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
        ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
        ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
        ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
        ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
        ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
        ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
        ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
        ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
        ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
        ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
        ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
        ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
        ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
        ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
        ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
        ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
        ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
        ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
        ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
        ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
        ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
        ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
        ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
        ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
        ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
        ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
        ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
        ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
    }

#undef ADD_REG64
#undef ADD_REG64_RAW
#undef ADD_REG128

    /*
     * Set the registers.
     */
    /* Worst case above is 65 entries (31 GPRs + PC + PSTATE + 32 Q regs),
       comfortably below the 128-entry arrays. */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        /* Everything just written is now owned by Hyper-V again. */
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
}
1264
1265
1266NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1267{
1268 WHV_REGISTER_NAME aenmNames[128];
1269
1270 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1271 if (!fWhat)
1272 return VINF_SUCCESS;
1273
1274 uintptr_t iReg = 0;
1275
1276 /* GPRs */
1277 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1278 {
1279 if (fWhat & CPUMCTX_EXTRN_X0)
1280 aenmNames[iReg++] = WHvArm64RegisterX0;
1281 if (fWhat & CPUMCTX_EXTRN_X1)
1282 aenmNames[iReg++] = WHvArm64RegisterX1;
1283 if (fWhat & CPUMCTX_EXTRN_X2)
1284 aenmNames[iReg++] = WHvArm64RegisterX2;
1285 if (fWhat & CPUMCTX_EXTRN_X3)
1286 aenmNames[iReg++] = WHvArm64RegisterX3;
1287 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1288 {
1289 aenmNames[iReg++] = WHvArm64RegisterX4;
1290 aenmNames[iReg++] = WHvArm64RegisterX5;
1291 aenmNames[iReg++] = WHvArm64RegisterX6;
1292 aenmNames[iReg++] = WHvArm64RegisterX7;
1293 aenmNames[iReg++] = WHvArm64RegisterX8;
1294 aenmNames[iReg++] = WHvArm64RegisterX9;
1295 aenmNames[iReg++] = WHvArm64RegisterX10;
1296 aenmNames[iReg++] = WHvArm64RegisterX11;
1297 aenmNames[iReg++] = WHvArm64RegisterX12;
1298 aenmNames[iReg++] = WHvArm64RegisterX13;
1299 aenmNames[iReg++] = WHvArm64RegisterX14;
1300 aenmNames[iReg++] = WHvArm64RegisterX15;
1301 aenmNames[iReg++] = WHvArm64RegisterX16;
1302 aenmNames[iReg++] = WHvArm64RegisterX17;
1303 aenmNames[iReg++] = WHvArm64RegisterX18;
1304 aenmNames[iReg++] = WHvArm64RegisterX19;
1305 aenmNames[iReg++] = WHvArm64RegisterX20;
1306 aenmNames[iReg++] = WHvArm64RegisterX21;
1307 aenmNames[iReg++] = WHvArm64RegisterX22;
1308 aenmNames[iReg++] = WHvArm64RegisterX23;
1309 aenmNames[iReg++] = WHvArm64RegisterX24;
1310 aenmNames[iReg++] = WHvArm64RegisterX25;
1311 aenmNames[iReg++] = WHvArm64RegisterX26;
1312 aenmNames[iReg++] = WHvArm64RegisterX27;
1313 aenmNames[iReg++] = WHvArm64RegisterX28;
1314 }
1315 if (fWhat & CPUMCTX_EXTRN_LR)
1316 aenmNames[iReg++] = WHvArm64RegisterLr;
1317 if (fWhat & CPUMCTX_EXTRN_FP)
1318 aenmNames[iReg++] = WHvArm64RegisterFp;
1319 }
1320
1321 /* PC & Flags */
1322 if (fWhat & CPUMCTX_EXTRN_PC)
1323 aenmNames[iReg++] = WHvArm64RegisterPc;
1324 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1325 aenmNames[iReg++] = WHvArm64RegisterPstate;
1326 if (fWhat & CPUMCTX_EXTRN_SPSR)
1327 aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
1328 if (fWhat & CPUMCTX_EXTRN_ELR)
1329 aenmNames[iReg++] = WHvArm64RegisterElrEl1;
1330 if (fWhat & CPUMCTX_EXTRN_SP)
1331 {
1332 aenmNames[iReg++] = WHvArm64RegisterSpEl0;
1333 aenmNames[iReg++] = WHvArm64RegisterSpEl1;
1334 }
1335 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1336 {
1337 aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
1338 aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
1339 aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
1340 aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
1341 }
1342
1343 /* Vector state. */
1344 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1345 {
1346 aenmNames[iReg++] = WHvArm64RegisterQ0;
1347 aenmNames[iReg++] = WHvArm64RegisterQ1;
1348 aenmNames[iReg++] = WHvArm64RegisterQ2;
1349 aenmNames[iReg++] = WHvArm64RegisterQ3;
1350 aenmNames[iReg++] = WHvArm64RegisterQ4;
1351 aenmNames[iReg++] = WHvArm64RegisterQ5;
1352 aenmNames[iReg++] = WHvArm64RegisterQ6;
1353 aenmNames[iReg++] = WHvArm64RegisterQ7;
1354 aenmNames[iReg++] = WHvArm64RegisterQ8;
1355 aenmNames[iReg++] = WHvArm64RegisterQ9;
1356 aenmNames[iReg++] = WHvArm64RegisterQ10;
1357 aenmNames[iReg++] = WHvArm64RegisterQ11;
1358 aenmNames[iReg++] = WHvArm64RegisterQ12;
1359 aenmNames[iReg++] = WHvArm64RegisterQ13;
1360 aenmNames[iReg++] = WHvArm64RegisterQ14;
1361 aenmNames[iReg++] = WHvArm64RegisterQ15;
1362
1363 aenmNames[iReg++] = WHvArm64RegisterQ16;
1364 aenmNames[iReg++] = WHvArm64RegisterQ17;
1365 aenmNames[iReg++] = WHvArm64RegisterQ18;
1366 aenmNames[iReg++] = WHvArm64RegisterQ19;
1367 aenmNames[iReg++] = WHvArm64RegisterQ20;
1368 aenmNames[iReg++] = WHvArm64RegisterQ21;
1369 aenmNames[iReg++] = WHvArm64RegisterQ22;
1370 aenmNames[iReg++] = WHvArm64RegisterQ23;
1371 aenmNames[iReg++] = WHvArm64RegisterQ24;
1372 aenmNames[iReg++] = WHvArm64RegisterQ25;
1373 aenmNames[iReg++] = WHvArm64RegisterQ26;
1374 aenmNames[iReg++] = WHvArm64RegisterQ27;
1375 aenmNames[iReg++] = WHvArm64RegisterQ28;
1376 aenmNames[iReg++] = WHvArm64RegisterQ29;
1377 aenmNames[iReg++] = WHvArm64RegisterQ30;
1378 aenmNames[iReg++] = WHvArm64RegisterQ31;
1379 }
1380 if (fWhat & CPUMCTX_EXTRN_FPCR)
1381 aenmNames[iReg++] = WHvArm64RegisterFpcr;
1382 if (fWhat & CPUMCTX_EXTRN_FPSR)
1383 aenmNames[iReg++] = WHvArm64RegisterFpsr;
1384
1385 /* System registers. */
1386 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1387 {
1388 aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
1389 aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
1390 aenmNames[iReg++] = WHvArm64RegisterFarEl1;
1391 /** @todo */
1392 }
1393
1394#if 0
1395 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1396 {
1397 aenmNames[iReg++] = WHvArm64RegisterDbgbcr0El1;
1398 /** @todo */
1399 }
1400#endif
1401
1402 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1403 {
1404 aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
1405 /** @todo */
1406 }
1407
1408 size_t const cRegs = iReg;
1409 Assert(cRegs < RT_ELEMENTS(aenmNames));
1410
1411 /*
1412 * Get the registers.
1413 */
1414 WHV_REGISTER_VALUE aValues[128];
1415 RT_ZERO(aValues);
1416 Assert(RT_ELEMENTS(aValues) >= cRegs);
1417 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1418 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1419 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1420 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1421 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1422 , VERR_NEM_GET_REGISTERS_FAILED);
1423
1424 iReg = 0;
1425#define GET_REG64(a_DstVar, a_enmName) do { \
1426 Assert(aenmNames[iReg] == (a_enmName)); \
1427 (a_DstVar).x = aValues[iReg].Reg64; \
1428 iReg++; \
1429 } while (0)
1430#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1431 Assert(aenmNames[iReg] == (a_enmName)); \
1432 (a_DstVar) = aValues[iReg].Reg64; \
1433 iReg++; \
1434 } while (0)
1435#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1436 Assert(aenmNames[iReg] == (a_enmName)); \
1437 (a_DstVar).u64 = aValues[iReg].Reg64; \
1438 iReg++; \
1439 } while (0)
1440#define GET_REG128(a_DstVar, a_enmName) do { \
1441 Assert(aenmNames[iReg] == a_enmName); \
1442 (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
1443 (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
1444 iReg++; \
1445 } while (0)
1446
1447 /* GPRs */
1448 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1449 {
1450 if (fWhat & CPUMCTX_EXTRN_X0)
1451 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
1452 if (fWhat & CPUMCTX_EXTRN_X1)
1453 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
1454 if (fWhat & CPUMCTX_EXTRN_X2)
1455 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
1456 if (fWhat & CPUMCTX_EXTRN_X3)
1457 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
1458 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1459 {
1460 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
1461 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
1462 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
1463 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
1464 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
1465 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
1466 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
1467 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
1468 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
1469 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
1470 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
1471 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
1472 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
1473 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
1474 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
1475 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
1476 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
1477 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
1478 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
1479 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
1480 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
1481 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
1482 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
1483 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
1484 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
1485 }
1486 if (fWhat & CPUMCTX_EXTRN_LR)
1487 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
1488 if (fWhat & CPUMCTX_EXTRN_FP)
1489 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
1490 }
1491
1492 /* RIP & Flags */
1493 if (fWhat & CPUMCTX_EXTRN_PC)
1494 GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
1495 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1496 GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1497 if (fWhat & CPUMCTX_EXTRN_SPSR)
1498 GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
1499 if (fWhat & CPUMCTX_EXTRN_ELR)
1500 GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
1501 if (fWhat & CPUMCTX_EXTRN_SP)
1502 {
1503 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
1504 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
1505 }
1506 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1507 {
1508 GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
1509 GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
1510 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1511 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1512 }
1513
1514 /* Vector state. */
1515 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1516 {
1517 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
1518 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
1519 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
1520 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
1521 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
1522 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
1523 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
1524 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
1525 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
1526 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
1527 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
1528 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
1529 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
1530 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
1531 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
1532 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);
1533
1534 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
1535 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
1536 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
1537 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
1538 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
1539 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
1540 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
1541 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
1542 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
1543 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
1544 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
1545 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
1546 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
1547 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
1548 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
1549 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
1550 }
1551 if (fWhat & CPUMCTX_EXTRN_FPCR)
1552 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
1553 if (fWhat & CPUMCTX_EXTRN_FPSR)
1554 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);
1555
1556 /* System registers. */
1557 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1558 {
1559 GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
1560 GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
1561 GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
1562 /** @todo */
1563 }
1564
1565#if 0
1566 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1567 {
1568 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[0].Ctrl, WHvArm64RegisterDbgbcr0El1);
1569 /** @todo */
1570 }
1571#endif
1572
1573 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1574 {
1575 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
1576 /** @todo */
1577 }
1578
1579 /* Almost done, just update extrn flags. */
1580 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1581 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1582 pVCpu->cpum.GstCtx.fExtrn = 0;
1583
1584 return VINF_SUCCESS;
1585}
1586
1587
1588/**
1589 * Interface for importing state on demand (used by IEM).
1590 *
1591 * @returns VBox status code.
1592 * @param pVCpu The cross context CPU structure.
1593 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1594 */
1595VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1596{
1597 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1598 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1599}
1600
1601
/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * Currently a stub on win.arm64: it only asserts the calling context and
 * returns success without touching the output parameters.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

#pragma message("NEMHCQueryCpuTick: Implement it!")
#if 0 /** @todo */
    /* Call the official API.
       NOTE(review): this disabled code is an x86 leftover (TSC/TSC_AUX);
       presumably the ARMv8 version needs the CNTVCT counter instead -- confirm
       when implementing. */
    WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2] = { { {0, 0} }, { {0, 0} } };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
#else
    /* Not implemented yet; the output parameters are left untouched. */
    RT_NOREF(pVCpu, pcTicks, puAux);
#endif
    return VINF_SUCCESS;
}
1637
1638
/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * Currently a stub on win.arm64: the paused counter value is ignored and
 * success is returned.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Call the official API to do the job.
     */
    if (pVM->cCpus > 1)
        RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */

#pragma message("NEMHCResumeCpuTickOnAll: Implement it!")
#if 0 /** @todo */
    /* Start with the first CPU.
       NOTE(review): disabled x86 leftover (WHvX64RegisterTsc); needs the ARMv8
       counter registers when implemented -- confirm.  Also note the format
       string of the second AssertLogRelMsgReturn below says "0" where iCpu is
       passed; fix when enabling. */
    WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE Value = { {0, 0} };
    Value.Reg64 = uPausedTscValue;
    uint64_t const uFirstTsc = ASMReadTSC();
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_SET_TSC);

    /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
       that we don't introduce too much drift here. */
    for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
    {
        Assert(enmName == WHvX64RegisterTsc);
        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
        Value.Reg64 = uPausedTscValue + offDelta;
        hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
                               pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_SET_TSC);
    }
#else
    /* Not implemented yet; the paused counter value is simply discarded. */
    RT_NOREF(uPausedTscValue);
#endif

    return VINF_SUCCESS;
}
1692
1693
#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 *
 * Only acts when log level 3 is enabled for this group; dumps the general
 * purpose registers, PC/PSTATE and a few EL1 system registers via DBGF.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        /* Disassembly of the current guest instruction is currently disabled. */
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */
1728
1729
1730/**
1731 * Copies register state from the (common) exit context.
1732 *
1733 * ASSUMES no state copied yet.
1734 *
1735 * @param pVCpu The cross context per CPU structure.
1736 * @param pMsgHdr The common message header.
1737 */
1738DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1739{
1740 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1741 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1742
1743 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1744 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1745
1746 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1747}
1748
1749
/**
 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
 *
 * Note: initialized positionally (write-access, false, false), so keep the
 * member order stable.
 */
typedef struct NEMHCWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool fWriteAccess;
    /** Output: Set if we did something. */
    bool fDidSomething;
    /** Output: Set if we should resume. */
    bool fCanResume;
} NEMHCWINHMACPCCSTATE;
1763
/**
 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
 *      Worker for nemR3WinHandleMemoryAccess; pvUser points to a
 *      NEMHCWINHMACPCCSTATE structure. }
 *
 * Reconciles VBox's idea of the page state with the actual protection and
 * the access that faulted: maps the page, declares the access resumable, or
 * unmaps the page so the instruction can be emulated/restarted.
 */
NEM_TMPL_STATIC DECLCALLBACK(int)
nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
    pState->fDidSomething = false;
    pState->fCanResume = false;

    /* If A20 is disabled, we may need to make another query on the masked
       page to get the correct protection information. */
    /* NOTE(review): the A20 remark above looks like an x86 leftover; in this
       ARMv8 backend GCPhysSrc is always identical to GCPhys. */
    uint8_t u2State = pInfo->u2NemState;
    RTGCPHYS GCPhysSrc = GCPhys;

    /*
     * Consolidate current page state with actual page protection and access type.
     * We don't really consider downgrades here, as they shouldn't happen.
     */
    int rc;
    switch (u2State)
    {
        /* Page not mapped into the partition yet: map it now, unless nothing
           is allowed or this is a write to a non-writable page. */
        case NEM_WIN_PAGE_STATE_UNMAPPED:
        case NEM_WIN_PAGE_STATE_NOT_SET:
            if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Don't bother remapping it if it's a write request to a non-writable page. */
            if (   pState->fWriteAccess
                && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Map the page. */
            rc = nemHCNativeSetPhysPage(pVM,
                                        pVCpu,
                                        GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        pInfo->fNemProt,
                                        &u2State,
                                        true /*fBackingState*/);
            pInfo->u2NemState = u2State;
            Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
                  GCPhys, g_apszPageStates[u2State], rc));
            pState->fDidSomething = true;
            pState->fCanResume = true;
            return rc;

        /* Mapped read-only: a pure read/execute access is already in sync and
           must be emulated; a write falls through to the unmap path below. */
        case NEM_WIN_PAGE_STATE_READABLE:
            if (   !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
                return VINF_SUCCESS;
            }

            break;

        /* Mapped writable: if writing is still allowed, another vCPU likely
           fixed the mapping already and the instruction can be restarted. */
        case NEM_WIN_PAGE_STATE_WRITABLE:
            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
            {
                if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
                else
                {
                    pState->fCanResume = true;
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
                          GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
                }
                return VINF_SUCCESS;
            }
            break;

        default:
            AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
    }

    /*
     * Unmap and restart the instruction.
     * If this fails, which it does every so often, just unmap everything for now.
     */
    /** @todo figure out whether we mess up the state or if it's WHv. */
    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        pState->fDidSomething = true;
        pState->fCanResume = true;
        pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
            GCPhys, g_apszPageStates[u2State], hrc, hrc));
    return VERR_NEM_UNMAP_PAGES_FAILED;
}
1871
1872
1873/**
1874 * Returns the byte size from the given access SAS value.
1875 *
1876 * @returns Number of bytes to transfer.
1877 * @param uSas The SAS value to convert.
1878 */
1879DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
1880{
1881 switch (uSas)
1882 {
1883 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1884 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1885 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1886 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1887 default:
1888 AssertReleaseFailed();
1889 }
1890
1891 return 0;
1892}
1893
1894
1895/**
1896 * Sets the given general purpose register to the given value.
1897 *
1898 * @param pVCpu The cross context virtual CPU structure of the
1899 * calling EMT.
1900 * @param uReg The register index.
1901 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1902 * @param fSignExtend Flag whether to sign extend the value.
1903 * @param u64Val The value.
1904 */
1905DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1906{
1907 AssertReturnVoid(uReg < 31);
1908
1909 if (f64BitReg)
1910 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1911 else
1912 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1913
1914 /* Mark the register as not extern anymore. */
1915 switch (uReg)
1916 {
1917 case 0:
1918 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1919 break;
1920 case 1:
1921 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1922 break;
1923 case 2:
1924 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1925 break;
1926 case 3:
1927 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1928 break;
1929 default:
1930 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1931 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1932 }
1933}
1934
1935
1936/**
1937 * Gets the given general purpose register and returns the value.
1938 *
1939 * @returns Value from the given register.
1940 * @param pVCpu The cross context virtual CPU structure of the
1941 * calling EMT.
1942 * @param uReg The register index.
1943 */
1944DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1945{
1946 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1947
1948 if (uReg == ARMV8_AARCH64_REG_ZR)
1949 return 0;
1950
1951 /** @todo Import the register if extern. */
1952 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1953
1954 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1955}
1956
1957
/**
 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
 *
 * First tries to bring the page mapping back in sync so the instruction can
 * simply be restarted; otherwise decodes the data abort syndrome and emulates
 * the MMIO access through PGM.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 * @param   pExit   The VM exit information to handle.
 * @sa      nemHCWinHandleMessageMemory
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    uint64_t const uHostTsc = ASMReadTSC();
    Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);

    /*
     * Ask PGM for information about the given GCPhys.  We need to check if we're
     * out of sync first.
     */
    WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
    NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
    PGMPHYSNEMPAGEINFO Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
                                       nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
    if (RT_SUCCESS(rc))
    {
        if (Info.fNemProt & (  pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
        {
            /* The callback fixed the mapping; just restart the instruction. */
            if (State.fCanResume)
            {
                Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
                      pVCpu->idCpu, pHdr->Pc,
                      pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
                      State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
                                 pHdr->Pc, uHostTsc);
                return VINF_SUCCESS;
            }
        }
        Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
              State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
    }
    else
        Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
              g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));

    /*
     * Emulate the memory access, either access handler or special memory.
     */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                            pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                                            ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                                            : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                                            pHdr->Pc, uHostTsc);
#pragma message("nemR3WinHandleExitMemory: Why not calling nemR3WinCopyStateFromArmHeader?")
/** @todo r=bird: Why is nemR3WinCopyStateFromArmHeader commented out? */
    //nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
    RT_NOREF_PV(pExitRec);
    rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    AssertRCReturn(rc, rc);

    /* Decode the data abort syndrome (ISS) fields. */
#ifdef LOG_ENABLED
    uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
    RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
#endif
    RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
    uint64_t const uIss = pExit->MemoryAccess.Syndrome;
    bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));

    RT_NOREF(fL2Fault);

    /* Without a valid syndrome (ISV clear) the access cannot be decoded here. */
    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    /* Perform the access through PGM, moving data between guest register and MMIO. */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3WinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    /* Advance past the (always 4-byte) aborting instruction on success. */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */

    return rcStrict;
}
2078
2079
/**
 * Deals with hypercall exits (WHvRunVpExitReasonHypercall).
 *
 * Currently only serves SMCCC standard secure service calls, i.e. the PSCI
 * interface (version query, system off/reset, CPU on, feature discovery);
 * anything else gets ARM_PSCI_STS_NOT_SUPPORTED back in X0.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 * @param   pExit   The VM exit information to handle.
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /** @todo Raise exception to EL1 if PSCI not configured. */
    /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
    uint32_t uFunId = pExit->Hypercall.Immediate;
    bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
    uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
    uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
    if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
    {
        switch (uFunNum)
        {
            case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                /* Report PSCI version 1.2 to the guest. */
                nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
                break;
            case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                rcStrict = VMR3PowerOff(pVM->pUVM);
                break;
            case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
            case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
            {
                /* Honour the PDM/HaltOnReset configuration knob if present. */
                bool fHaltOnReset;
                int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
                if (RT_SUCCESS(rc) && fHaltOnReset)
                {
                    Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
                    rcStrict = VINF_EM_HALT;
                }
                else
                {
                    /** @todo pVM->pdm.s.fResetFlags = fFlags; */
                    VM_FF_SET(pVM, VM_FF_RESET);
                    rcStrict = VINF_EM_RESET;
                }
                break;
            }
            case ARM_PSCI_FUNC_ID_CPU_ON:
            {
                /* Kick the target vCPU into execution at the given address. */
                uint64_t u64TgtCpu = pExit->Hypercall.X[1];
                RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
                uint64_t u64CtxId = pExit->Hypercall.X[3];
                VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
                nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
                break;
            }
            case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
            {
                /* Report support for exactly the functions implemented above. */
                uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
                switch (u32FunNum)
                {
                    case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                    case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                    case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
                    case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
                    case ARM_PSCI_FUNC_ID_CPU_ON:
                        nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                        false /*f64BitReg*/, false /*fSignExtend*/,
                                        (uint64_t)ARM_PSCI_STS_SUCCESS);
                        break;
                    default:
                        nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                        false /*f64BitReg*/, false /*fSignExtend*/,
                                        (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
                }
                break;
            }
            default:
                nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
        }
    }
    else
        nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);


    return rcStrict;
}
2168
2169
/**
 * Deals with unrecoverable exception exits
 * (WHvRunVpExitReasonUnrecoverableException).
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 * @param   pExit   The VM exit information to handle.
 * @sa      nemHCWinHandleMessageUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    /* NOTE(review): disabled x86 variant kept for reference. */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
#else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
    nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
    AssertReleaseFailed(); /* Intentionally fatal: handling is not implemented yet. */
    RT_NOREF_PV(pVM);
    return VINF_SUCCESS;
#endif
}
2202
2203
/**
 * Handles VM exits.
 *
 * Imports the full guest register state and then dispatches to the exit
 * specific handler based on the WHv exit reason.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 * @param   pExit   The VM exit information to handle.
 * @sa      nemHCWinHandleMessage
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /* Pull in the complete register state first; the handlers below rely on it. */
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    switch (pExit->ExitReason)
    {
        case WHvRunVpExitReasonUnmappedGpa:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonCanceled:
            /* WHvCancelRunVirtualProcessor was called; nothing to do. */
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        case WHvRunVpExitReasonHypercall:
            return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
2253
2254
/**
 * The NEM inner execution loop (ring-3, Windows/ARMv8 backend).
 *
 * Syncs the guest ID registers once, then repeatedly pushes the guest state to
 * Hyper-V, runs the virtual processor via WHvRunVirtualProcessor and dispatches
 * exits, until a status or pending forced-action requires returning to EM.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
    {
        /*
         * Sync the guest ID registers which are per VM once (they are readonly and stay constant during VM lifetime).
         * Need to do it here and not during the init because loading a saved state might change the ID registers from what
         * done in the call to CPUMR3PopulateFeaturesByIdRegisters().
         */
        PCCPUMIDREGS pIdRegsGst = NULL;
        int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
        AssertRCReturn(rc, rc);

        WHV_REGISTER_NAME aenmNames[12];
        WHV_REGISTER_VALUE aValues[12];

        uint32_t iReg = 0;
#define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg] = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64 = (a_uValue); \
            iReg++; \
        } while (0)


        ADD_REG64(WHvArm64RegisterIdAa64Mmfr0El1, pIdRegsGst->u64RegIdAa64Mmfr0El1);
#undef ADD_REG64

        /* NOTE(review): the actual register write is currently disabled - presumably
           pending host support; confirm before enabling. */
        //HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
        //AssertReturn(SUCCEEDED(hrc), VERR_NEM_IPE_9);

        pVCpu->nem.s.fIdRegsSynced = true;
    }

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    /*
     * The run loop.
     *
     * Current approach to state updating to use the sledgehammer and sync
     * everything every time.  This will be optimized later.
     */
    const bool fSingleStepping = DBGFIsStepping(pVCpu);
//    const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
//                                                  : VM_FF_HP_R0_PRE_HM_STEP_MASK;
//    const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        /*
         * Pending interrupts or such?  Need to check and deal with this prior
         * to the state syncing.
         */
#if 0
        if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_UPDATE_IRQ))
        {
            /* Try inject interrupt. */
            rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
            if (rcStrict == VINF_SUCCESS)
            { /* likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }
#endif

        /* Ensure that Hyper-V has the whole state. */
        int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
        AssertRCReturn(rc2, rc2);

        /*
         * Poll timers and run for a bit.
         *
         * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
         * so we take the time of the next timer event and uses that as a deadline.
         * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
         */
        /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
         *        the whole polling job when timers have changed... */
        uint64_t offDeltaIgnored;
        uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
        if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
            && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
            /* Enter the WAIT state so nemR3NativeNotifyFF knows we are inside the hypervisor. */
            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
            {
#if 0 //def LOG_ENABLED
                if (LogIsFlowEnabled())
                {
                    static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
                                                                   WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
                    WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { {{0, 0} } };
                    WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
                    LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
                             pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
                             aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
                }
#endif
                MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
                TMNotifyStartOfExecution(pVM, pVCpu);

                /* Run the guest until the next exit (blocks until then). */
                HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));

                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
#ifdef LOG_ENABLED
                LogFlow(("NEM/%u: Exit  @ @todo Reason=%#x\n", pVCpu->idCpu, ExitReason.ExitReason));
#endif
                if (SUCCEEDED(hrc))
                {
                    /*
                     * Deal with the message.
                     */
                    rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
                    if (rcStrict == VINF_SUCCESS)
                    { /* hopefully likely */ }
                    else
                    {
                        LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                        break;
                    }
                }
                else
                    AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
                                                 pVCpu->idCpu, hrc, GetLastError()),
                                                VERR_NEM_IPE_0);

                /*
                 * If no relevant FFs are pending, loop.
                 */
                if (   !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
                    && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
                    continue;

                /** @todo Try handle pending flags, not just return to EM loops.  Take care
                 *        not to set important RCs here unless we've handled a message. */
                LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
                         pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
            }
            else
            {
                LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
            }
        }
        else
        {
            LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
        }
        break;
    } /* the run loop */


    /*
     * If the CPU is running, make sure to stop it before we try sync back the
     * state and return to EM.  We don't sync back the whole state if we can help it.
     */
    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

#if 0
    UINT32 cbWritten;
    WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
    HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
                                              &IntrState, sizeof(IntrState), &cbWritten);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    /* NOTE(review): the LogFlowFunc below has a %u but no matching argument - fix if ever enabled. */
    LogFlowFunc(("IntrState: cbWritten=%u\n"));
    for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
    {
        WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
        LogFlowFunc(("IntrState: Intr %u:\n"
                     "    Enabled=%RTbool\n"
                     "    EdgeTriggered=%RTbool\n"
                     "    Asserted=%RTbool\n"
                     "    SetPending=%RTbool\n"
                     "    Active=%RTbool\n"
                     "    Direct=%RTbool\n"
                     "    GicrIpriorityrConfigured=%u\n"
                     "    GicrIpriorityrActive=%u\n",
                     i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
                     pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
    }
#endif

#if 0
    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel,
             pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
#endif
    return rcStrict;
}
2500
2501
2502VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2503{
2504 Assert(VM_IS_NEM_ENABLED(pVM));
2505 RT_NOREF(pVM, pVCpu);
2506 return true;
2507}
2508
2509
2510bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2511{
2512 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2513 return false;
2514}
2515
2516
2517void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2518{
2519 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2520 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2521 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2522 RT_NOREF_PV(hrc);
2523 RT_NOREF_PV(fFlags);
2524}
2525
2526
2527DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2528{
2529 RT_NOREF(pVM, fUseDebugLoop);
2530 return false;
2531}
2532
2533
2534DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2535{
2536 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2537 return false;
2538}
2539
2540
2541DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2542{
2543 PGMPAGEMAPLOCK Lock;
2544 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2545 if (RT_SUCCESS(rc))
2546 PGMPhysReleasePageMappingLock(pVM, &Lock);
2547 return rc;
2548}
2549
2550
2551DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2552{
2553 PGMPAGEMAPLOCK Lock;
2554 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2555 if (RT_SUCCESS(rc))
2556 PGMPhysReleasePageMappingLock(pVM, &Lock);
2557 return rc;
2558}
2559
2560
2561VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2562 uint8_t *pu2State, uint32_t *puNemRange)
2563{
2564 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2565 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2566
2567 *pu2State = UINT8_MAX;
2568 RT_NOREF(puNemRange);
2569
2570 if (pvR3)
2571 {
2572 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2573 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2574 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2575 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2576 if (SUCCEEDED(hrc))
2577 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2578 else
2579 {
2580 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2581 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2582 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2583 return VERR_NEM_MAP_PAGES_FAILED;
2584 }
2585 }
2586 return VINF_SUCCESS;
2587}
2588
2589
2590VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2591{
2592 RT_NOREF(pVM);
2593 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2594}
2595
2596
/**
 * Early notification that an MMIO/MMIO2 range is being mapped.
 *
 * Unmaps the RAM being replaced (if any) and maps the MMIO2 backing into the
 * Hyper-V partition, optionally with dirty-page tracking when supported.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM backing being replaced (not used by this backend).
 * @param   pvMmio2     MMIO2 backing to map, NULL for pure MMIO.
 * @param   pu2State    Where to return the new NEM page state.
 * @param   puNemRange  NEM range cookie (unused here).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Unmap failure is non-fatal here: the MMIO2 map below overrides the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2656
2657
2658VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2659 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2660{
2661 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2662 return VINF_SUCCESS;
2663}
2664
2665
/**
 * Notification that an MMIO/MMIO2 range is being unmapped.
 *
 * Unmaps the MMIO2 backing from the Hyper-V partition and, when the range
 * replaced RAM, maps the original RAM backing again.  Failures are noted in
 * the return code but processing continues so as much as possible is undone.
 *
 * @returns VBox status code (first failure wins).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM backing to restore when replacing.
 * @param   pvMmio2     The MMIO2 backing (only used for logging here).
 * @param   pu2State    Where to return the new NEM page state, optional.
 * @param   puNemRange  NEM range cookie (unused here).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    int rc = VINF_SUCCESS;
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (FAILED(hrc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                     GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
        }
    }

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_MAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
        }
        if (pu2State)
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
    }
    /* Mark the pages as unmapped if relevant. */
    else if (pu2State)
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;

    RT_NOREF(pvMmio2, puNemRange);
    return rc;
}
2721
2722
2723VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2724 void *pvBitmap, size_t cbBitmap)
2725{
2726 Assert(VM_IS_NEM_ENABLED(pVM));
2727 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2728 Assert(cbBitmap == (uint32_t)cbBitmap);
2729 RT_NOREF(uNemRange);
2730
2731 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2732 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2733 if (SUCCEEDED(hrc))
2734 return VINF_SUCCESS;
2735
2736 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2737 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2738 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2739}
2740
2741
/**
 * Early ROM registration notification.
 *
 * Currently a no-op for this backend: the pages are mapped lazily or via
 * protection-change notifications (see the disabled code below).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the ROM range.
 * @param   cb          Size of the ROM range.
 * @param   pvPages     Ring-3 backing of the pages.
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX.
 * @param   pu2State    Where to return the new NEM page state.
 * @param   puNemRange  Where to return the NEM range cookie (always 0 here).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
2780
2781
2782VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2783 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2784{
2785 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2786 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2787 *pu2State = UINT8_MAX;
2788
2789 /*
2790 * (Re-)map readonly.
2791 */
2792 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2793 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2794 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2795 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2796 if (SUCCEEDED(hrc))
2797 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2798 else
2799 {
2800 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2801 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2802 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2803 return VERR_NEM_MAP_PAGES_FAILED;
2804 }
2805 RT_NOREF(fFlags, puNemRange);
2806 return VINF_SUCCESS;
2807}
2808
2809VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2810{
2811 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2812 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
2813 RT_NOREF(pVCpu, fEnabled);
2814}
2815
2816
2817void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2818{
2819 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2820 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2821}
2822
2823
/**
 * Physical access handler deregistration notification.
 *
 * Restores a full read+write+execute mapping of the memory backing the range
 * (when available), since the access handler no longer needs to intercept it.
 *
 * @param   pVM         The cross context VM structure.
 * @param   enmKind     The handler kind (unused here).
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   pvMemR3     Ring-3 backing of the range, NULL if none.
 * @param   pu2State    Where to return the new NEM page state.
 */
VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
    if (pvMemR3)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
            AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
                                   pvMemR3, GCPhys, cb, hrc));
    }
    RT_NOREF(enmKind);
}
2845
2846
2847void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2848 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2849{
2850 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2851 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2852 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2853}
2854
2855
2856/**
2857 * Worker that maps pages into Hyper-V.
2858 *
2859 * This is used by the PGM physical page notifications as well as the memory
2860 * access VMEXIT handlers.
2861 *
2862 * @returns VBox status code.
2863 * @param pVM The cross context VM structure.
2864 * @param pVCpu The cross context virtual CPU structure of the
2865 * calling EMT.
2866 * @param GCPhysSrc The source page address.
2867 * @param GCPhysDst The hyper-V destination page. This may differ from
2868 * GCPhysSrc when A20 is disabled.
2869 * @param fPageProt NEM_PAGE_PROT_XXX.
2870 * @param pu2State Our page state (input/output).
2871 * @param fBackingChanged Set if the page backing is being changed.
2872 * @thread EMT(pVCpu)
2873 */
2874NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
2875 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
2876{
2877 /*
2878 * Looks like we need to unmap a page before we can change the backing
2879 * or even modify the protection. This is going to be *REALLY* efficient.
2880 * PGM lends us two bits to keep track of the state here.
2881 */
2882 RT_NOREF(pVCpu);
2883 uint8_t const u2OldState = *pu2State;
2884 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
2885 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
2886 if ( fBackingChanged
2887 || u2NewState != u2OldState)
2888 {
2889 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
2890 {
2891 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2892 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
2893 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2894 if (SUCCEEDED(hrc))
2895 {
2896 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2897 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2898 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2899 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
2900 {
2901 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
2902 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2903 return VINF_SUCCESS;
2904 }
2905 }
2906 else
2907 {
2908 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2909 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2910 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2911 return VERR_NEM_INIT_FAILED;
2912 }
2913 }
2914 }
2915
2916 /*
2917 * Writeable mapping?
2918 */
2919 if (fPageProt & NEM_PAGE_PROT_WRITE)
2920 {
2921 void *pvPage;
2922 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
2923 if (RT_SUCCESS(rc))
2924 {
2925 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
2926 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2927 if (SUCCEEDED(hrc))
2928 {
2929 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2930 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2931 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2932 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2933 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2934 return VINF_SUCCESS;
2935 }
2936 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2937 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2938 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2939 return VERR_NEM_INIT_FAILED;
2940 }
2941 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2942 return rc;
2943 }
2944
2945 if (fPageProt & NEM_PAGE_PROT_READ)
2946 {
2947 const void *pvPage;
2948 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
2949 if (RT_SUCCESS(rc))
2950 {
2951 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
2952 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
2953 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2954 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
2955 if (SUCCEEDED(hrc))
2956 {
2957 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2958 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2959 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2960 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2961 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2962 return VINF_SUCCESS;
2963 }
2964 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2965 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2966 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2967 return VERR_NEM_INIT_FAILED;
2968 }
2969 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2970 return rc;
2971 }
2972
2973 /* We already unmapped it above. */
2974 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2975 return VINF_SUCCESS;
2976}
2977
2978
/**
 * Unmaps a single page from the Hyper-V partition, updating the NEM page
 * state tracking.
 *
 * @returns VBox status code (VERR_NEM_IPE_6 on unmap failure).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical address of the page (offset bits
 *                      are masked off before the call to Hyper-V).
 * @param   pu2State    The NEM page state to check and update.
 */
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    /* Already unmapped?  Then there is nothing to do. */
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
}
3004
3005
3006int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3007 PGMPAGETYPE enmType, uint8_t *pu2State)
3008{
3009 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3010 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3011 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3012
3013 int rc;
3014 RT_NOREF_PV(fPageProt);
3015 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3016 return rc;
3017}
3018
3019
3020VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3021 PGMPAGETYPE enmType, uint8_t *pu2State)
3022{
3023 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3024 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3025 Assert(VM_IS_NEM_ENABLED(pVM));
3026 RT_NOREF(HCPhys, enmType, pvR3);
3027
3028 RT_NOREF_PV(fPageProt);
3029 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3030}
3031
3032
3033VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3034 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3035{
3036 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
3037 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
3038 Assert(VM_IS_NEM_ENABLED(pVM));
3039 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
3040
3041 RT_NOREF_PV(fPageProt);
3042 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3043}
3044
3045
3046/**
3047 * Returns features supported by the NEM backend.
3048 *
3049 * @returns Flags of features supported by the native NEM backend.
3050 * @param pVM The cross context VM structure.
3051 */
3052VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3053{
3054 RT_NOREF(pVM);
3055 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
3056 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
3057}
3058
3059
3060/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3061 *
3062 * Open questions:
3063 * - Why can't one read and write WHvArm64RegisterId*
3064 * - WHvArm64RegisterDbgbcr0El1 is not readable?
3065 * - Getting notified about system register reads/writes (GIC)?
3066 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3067 * - Handling of (vTimer) interrupts, how is WHvRequestInterrupt() supposed to be used?
3068 */
3069
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette