VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 106842

Last change on this file since 106842 was 106728, checked in by vboxsync, 2 months ago

VMM/NEM-win/arm: Release build warnings. jiraref:VBP-1253

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 125.1 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 106728 2024-10-27 21:40:07Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/vmm/nem.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/pdm.h>
61#include <VBox/vmm/dbgftrace.h>
62#include "NEMInternal.h"
63#include <VBox/vmm/vmcc.h>
64
65#include <iprt/ldr.h>
66#include <iprt/path.h>
67#include <iprt/string.h>
68#include <iprt/system.h>
69#include <iprt/utf16.h>
70
71#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
72HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
73# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
74#endif
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80
81
82/*********************************************************************************************************************************
83* Global Variables *
84*********************************************************************************************************************************/
/** @name APIs imported from WinHvPlatform.dll
 *  Resolved at runtime by nemR3WinInitProbeAndLoad() via the g_aImports table.
 * @{ */
static decltype(WHvGetCapability) *                 g_pfnWHvGetCapability;
static decltype(WHvCreatePartition) *               g_pfnWHvCreatePartition;
static decltype(WHvSetupPartition) *                g_pfnWHvSetupPartition;
static decltype(WHvDeletePartition) *               g_pfnWHvDeletePartition;
static decltype(WHvGetPartitionProperty) *          g_pfnWHvGetPartitionProperty;
static decltype(WHvSetPartitionProperty) *          g_pfnWHvSetPartitionProperty;
static decltype(WHvMapGpaRange) *                   g_pfnWHvMapGpaRange;
static decltype(WHvUnmapGpaRange) *                 g_pfnWHvUnmapGpaRange;
static decltype(WHvTranslateGva) *                  g_pfnWHvTranslateGva;
static decltype(WHvQueryGpaRangeDirtyBitmap) *      g_pfnWHvQueryGpaRangeDirtyBitmap;
static decltype(WHvCreateVirtualProcessor) *        g_pfnWHvCreateVirtualProcessor;
static decltype(WHvDeleteVirtualProcessor) *        g_pfnWHvDeleteVirtualProcessor;
static decltype(WHvRunVirtualProcessor) *           g_pfnWHvRunVirtualProcessor;
static decltype(WHvCancelRunVirtualProcessor) *     g_pfnWHvCancelRunVirtualProcessor;
static decltype(WHvGetVirtualProcessorRegisters) *  g_pfnWHvGetVirtualProcessorRegisters;
static decltype(WHvSetVirtualProcessorRegisters) *  g_pfnWHvSetVirtualProcessorRegisters;
/* Deliberately not static - NOTE(review): presumably referenced from another
   translation unit (no user visible in this file); confirm before changing. */
decltype(WHvRequestInterrupt) *                     g_pfnWHvRequestInterrupt;
/** @} */

/** The Windows build number.
 *  Defaults to 17134; overwritten with RTSystemGetNtBuildNo() in
 *  nemR3NativeInit(). */
static uint32_t g_uBuildNo = 17134;
108
109
110
/**
 * Import instructions.
 *
 * One entry per API to resolve at init time; the table is walked by
 * nemR3WinInitProbeAndLoad(), which stores the resolved addresses in the
 * g_pfnWHv* variables above (NULL for optional imports that are absent).
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap), /* optional: declared locally when the SDK lacks it */
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
142
143
144/*
145 * Let the preprocessor alias the APIs to import variables for better autocompletion.
146 */
147#ifndef IN_SLICKEDIT
148# define WHvGetCapability g_pfnWHvGetCapability
149# define WHvCreatePartition g_pfnWHvCreatePartition
150# define WHvSetupPartition g_pfnWHvSetupPartition
151# define WHvDeletePartition g_pfnWHvDeletePartition
152# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
153# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
154# define WHvMapGpaRange g_pfnWHvMapGpaRange
155# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
156# define WHvTranslateGva g_pfnWHvTranslateGva
157# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
158# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
159# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
160# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
161# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
162# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
163# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
164# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
165# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
166
167# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
168# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
169# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
170
171#endif
172
#if 0 /* unused */
/** WHV_MEMORY_ACCESS_TYPE names (indexed by the access-type value). */
static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
#endif
/** NEM_WIN_PAGE_STATE_XXX names (indexed by the page-state value). */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
#ifdef LOG_ENABLED
/** HV_INTERCEPT_ACCESS_TYPE names - debug logging only. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
#endif
183
184
185/*********************************************************************************************************************************
186* Internal Functions *
187*********************************************************************************************************************************/
188DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
189DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
190
191NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
192 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
193
194/**
195 * Worker for nemR3NativeInit that probes and load the native API.
196 *
197 * @returns VBox status code.
198 * @param fForced Whether the HMForced flag is set and we should
199 * fail if we cannot initialize.
200 * @param pErrInfo Where to always return error info.
201 */
202static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
203{
204 /*
205 * Check that the DLL files we need are present, but without loading them.
206 * We'd like to avoid loading them unnecessarily.
207 */
208 WCHAR wszPath[MAX_PATH + 64];
209 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
210 if (cwcPath >= MAX_PATH || cwcPath < 2)
211 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
212
213 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
214 wszPath[cwcPath++] = '\\';
215 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
216 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
217 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
218
219 /*
220 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
221 */
222 /** @todo */
223
224 /** @todo would be great if we could recognize a root partition from the
225 * CPUID info, but I currently don't dare do that. */
226
227 /*
228 * Now try load the DLLs and resolve the APIs.
229 */
230 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
231 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
232 int rc = VINF_SUCCESS;
233 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
234 {
235 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
236 if (RT_FAILURE(rc2))
237 {
238 if (!RTErrInfoIsSet(pErrInfo))
239 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
240 else
241 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
242 ahMods[i] = NIL_RTLDRMOD;
243 rc = VERR_NEM_INIT_FAILED;
244 }
245 }
246 if (RT_SUCCESS(rc))
247 {
248 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
249 {
250 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
251 if (RT_SUCCESS(rc2))
252 {
253 if (g_aImports[i].fOptional)
254 LogRel(("NEM: info: Found optional import %s!%s.\n",
255 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
256 }
257 else
258 {
259 *g_aImports[i].ppfn = NULL;
260
261 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
262 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
263 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
264 if (!g_aImports[i].fOptional)
265 {
266 if (RTErrInfoIsSet(pErrInfo))
267 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
268 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
269 else
270 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
271 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
272 Assert(RT_FAILURE(rc));
273 }
274 }
275 }
276 if (RT_SUCCESS(rc))
277 {
278 Assert(!RTErrInfoIsSet(pErrInfo));
279 }
280 }
281
282 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
283 RTLdrClose(ahMods[i]);
284 return rc;
285}
286
287
288/**
289 * Wrapper for different WHvGetCapability signatures.
290 */
291DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
292{
293 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
294}
295
296
297/**
298 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
299 *
300 * @returns VBox status code.
301 * @param pVM The cross context VM structure.
302 * @param pErrInfo Where to always return error info.
303 */
304static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
305{
306#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
307#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
308#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
309
310 /*
311 * Is the hypervisor present with the desired capability?
312 *
313 * In build 17083 this translates into:
314 * - CPUID[0x00000001].HVP is set
315 * - CPUID[0x40000000] == "Microsoft Hv"
316 * - CPUID[0x40000001].eax == "Hv#1"
317 * - CPUID[0x40000003].ebx[12] is set.
318 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
319 * a non-zero value.
320 */
321 /**
322 * @todo Someone at Microsoft please explain weird API design:
323 * 1. Pointless CapabilityCode duplication int the output;
324 * 2. No output size.
325 */
326 WHV_CAPABILITY Caps;
327 RT_ZERO(Caps);
328 SetLastError(0);
329 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
330 DWORD rcWin = GetLastError();
331 if (FAILED(hrc))
332 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
333 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
334 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
335 if (!Caps.HypervisorPresent)
336 {
337 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
338 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
339 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
340 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
341 }
342 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
343
344
345 /*
346 * Check what extended VM exits are supported.
347 */
348 RT_ZERO(Caps);
349 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
350 if (FAILED(hrc))
351 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
352 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
353 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
354 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
355 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
356 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
357 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
358 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
359 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
360 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
361 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
362
363 /*
364 * Check features in case they end up defining any.
365 */
366 RT_ZERO(Caps);
367 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
368 if (FAILED(hrc))
369 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
370 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
371 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
372 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
373 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
374 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
375
376 /*
377 * Check that the CPU vendor is supported.
378 */
379 RT_ZERO(Caps);
380 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
381 if (FAILED(hrc))
382 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
383 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
384 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
385 switch (Caps.ProcessorVendor)
386 {
387 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
388 case WHvProcessorVendorArm:
389 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
390 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
391 break;
392 default:
393 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
394 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
395 }
396
397 /*
398 * CPU features, guessing these are virtual CPU features?
399 */
400 RT_ZERO(Caps);
401 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
402 if (FAILED(hrc))
403 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
404 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
405 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
406 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
407#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
408 NEM_LOG_REL_CPU_FEATURE(Asid16);
409 NEM_LOG_REL_CPU_FEATURE(TGran16);
410 NEM_LOG_REL_CPU_FEATURE(TGran64);
411 NEM_LOG_REL_CPU_FEATURE(Haf);
412 NEM_LOG_REL_CPU_FEATURE(Hdbs);
413 NEM_LOG_REL_CPU_FEATURE(Pan);
414 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
415 NEM_LOG_REL_CPU_FEATURE(Uao);
416 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
417 NEM_LOG_REL_CPU_FEATURE(Fp);
418 NEM_LOG_REL_CPU_FEATURE(FpHp);
419 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
420 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
421 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
422 NEM_LOG_REL_CPU_FEATURE(GicV41);
423 NEM_LOG_REL_CPU_FEATURE(Ras);
424 NEM_LOG_REL_CPU_FEATURE(PmuV3);
425 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
426 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
427 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
428 NEM_LOG_REL_CPU_FEATURE(Aes);
429 NEM_LOG_REL_CPU_FEATURE(PolyMul);
430 NEM_LOG_REL_CPU_FEATURE(Sha1);
431 NEM_LOG_REL_CPU_FEATURE(Sha256);
432 NEM_LOG_REL_CPU_FEATURE(Sha512);
433 NEM_LOG_REL_CPU_FEATURE(Crc32);
434 NEM_LOG_REL_CPU_FEATURE(Atomic);
435 NEM_LOG_REL_CPU_FEATURE(Rdm);
436 NEM_LOG_REL_CPU_FEATURE(Sha3);
437 NEM_LOG_REL_CPU_FEATURE(Sm3);
438 NEM_LOG_REL_CPU_FEATURE(Sm4);
439 NEM_LOG_REL_CPU_FEATURE(Dp);
440 NEM_LOG_REL_CPU_FEATURE(Fhm);
441 NEM_LOG_REL_CPU_FEATURE(DcCvap);
442 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
443 NEM_LOG_REL_CPU_FEATURE(ApaBase);
444 NEM_LOG_REL_CPU_FEATURE(ApaEp);
445 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
446 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
447 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
448 NEM_LOG_REL_CPU_FEATURE(Jscvt);
449 NEM_LOG_REL_CPU_FEATURE(Fcma);
450 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
451 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
452 NEM_LOG_REL_CPU_FEATURE(Gpa);
453 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
454 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
455
456#undef NEM_LOG_REL_CPU_FEATURE
457 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
458 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
459 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
460 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
461
462 /*
463 * The cache line flush size.
464 */
465 RT_ZERO(Caps);
466 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
467 if (FAILED(hrc))
468 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
469 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
470 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
471 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
472 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
473 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
474 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
475
476 RT_ZERO(Caps);
477 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
478 if (FAILED(hrc))
479 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
480 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
481 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
482 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
483 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
484 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
485 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
486
487
488 /*
489 * See if they've added more properties that we're not aware of.
490 */
491 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
492 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
493 {
494 static const struct
495 {
496 uint32_t iMin, iMax; } s_aUnknowns[] =
497 {
498 { 0x0004, 0x000f },
499 { 0x1003, 0x100f },
500 { 0x2000, 0x200f },
501 { 0x3000, 0x300f },
502 { 0x4000, 0x400f },
503 };
504 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
505 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
506 {
507 RT_ZERO(Caps);
508 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
509 if (SUCCEEDED(hrc))
510 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
511 }
512 }
513
514 /*
515 * For proper operation, we require CPUID exits.
516 */
517 /** @todo Any? */
518
519#undef NEM_LOG_REL_CAP_EX
520#undef NEM_LOG_REL_CAP_SUB_EX
521#undef NEM_LOG_REL_CAP_SUB
522 return VINF_SUCCESS;
523}
524
525
526/**
527 * Creates and sets up a Hyper-V (exo) partition.
528 *
529 * @returns VBox status code.
530 * @param pVM The cross context VM structure.
531 * @param pErrInfo Where to always return error info.
532 */
533static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
534{
535 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
536 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
537
538 /*
539 * Create the partition.
540 */
541 WHV_PARTITION_HANDLE hPartition;
542 HRESULT hrc = WHvCreatePartition(&hPartition);
543 if (FAILED(hrc))
544 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
545 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
546
547 int rc;
548
549 /*
550 * Set partition properties, most importantly the CPU count.
551 */
552 /**
553 * @todo Someone at Microsoft please explain another weird API:
554 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
555 * argument rather than as part of the struct. That is so weird if you've
556 * used any other NT or windows API, including WHvGetCapability().
557 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
558 * technically only need 9 bytes for setting/getting
559 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
560 WHV_PARTITION_PROPERTY Property;
561 RT_ZERO(Property);
562 Property.ProcessorCount = pVM->cCpus;
563 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
564 if (SUCCEEDED(hrc))
565 {
566 RT_ZERO(Property);
567 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
568 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
569 if (SUCCEEDED(hrc))
570 {
571 /*
572 * We'll continue setup in nemR3NativeInitAfterCPUM.
573 */
574 pVM->nem.s.fCreatedEmts = false;
575 pVM->nem.s.hPartition = hPartition;
576 LogRel(("NEM: Created partition %p.\n", hPartition));
577 return VINF_SUCCESS;
578 }
579
580 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
581 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
582 Property.ExtendedVmExits.AsUINT64, hrc);
583 }
584 else
585 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
586 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
587 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
588 WHvDeletePartition(hPartition);
589
590 Assert(!pVM->nem.s.hPartitionDevice);
591 Assert(!pVM->nem.s.hPartition);
592 return rc;
593}
594
595
/**
 * Worker for nemR3NativeInit that finalizes the partition setup.
 *
 * Sets the remaining partition properties (cache line flush size, processor
 * features), calls WHvSetupPartition() to instantiate the partition, and
 * creates one virtual processor per vCPU.  The AArch64 ID registers are
 * queried once (for vCPU 0) and fed to CPUM via
 * CPUMR3PopulateFeaturesByIdRegisters().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int nemR3NativeInitSetupVm(PVM pVM)
{
    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
    AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);

    /*
     * Continue setting up the partition now that we've got most of the CPUID feature stuff.
     */
    WHV_PARTITION_PROPERTY Property;
    HRESULT hrc;

#if 0
    /* Not sure if we really need to set the vendor.
       Update: Apparently we don't. WHvPartitionPropertyCodeProcessorVendor was removed in 17110. */
    RT_ZERO(Property);
    Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
                             : WHvProcessorVendorIntel;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
                          Property.ProcessorVendor, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
#endif

    /* Not sure if we really need to set the cache line flush size. */
    RT_ZERO(Property);
    Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Sync CPU features with CPUM.
     */
    /** @todo sync CPU features with CPUM. */

    /* Set the partition property: echo back the feature set captured by
       nemR3WinInitCheckCapabilities(). */
    RT_ZERO(Property);
    Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
                          pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Set up the partition.
     *
     * Seems like this is where the partition is actually instantiated and we get
     * a handle to it.
     */
    hrc = WHvSetupPartition(hPartition);
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
        if (FAILED(hrc))
        {
            /* Save the failure codes before the cleanup calls below clobber them. */
            NTSTATUS const rcNtLast  = RTNtLastStatusValue();
            DWORD const    dwErrLast = RTNtLastErrorValue();
            /* Unwind: delete the vCPUs created so far. */
            while (idCpu-- > 0)
            {
                HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
                AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
                                                  hPartition, idCpu, hrc2, RTNtLastStatusValue(),
                                                  RTNtLastErrorValue()));
            }
            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                              "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
        }

        if (idCpu == 0)
        {
            /* Need to query the ID registers and populate CPUM. */
            CPUMIDREGS IdRegs; RT_ZERO(IdRegs);

#if 1
            /* Read the AArch64 ID registers from the hypervisor in one batch. */
            WHV_REGISTER_NAME  aenmNames[12];
            WHV_REGISTER_VALUE aValues[12];
            RT_ZERO(aValues);

            aenmNames[0]  = WHvArm64RegisterIdAa64Dfr0El1;
            aenmNames[1]  = WHvArm64RegisterIdAa64Dfr1El1;
            aenmNames[2]  = WHvArm64RegisterIdAa64Isar0El1;
            aenmNames[3]  = WHvArm64RegisterIdAa64Isar1El1;
            aenmNames[4]  = WHvArm64RegisterIdAa64Isar2El1;
            aenmNames[5]  = WHvArm64RegisterIdAa64Mmfr0El1;
            aenmNames[6]  = WHvArm64RegisterIdAa64Mmfr1El1;
            aenmNames[7]  = WHvArm64RegisterIdAa64Mmfr2El1;
            aenmNames[8]  = WHvArm64RegisterIdAa64Pfr0El1;
            aenmNames[9]  = WHvArm64RegisterIdAa64Pfr1El1;
            aenmNames[10] = WHvArm64RegisterCtrEl0;
            aenmNames[11] = WHvArm64RegisterDczidEl0;

            hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
            AssertLogRelMsgReturn(SUCCEEDED(hrc),
                                  ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                                   hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                                  , VERR_NEM_GET_REGISTERS_FAILED);

            /* NB: assignment order follows the CPUMIDREGS layout, not aValues order. */
            IdRegs.u64RegIdAa64Pfr0El1  = aValues[8].Reg64;
            IdRegs.u64RegIdAa64Pfr1El1  = aValues[9].Reg64;
            IdRegs.u64RegIdAa64Dfr0El1  = aValues[0].Reg64;
            IdRegs.u64RegIdAa64Dfr1El1  = aValues[1].Reg64;
            IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
            IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
            IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
            IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
            IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
            IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
            IdRegs.u64RegCtrEl0         = aValues[10].Reg64;
            IdRegs.u64RegDczidEl0       = aValues[11].Reg64;
#else
            /* Alternative (disabled): synthesize only the PA-range field from the
               physical address width established by nemR3WinInitCheckCapabilities(). */
            switch (pVM->nem.s.cPhysicalAddressWidth)
            {
                case 32: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_32BITS); break;
                case 36: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_36BITS); break;
                case 40: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_40BITS); break;
                case 42: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_42BITS); break;
                case 44: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_44BITS); break;
                case 48: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_48BITS); break;
                case 52: IdRegs.u64RegIdAa64Mmfr0El1 = RT_BF_SET(IdRegs.u64RegIdAa64Mmfr0El1, ARMV8_ID_AA64MMFR0_EL1_PARANGE, ARMV8_ID_AA64MMFR0_EL1_PARANGE_52BITS); break;
                default: AssertReleaseFailed(); break;
            }
#endif

            int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
            if (RT_FAILURE(rc))
                return rc;
        }
    }
    pVM->nem.s.fCreatedEmts = true;

    LogRel(("NEM: Successfully set up partition\n"));
    return VINF_SUCCESS;
}
743
744
/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fFallback       Whether we're in fallback mode or use-NEM mode. In
 *                          the latter we'll fail if we cannot initialize.
 * @param   fForced         Whether the HMForced flag is set and we should
 *                          fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    g_uBuildNo = RTSystemGetNtBuildNo();

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
    int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Check the capabilities of the hypervisor, starting with whether it's present.
         */
        rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create and initialize a partition.
             */
            rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
            if (RT_SUCCESS(rc))
            {
                rc = nemR3NativeInitSetupVm(pVM);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics
                     */
                    STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");

                    /* Per-VCpu exit/break/import counters. */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                    }

                    /* Ring-0 hypervisor page statistics are only reachable with a real driver. */
                    if (!SUPR3IsDriverless())
                    {
                        PUVM pUVM = pVM->pUVM;
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
                                              "/NEM/R0Stats/cPagesAvailable");
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
                                              "/NEM/R0Stats/cPagesInUse");
                    }
                }

            }

        }
    }

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
879
880
881/**
882 * This is called after CPUMR3Init is done.
883 *
884 * @returns VBox status code.
885 * @param pVM The VM handle..
886 */
887int nemR3NativeInitAfterCPUM(PVM pVM)
888{
889 /*
890 * Validate sanity.
891 */
892 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
893
894 /** @todo */
895
896 /*
897 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
898 */
899 /** @todo stats */
900
901 /*
902 * Adjust features.
903 *
904 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
905 * the first init call.
906 */
907
908 return VINF_SUCCESS;
909}
910
911
912int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
913{
914 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
915 //AssertLogRel(fRet);
916
917 NOREF(pVM); NOREF(enmWhat);
918 return VINF_SUCCESS;
919}
920
921
922int nemR3NativeTerm(PVM pVM)
923{
924 /*
925 * Delete the partition.
926 */
927 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
928 pVM->nem.s.hPartition = NULL;
929 pVM->nem.s.hPartitionDevice = NULL;
930 if (hPartition != NULL)
931 {
932 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
933 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
934 while (idCpu-- > 0)
935 {
936 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
937 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
938 hPartition, idCpu, hrc, RTNtLastStatusValue(),
939 RTNtLastErrorValue()));
940 }
941 WHvDeletePartition(hPartition);
942 }
943 pVM->nem.s.fCreatedEmts = false;
944 return VINF_SUCCESS;
945}
946
947
948/**
949 * VM reset notification.
950 *
951 * @param pVM The cross context VM structure.
952 */
953void nemR3NativeReset(PVM pVM)
954{
955 RT_NOREF(pVM);
956}
957
958
959/**
960 * Reset CPU due to INIT IPI or hot (un)plugging.
961 *
962 * @param pVCpu The cross context virtual CPU structure of the CPU being
963 * reset.
964 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
965 */
966void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
967{
968 RT_NOREF(pVCpu, fInitIpi);
969}
970
971
/**
 * Exports (pushes) the modified guest register state to Hyper-V.
 *
 * Only registers whose CPUMCTX_EXTRN_XXX bit is clear in fExtrn (i.e. the
 * CPUM copy is the authoritative one) are written back, via a single
 * WHvSetVirtualProcessorRegisters batch call.
 *
 * NOTE(review): only GPRs, PC, PSTATE and the vector registers are exported
 * here, yet on success fExtrn is set to CPUMCTX_EXTRN_ALL.  Dirty SPSR/ELR/
 * SP/SCTLR/FPCR/FPSR/system-register state would thus not reach Hyper-V,
 * while the import worker below does handle those - confirm this asymmetry
 * is intentional (or covered by the /** @todo * / items).
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on WHv failure).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* Bits clear in fExtrn = state held (and possibly modified) by CPUM. */
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
    if (!fWhat)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

/* Append a 64-bit register from a CPUMCTX register union (.x member). */
#define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue).x; \
        iReg++; \
    } while (0)
/* Append a 64-bit register from a plain uint64_t value. */
#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
/* Append a 128-bit (vector) register. */
#define ADD_REG128(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
        aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
        if (fWhat & CPUMCTX_EXTRN_X1)
            ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
        if (fWhat & CPUMCTX_EXTRN_X2)
            ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
        if (fWhat & CPUMCTX_EXTRN_X3)
            ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
        if (fWhat & CPUMCTX_EXTRN_X4_X28) /* X4..X28 are tracked as one group. */
        {
            ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
            ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
            ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
            ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
            ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
            ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
            ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
            ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
            ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
            ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
            ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
            ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
            ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
            ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
            ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
            ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
            ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
            ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
            ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
            ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
            ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
            ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
            ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
            ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
            ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
        }
        if (fWhat & CPUMCTX_EXTRN_LR) /* LR is X30. */
            ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
        if (fWhat & CPUMCTX_EXTRN_FP) /* FP is X29. */
            ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
    }

    /* PC & PSTATE. */
    if (fWhat & CPUMCTX_EXTRN_PC)
        ADD_REG64_RAW(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc.u64);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
        ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
        ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
        ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
        ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
        ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
        ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
        ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
        ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
        ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
        ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
        ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
        ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
        ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
        ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
        ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
        ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
        ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
        ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
        ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
        ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
        ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
        ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
        ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
        ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
        ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
        ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
        ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
        ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
        ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
        ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
        ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
    }

#undef ADD_REG64
#undef ADD_REG64_RAW
#undef ADD_REG128

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        /* Everything is back in Hyper-V's hands; mark it all external again. */
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
}
1109
1110
/**
 * Imports (pulls) guest register state from Hyper-V into CPUMCTX.
 *
 * Builds the name array for the registers that are still externalized
 * (fExtrn bit set) and fetches them in a single
 * WHvGetVirtualProcessorRegisters batch call.  The GET_* macros below
 * assert that the reply order matches the request order, so the two
 * if-cascades in this function must stay in exactly the same sequence.
 *
 * @returns VBox status code (VERR_NEM_GET_REGISTERS_FAILED on WHv failure).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fWhat   CPUMCTX_EXTRN_XXX mask of the state to import.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    WHV_REGISTER_NAME aenmNames[128];

    /* Only fetch what is actually still held by Hyper-V. */
    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    if (!fWhat)
        return VINF_SUCCESS;

    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            aenmNames[iReg++] = WHvArm64RegisterX0;
        if (fWhat & CPUMCTX_EXTRN_X1)
            aenmNames[iReg++] = WHvArm64RegisterX1;
        if (fWhat & CPUMCTX_EXTRN_X2)
            aenmNames[iReg++] = WHvArm64RegisterX2;
        if (fWhat & CPUMCTX_EXTRN_X3)
            aenmNames[iReg++] = WHvArm64RegisterX3;
        if (fWhat & CPUMCTX_EXTRN_X4_X28) /* X4..X28 are tracked as one group. */
        {
            aenmNames[iReg++] = WHvArm64RegisterX4;
            aenmNames[iReg++] = WHvArm64RegisterX5;
            aenmNames[iReg++] = WHvArm64RegisterX6;
            aenmNames[iReg++] = WHvArm64RegisterX7;
            aenmNames[iReg++] = WHvArm64RegisterX8;
            aenmNames[iReg++] = WHvArm64RegisterX9;
            aenmNames[iReg++] = WHvArm64RegisterX10;
            aenmNames[iReg++] = WHvArm64RegisterX11;
            aenmNames[iReg++] = WHvArm64RegisterX12;
            aenmNames[iReg++] = WHvArm64RegisterX13;
            aenmNames[iReg++] = WHvArm64RegisterX14;
            aenmNames[iReg++] = WHvArm64RegisterX15;
            aenmNames[iReg++] = WHvArm64RegisterX16;
            aenmNames[iReg++] = WHvArm64RegisterX17;
            aenmNames[iReg++] = WHvArm64RegisterX18;
            aenmNames[iReg++] = WHvArm64RegisterX19;
            aenmNames[iReg++] = WHvArm64RegisterX20;
            aenmNames[iReg++] = WHvArm64RegisterX21;
            aenmNames[iReg++] = WHvArm64RegisterX22;
            aenmNames[iReg++] = WHvArm64RegisterX23;
            aenmNames[iReg++] = WHvArm64RegisterX24;
            aenmNames[iReg++] = WHvArm64RegisterX25;
            aenmNames[iReg++] = WHvArm64RegisterX26;
            aenmNames[iReg++] = WHvArm64RegisterX27;
            aenmNames[iReg++] = WHvArm64RegisterX28;
        }
        if (fWhat & CPUMCTX_EXTRN_LR)
            aenmNames[iReg++] = WHvArm64RegisterLr;
        if (fWhat & CPUMCTX_EXTRN_FP)
            aenmNames[iReg++] = WHvArm64RegisterFp;
    }

    /* PC & PSTATE (plus EL1 banked state). */
    if (fWhat & CPUMCTX_EXTRN_PC)
        aenmNames[iReg++] = WHvArm64RegisterPc;
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        aenmNames[iReg++] = WHvArm64RegisterPstate;
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
    if (fWhat & CPUMCTX_EXTRN_ELR)
        aenmNames[iReg++] = WHvArm64RegisterElrEl1;
    if (fWhat & CPUMCTX_EXTRN_SP) /* Both stack pointers are fetched together. */
    {
        aenmNames[iReg++] = WHvArm64RegisterSpEl0;
        aenmNames[iReg++] = WHvArm64RegisterSpEl1;
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR) /* MMU control/translation regs. */
    {
        aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        aenmNames[iReg++] = WHvArm64RegisterQ0;
        aenmNames[iReg++] = WHvArm64RegisterQ1;
        aenmNames[iReg++] = WHvArm64RegisterQ2;
        aenmNames[iReg++] = WHvArm64RegisterQ3;
        aenmNames[iReg++] = WHvArm64RegisterQ4;
        aenmNames[iReg++] = WHvArm64RegisterQ5;
        aenmNames[iReg++] = WHvArm64RegisterQ6;
        aenmNames[iReg++] = WHvArm64RegisterQ7;
        aenmNames[iReg++] = WHvArm64RegisterQ8;
        aenmNames[iReg++] = WHvArm64RegisterQ9;
        aenmNames[iReg++] = WHvArm64RegisterQ10;
        aenmNames[iReg++] = WHvArm64RegisterQ11;
        aenmNames[iReg++] = WHvArm64RegisterQ12;
        aenmNames[iReg++] = WHvArm64RegisterQ13;
        aenmNames[iReg++] = WHvArm64RegisterQ14;
        aenmNames[iReg++] = WHvArm64RegisterQ15;

        aenmNames[iReg++] = WHvArm64RegisterQ16;
        aenmNames[iReg++] = WHvArm64RegisterQ17;
        aenmNames[iReg++] = WHvArm64RegisterQ18;
        aenmNames[iReg++] = WHvArm64RegisterQ19;
        aenmNames[iReg++] = WHvArm64RegisterQ20;
        aenmNames[iReg++] = WHvArm64RegisterQ21;
        aenmNames[iReg++] = WHvArm64RegisterQ22;
        aenmNames[iReg++] = WHvArm64RegisterQ23;
        aenmNames[iReg++] = WHvArm64RegisterQ24;
        aenmNames[iReg++] = WHvArm64RegisterQ25;
        aenmNames[iReg++] = WHvArm64RegisterQ26;
        aenmNames[iReg++] = WHvArm64RegisterQ27;
        aenmNames[iReg++] = WHvArm64RegisterQ28;
        aenmNames[iReg++] = WHvArm64RegisterQ29;
        aenmNames[iReg++] = WHvArm64RegisterQ30;
        aenmNames[iReg++] = WHvArm64RegisterQ31;
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        aenmNames[iReg++] = WHvArm64RegisterFpcr;
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        aenmNames[iReg++] = WHvArm64RegisterFpsr;

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
        aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
        aenmNames[iReg++] = WHvArm64RegisterFarEl1;
        /** @todo */
    }

#if 0 /* Debug registers not wired up yet. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        aenmNames[iReg++] = WHvArm64RegisterDbgbcr0El1;
        /** @todo */
    }
#endif

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
        /** @todo */
    }

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    /* The GET_* macros below walk aValues[] in the exact order the names were
       queued above, asserting the name at each position as a sanity check. */
    iReg = 0;
/* Read a 64-bit register into a CPUMCTX register union (.x member). */
#define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar).x = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Read a 64-bit register into a plain uint64_t variable. */
#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Read a 64-bit register into a system register struct (.u64 member). */
#define GET_SYSREG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar).u64 = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
/* Read a 128-bit (vector) register. */
#define GET_REG128(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
        (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
        if (fWhat & CPUMCTX_EXTRN_X1)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
        if (fWhat & CPUMCTX_EXTRN_X2)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
        if (fWhat & CPUMCTX_EXTRN_X3)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
        }
        if (fWhat & CPUMCTX_EXTRN_LR)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
        if (fWhat & CPUMCTX_EXTRN_FP)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
    }

    /* PC & PSTATE (plus EL1 banked state). */
    if (fWhat & CPUMCTX_EXTRN_PC)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
    if (fWhat & CPUMCTX_EXTRN_ELR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);

        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
        /** @todo */
    }

#if 0 /* Debug registers not wired up yet (see name array above). */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[0].Ctrl, WHvArm64RegisterDbgbcr0El1);
        /** @todo */
    }
#endif

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
        /** @todo */
    }

    /* Almost done, just update extrn flags.  Once nothing in EXTRN_ALL is
       left, clear the keeper bits as well. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return VINF_SUCCESS;
}
1431
1432
1433/**
1434 * Interface for importing state on demand (used by IEM).
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context CPU structure.
1438 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1439 */
1440VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1441{
1442 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1443 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1444}
1445
1446
1447/**
1448 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1449 *
1450 * @returns VBox status code.
1451 * @param pVCpu The cross context CPU structure.
1452 * @param pcTicks Where to return the CPU tick count.
1453 * @param puAux Where to return the TSC_AUX register value.
1454 */
1455VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1456{
1457 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1458
1459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1460 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1461 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1462
1463#pragma message("NEMHCQueryCpuTick: Implement it!")
1464#if 0 /** @todo */
1465 /* Call the offical API. */
1466 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1467 WHV_REGISTER_VALUE aValues[2] = { { {0, 0} }, { {0, 0} } };
1468 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1469 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1470 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1471 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1472 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1473 , VERR_NEM_GET_REGISTERS_FAILED);
1474 *pcTicks = aValues[0].Reg64;
1475 if (puAux)
1476 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1477#else
1478 RT_NOREF(pVCpu, pcTicks, puAux);
1479#endif
1480 return VINF_SUCCESS;
1481}
1482
1483
1484/**
1485 * Resumes CPU clock (TSC) on all virtual CPUs.
1486 *
1487 * This is called by TM when the VM is started, restored, resumed or similar.
1488 *
1489 * @returns VBox status code.
1490 * @param pVM The cross context VM structure.
1491 * @param pVCpu The cross context CPU structure of the calling EMT.
1492 * @param uPausedTscValue The TSC value at the time of pausing.
1493 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Call the official API to do the job.
     */
    if (pVM->cCpus > 1)
        RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */

    /* NOTE(review): Not implemented for ARM yet; the disabled block below is the
       x86 (TSC) variant kept for reference.  Until implemented, this call is a
       no-op that reports success. */
#pragma message("NEMHCResumeCpuTickOnAll: Implement it!")
#if 0 /** @todo */
    /* Start with the first CPU. */
    WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE Value = { {0, 0} };
    Value.Reg64 = uPausedTscValue;
    uint64_t const uFirstTsc = ASMReadTSC();
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_SET_TSC);

    /* Do the other CPUs, adjusting for elapsed TSC and keeping fingers crossed
       that we don't introduce too much drift here. */
    for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
    {
        Assert(enmName == WHvX64RegisterTsc);
        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
        Value.Reg64 = uPausedTscValue + offDelta;
        hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
                               pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_SET_TSC);
    }
#else
    RT_NOREF(uPausedTscValue);
#endif

    return VINF_SUCCESS;
}
1537
1538
1539#ifdef LOG_ENABLED
1540/**
1541 * Logs the current CPU state.
1542 */
static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    /* Only format the (fairly expensive) register dump when level-3 logging is on. */
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        /* Disassembly of the current instruction is not wired up yet; keep the
           buffer empty so the Log3 below still works. */
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
1572#endif /* LOG_ENABLED */
1573
1574
1575/**
1576 * Copies register state from the (common) exit context.
1577 *
1578 * ASSUMES no state copied yet.
1579 *
1580 * @param pVCpu The cross context per CPU structure.
1581 * @param pMsgHdr The common message header.
1582 */
1583DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1584{
1585 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1586 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1587
1588 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1589 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1590
1591 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1592}
1593
1594
1595/**
1596 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1597 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1598 */
typedef struct NEMHCWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool fWriteAccess;
    /** Output: Set if we did something. */
    bool fDidSomething;
    /** Output: Set if we should resume. */
    bool fCanResume;
} NEMHCWINHMACPCCSTATE;
1608
1609/**
1610 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1611 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1612 * NEMHCWINHMACPCCSTATE structure. }
1613 */
NEM_TMPL_STATIC DECLCALLBACK(int)
nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
    pState->fDidSomething = false;
    pState->fCanResume = false;

    /* Local copies of the current NEM page state and the source address.
       NOTE(review): GCPhysSrc always equals GCPhys here; the distinction is a
       leftover from the x86 backend's A20-gate handling and has no effect on ARM. */
    uint8_t u2State = pInfo->u2NemState;
    RTGCPHYS GCPhysSrc = GCPhys;

    /*
     * Consolidate current page state with actual page protection and access type.
     * We don't really consider downgrades here, as they shouldn't happen.
     */
    int rc;
    switch (u2State)
    {
        case NEM_WIN_PAGE_STATE_UNMAPPED:
        case NEM_WIN_PAGE_STATE_NOT_SET:
            /* Inaccessible page: nothing to map, let the access be emulated. */
            if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Don't bother remapping it if it's a write request to a non-writable page. */
            if (   pState->fWriteAccess
                && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Map the page.
               NOTE(review): uses X86_PAGE_OFFSET_MASK on ARM; page size assumptions
               here should be confirmed against the partition's configured page size. */
            rc = nemHCNativeSetPhysPage(pVM,
                                        pVCpu,
                                        GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                        pInfo->fNemProt,
                                        &u2State,
                                        true /*fBackingState*/);
            pInfo->u2NemState = u2State;
            Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
                  GCPhys, g_apszPageStates[u2State], rc));
            pState->fDidSomething = true;
            pState->fCanResume = true;
            return rc;

        case NEM_WIN_PAGE_STATE_READABLE:
            /* A read/execute access to a readable page is fine as-is; only a
               write (or a protection mismatch) falls through to the unmap below. */
            if (   !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
            {
                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
                return VINF_SUCCESS;
            }

            break;

        case NEM_WIN_PAGE_STATE_WRITABLE:
            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
            {
                /* Already writable; if the state just changed, the exit raced the
                   remapping and we can simply resume the guest. */
                if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
                else
                {
                    pState->fCanResume = true;
                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
                          GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
                }
                return VINF_SUCCESS;
            }
            break;

        default:
            AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
    }

    /*
     * Unmap and restart the instruction.
     * If this fails, which it does every so often, just unmap everything for now.
     */
    /** @todo figure out whether we mess up the state or if it's WHv. */
    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        pState->fDidSomething = true;
        pState->fCanResume = true;
        pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
            GCPhys, g_apszPageStates[u2State], hrc, hrc));
    return VERR_NEM_UNMAP_PAGES_FAILED;
}
1716
1717
1718/**
1719 * Returns the byte size from the given access SAS value.
1720 *
1721 * @returns Number of bytes to transfer.
1722 * @param uSas The SAS value to convert.
1723 */
1724DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
1725{
1726 switch (uSas)
1727 {
1728 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1729 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1730 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1731 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1732 default:
1733 AssertReleaseFailed();
1734 }
1735
1736 return 0;
1737}
1738
1739
1740/**
1741 * Sets the given general purpose register to the given value.
1742 *
1743 * @param pVCpu The cross context virtual CPU structure of the
1744 * calling EMT.
1745 * @param uReg The register index.
1746 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1747 * @param fSignExtend Flag whether to sign extend the value.
1748 * @param u64Val The value.
1749 */
1750DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1751{
1752 AssertReturnVoid(uReg < 31);
1753
1754 if (f64BitReg)
1755 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1756 else
1757 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1758
1759 /* Mark the register as not extern anymore. */
1760 switch (uReg)
1761 {
1762 case 0:
1763 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1764 break;
1765 case 1:
1766 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1767 break;
1768 case 2:
1769 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1770 break;
1771 case 3:
1772 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1773 break;
1774 default:
1775 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1776 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1777 }
1778}
1779
1780
1781/**
1782 * Gets the given general purpose register and returns the value.
1783 *
1784 * @returns Value from the given register.
1785 * @param pVCpu The cross context virtual CPU structure of the
1786 * calling EMT.
1787 * @param uReg The register index.
1788 */
1789DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1790{
1791 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1792
1793 if (uReg == ARMV8_AARCH64_REG_ZR)
1794 return 0;
1795
1796 /** @todo Import the register if extern. */
1797 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1798
1799 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1800}
1801
1802
1803/**
1804 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1805 *
1806 * @returns Strict VBox status code.
1807 * @param pVM The cross context VM structure.
1808 * @param pVCpu The cross context per CPU structure.
1809 * @param pExit The VM exit information to handle.
1810 * @sa nemHCWinHandleMessageMemory
1811 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    uint64_t const uHostTsc = ASMReadTSC();
    Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);

    /*
     * Ask PGM for information about the given GCPhys.  We need to check if we're
     * out of sync first.
     */
    WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
    NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
    PGMPHYSNEMPAGEINFO Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
                                       nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
    if (RT_SUCCESS(rc))
    {
        /* If the checker (re)mapped the page so the access would now succeed,
           simply restart the instruction instead of emulating it. */
        if (Info.fNemProt & (  pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
        {
            if (State.fCanResume)
            {
                Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
                      pVCpu->idCpu, pHdr->Pc,
                      pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
                      State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
                                 pHdr->Pc, uHostTsc);
                return VINF_SUCCESS;
            }
        }
        Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
              State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
    }
    else
        Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
              pVCpu->idCpu, pHdr->Pc,
              pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
              g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));

    /*
     * Emulate the memory access, either access handler or special memory.
     */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                            pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
                                            ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                                            : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                                            pHdr->Pc, uHostTsc);
#pragma message("nemR3WinHandleExitMemory: Why not calling nemR3WinCopyStateFromArmHeader?")
/** @todo r=bird: Why is nemR3WinCopyStateFromArmHeader commented out? */
    //nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
    RT_NOREF_PV(pExitRec);
    /* Pull in everything IEM could possibly need before decoding the access. */
    rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
    RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
#endif
    /* Decode the data-abort syndrome (ISS) fields describing the access. */
    RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
    uint64_t const uIss = pExit->MemoryAccess.Syndrome;
    bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);      /* Syndrome valid (register/size info present). */
    bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW); /* Fault during stage-1 page table walk. */
    bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);     /* Write (vs read) access. */
    bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);   /* 64-bit register involved. */
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE); /* Sign-extend on load. */
    uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);          /* Source/target register number. */
    uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);          /* Access size encoding. */
    size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));

    RT_NOREF(fL2Fault);

    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    /* Perform the access through PGM so access handlers / MMIO get invoked. */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3WinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    /* Advance past the (always 4-byte) faulting instruction on success. */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */

    return rcStrict;
}
1923
1924
1925/**
1926 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException).
1927 *
1928 * @returns Strict VBox status code.
1929 * @param pVM The cross context VM structure.
1930 * @param pVCpu The cross context per CPU structure.
1931 * @param pExit The VM exit information to handle.
1932 * @sa nemHCWinHandleMessageUnrecoverableException
1933 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
#else
    /*
     * Let IEM decide whether this is really it.
     */
    /* NOTE(review): the IEM path below is not implemented yet - the release
       assertion makes this exit fatal for the time being. */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
    nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
    AssertReleaseFailed();
    RT_NOREF_PV(pVM);
    return VINF_SUCCESS;
#endif
}
1957
1958
1959/**
1960 * Handles VM exits.
1961 *
1962 * @returns Strict VBox status code.
1963 * @param pVM The cross context VM structure.
1964 * @param pVCpu The cross context per CPU structure.
1965 * @param pExit The VM exit information to handle.
1966 * @sa nemHCWinHandleMessage
1967 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /* Sledgehammer approach for now: import the complete guest state on every
       exit before dispatching (see the remark in nemR3NativeRunGC). */
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    /* Dispatch on the WHv exit reason. */
    switch (pExit->ExitReason)
    {
        case WHvRunVpExitReasonUnmappedGpa:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonCanceled:
            /* WHvCancelRunVirtualProcessor was called; just return to the run loop. */
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
2005
2006
2007VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2008{
2009 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2010#ifdef LOG_ENABLED
2011 if (LogIs3Enabled())
2012 nemR3WinLogState(pVM, pVCpu);
2013#endif
2014
2015 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2016 {
2017 /*
2018 * Sync the guest ID registers which are per VM once (they are readonly and stay constant during VM lifetime).
2019 * Need to do it here and not during the init because loading a saved state might change the ID registers from what
2020 * done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2021 */
2022 PCCPUMIDREGS pIdRegsGst = NULL;
2023 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2024 AssertRCReturn(rc, rc);
2025
2026 WHV_REGISTER_NAME aenmNames[12];
2027 WHV_REGISTER_VALUE aValues[12];
2028
2029 uint32_t iReg = 0;
2030#define ADD_REG64(a_enmName, a_uValue) do { \
2031 aenmNames[iReg] = (a_enmName); \
2032 aValues[iReg].Reg128.High64 = 0; \
2033 aValues[iReg].Reg64 = (a_uValue); \
2034 iReg++; \
2035 } while (0)
2036
2037
2038 ADD_REG64(WHvArm64RegisterIdAa64Mmfr0El1, pIdRegsGst->u64RegIdAa64Mmfr0El1);
2039#undef ADD_REG64
2040
2041 //HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
2042 //AssertReturn(SUCCEEDED(hrc), VERR_NEM_IPE_9);
2043
2044 pVCpu->nem.s.fIdRegsSynced = true;
2045 }
2046
2047 /*
2048 * Try switch to NEM runloop state.
2049 */
2050 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2051 { /* likely */ }
2052 else
2053 {
2054 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2055 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2056 return VINF_SUCCESS;
2057 }
2058
2059 /*
2060 * The run loop.
2061 *
2062 * Current approach to state updating to use the sledgehammer and sync
2063 * everything every time. This will be optimized later.
2064 */
2065 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2066// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
2067// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
2068// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
2069 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2070 for (unsigned iLoop = 0;; iLoop++)
2071 {
2072 /*
2073 * Pending interrupts or such? Need to check and deal with this prior
2074 * to the state syncing.
2075 */
2076#if 0
2077 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_UPDATE_IRQ))
2078 {
2079 /* Try inject interrupt. */
2080 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
2081 if (rcStrict == VINF_SUCCESS)
2082 { /* likely */ }
2083 else
2084 {
2085 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2086 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2087 break;
2088 }
2089 }
2090#endif
2091
2092 /* Ensure that Hyper-V has the whole state. */
2093 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2094 AssertRCReturn(rc2, rc2);
2095
2096 /*
2097 * Poll timers and run for a bit.
2098 *
2099 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2100 * so we take the time of the next timer event and uses that as a deadline.
2101 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2102 */
2103 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2104 * the whole polling job when timers have changed... */
2105 uint64_t offDeltaIgnored;
2106 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2107 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2108 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2109 {
2110 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2111 {
2112#if 0 //def LOG_ENABLED
2113 if (LogIsFlowEnabled())
2114 {
2115 static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
2116 WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
2117 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { {{0, 0} } };
2118 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2119 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
2120 pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
2121 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
2122 }
2123#endif
2124 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2125 TMNotifyStartOfExecution(pVM, pVCpu);
2126
2127 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2128
2129 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2130 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2131#ifdef LOG_ENABLED
2132 LogFlow(("NEM/%u: Exit @ @todo Reason=%#x\n", pVCpu->idCpu, ExitReason.ExitReason));
2133#endif
2134 if (SUCCEEDED(hrc))
2135 {
2136 /*
2137 * Deal with the message.
2138 */
2139 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2140 if (rcStrict == VINF_SUCCESS)
2141 { /* hopefully likely */ }
2142 else
2143 {
2144 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2145 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2146 break;
2147 }
2148 }
2149 else
2150 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2151 pVCpu->idCpu, hrc, GetLastError()),
2152 VERR_NEM_IPE_0);
2153
2154 /*
2155 * If no relevant FFs are pending, loop.
2156 */
2157 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2158 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2159 continue;
2160
2161 /** @todo Try handle pending flags, not just return to EM loops. Take care
2162 * not to set important RCs here unless we've handled a message. */
2163 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2164 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2165 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2166 }
2167 else
2168 {
2169 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2170 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2171 }
2172 }
2173 else
2174 {
2175 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2176 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2177 }
2178 break;
2179 } /* the run loop */
2180
2181
2182 /*
2183 * If the CPU is running, make sure to stop it before we try sync back the
2184 * state and return to EM. We don't sync back the whole state if we can help it.
2185 */
2186 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2187 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2188
2189 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2190 {
2191 /* Try anticipate what we might need. */
2192 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2193 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2194 || RT_FAILURE(rcStrict))
2195 fImport = CPUMCTX_EXTRN_ALL;
2196 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2197 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2198
2199 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2200 {
2201 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2202 if (RT_SUCCESS(rc2))
2203 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2204 else if (RT_SUCCESS(rcStrict))
2205 rcStrict = rc2;
2206 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2207 pVCpu->cpum.GstCtx.fExtrn = 0;
2208 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2209 }
2210 else
2211 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2212 }
2213 else
2214 {
2215 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2216 pVCpu->cpum.GstCtx.fExtrn = 0;
2217 }
2218
2219#if 0
2220 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel,
2221 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
2222#endif
2223 return rcStrict;
2224}
2225
2226
2227VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2228{
2229 Assert(VM_IS_NEM_ENABLED(pVM));
2230 RT_NOREF(pVM, pVCpu);
2231 return true;
2232}
2233
2234
2235bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2236{
2237 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2238 return false;
2239}
2240
2241
2242void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2243{
2244 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2245 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2246 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2247 RT_NOREF_PV(hrc);
2248 RT_NOREF_PV(fFlags);
2249}
2250
2251
2252DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2253{
2254 RT_NOREF(pVM, fUseDebugLoop);
2255 return false;
2256}
2257
2258
2259DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2260{
2261 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2262 return false;
2263}
2264
2265
2266DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2267{
2268 PGMPAGEMAPLOCK Lock;
2269 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2270 if (RT_SUCCESS(rc))
2271 PGMPhysReleasePageMappingLock(pVM, &Lock);
2272 return rc;
2273}
2274
2275
2276DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2277{
2278 PGMPAGEMAPLOCK Lock;
2279 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2280 if (RT_SUCCESS(rc))
2281 PGMPhysReleasePageMappingLock(pVM, &Lock);
2282 return rc;
2283}
2284
2285
2286VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2287 uint8_t *pu2State, uint32_t *puNemRange)
2288{
2289 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2290 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2291
2292 *pu2State = UINT8_MAX;
2293 RT_NOREF(puNemRange);
2294
2295 if (pvR3)
2296 {
2297 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2298 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2299 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2300 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2301 if (SUCCEEDED(hrc))
2302 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2303 else
2304 {
2305 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2306 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2307 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2308 return VERR_NEM_MAP_PAGES_FAILED;
2309 }
2310 }
2311 return VINF_SUCCESS;
2312}
2313
2314
2315VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2316{
2317 RT_NOREF(pVM);
2318 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2319}
2320
2321
/**
 * Early notification of an MMIO/MMIO2 range being mapped.
 *
 * Unmaps any RAM that is being replaced and, when MMIO2 backing is supplied,
 * maps it RWX into the Hyper-V partition (with dirty page tracking when
 * requested and supported).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM backing being replaced, if any (unused here).
 * @param   pvMmio2     The MMIO2 backing; NULL for pure MMIO.
 * @param   pu2State    Our page state (output).
 * @param   puNemRange  NEM range cookie (unused by this backend).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 follows: the map below will overlay the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        /* Only request dirty tracking when the API is actually present (older Windows lacks it). */
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO: nothing mapped, accesses will exit to us. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2381
2382
2383VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2384 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2385{
2386 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2387 return VINF_SUCCESS;
2388}
2389
2390
2391VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2392 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2393{
2394 int rc = VINF_SUCCESS;
2395 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2396 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2397
2398 /*
2399 * Unmap the MMIO2 pages.
2400 */
2401 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2402 * we may have more stuff to unmap even in case of pure MMIO... */
2403 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2404 {
2405 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2406 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2407 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2408 if (FAILED(hrc))
2409 {
2410 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2411 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2412 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2413 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2414 }
2415 }
2416
2417 /*
2418 * Restore the RAM we replaced.
2419 */
2420 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2421 {
2422 AssertPtr(pvRam);
2423 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2424 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2425 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2426 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2427 if (SUCCEEDED(hrc))
2428 { /* likely */ }
2429 else
2430 {
2431 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2432 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2433 rc = VERR_NEM_MAP_PAGES_FAILED;
2434 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2435 }
2436 if (pu2State)
2437 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2438 }
2439 /* Mark the pages as unmapped if relevant. */
2440 else if (pu2State)
2441 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2442
2443 RT_NOREF(pvMmio2, puNemRange);
2444 return rc;
2445}
2446
2447
2448VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2449 void *pvBitmap, size_t cbBitmap)
2450{
2451 Assert(VM_IS_NEM_ENABLED(pVM));
2452 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2453 Assert(cbBitmap == (uint32_t)cbBitmap);
2454 RT_NOREF(uNemRange);
2455
2456 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2457 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2458 if (SUCCEEDED(hrc))
2459 return VINF_SUCCESS;
2460
2461 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2462 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2463 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2464}
2465
2466
/**
 * Early notification of a ROM range registration.
 *
 * Nothing is mapped here; the actual read-only mapping happens in
 * NEMR3NotifyPhysRomRegisterLate (the per-page approach below was abandoned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the ROM range.
 * @param   cb          Size of the ROM range.
 * @param   pvPages     Pointer to the ROM page backing (unused here).
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX (unused here).
 * @param   pu2State    Our page state (output, initialized to UINT8_MAX).
 * @param   puNemRange  NEM range cookie (output, initialized to 0).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll get protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
2505
2506
2507VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2508 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2509{
2510 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2511 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2512 *pu2State = UINT8_MAX;
2513
2514 /*
2515 * (Re-)map readonly.
2516 */
2517 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2518 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2519 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2520 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2521 if (SUCCEEDED(hrc))
2522 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2523 else
2524 {
2525 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2526 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2527 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2528 return VERR_NEM_MAP_PAGES_FAILED;
2529 }
2530 RT_NOREF(fFlags, puNemRange);
2531 return VINF_SUCCESS;
2532}
2533
2534VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2535{
2536 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2537 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
2538 RT_NOREF(pVCpu, fEnabled);
2539}
2540
2541
2542void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2543{
2544 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2545 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2546}
2547
2548
2549VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2550 RTR3PTR pvMemR3, uint8_t *pu2State)
2551{
2552 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2553 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2554
2555 *pu2State = UINT8_MAX;
2556 if (pvMemR3)
2557 {
2558 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2559 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
2560 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2561 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2562 if (SUCCEEDED(hrc))
2563 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2564 else
2565 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
2566 pvMemR3, GCPhys, cb, hrc));
2567 }
2568 RT_NOREF(enmKind);
2569}
2570
2571
2572void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2573 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2574{
2575 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2576 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2577 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2578}
2579
2580
2581/**
2582 * Worker that maps pages into Hyper-V.
2583 *
2584 * This is used by the PGM physical page notifications as well as the memory
2585 * access VMEXIT handlers.
2586 *
2587 * @returns VBox status code.
2588 * @param pVM The cross context VM structure.
2589 * @param pVCpu The cross context virtual CPU structure of the
2590 * calling EMT.
2591 * @param GCPhysSrc The source page address.
2592 * @param GCPhysDst The hyper-V destination page. This may differ from
2593 * GCPhysSrc when A20 is disabled.
2594 * @param fPageProt NEM_PAGE_PROT_XXX.
2595 * @param pu2State Our page state (input/output).
2596 * @param fBackingChanged Set if the page backing is being changed.
2597 * @thread EMT(pVCpu)
2598 */
2599NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
2600 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
2601{
2602 /*
2603 * Looks like we need to unmap a page before we can change the backing
2604 * or even modify the protection. This is going to be *REALLY* efficient.
2605 * PGM lends us two bits to keep track of the state here.
2606 */
2607 RT_NOREF(pVCpu);
2608 uint8_t const u2OldState = *pu2State;
2609 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
2610 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
2611 if ( fBackingChanged
2612 || u2NewState != u2OldState)
2613 {
2614 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
2615 {
2616 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2617 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
2618 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2619 if (SUCCEEDED(hrc))
2620 {
2621 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2622 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2623 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2624 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
2625 {
2626 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
2627 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2628 return VINF_SUCCESS;
2629 }
2630 }
2631 else
2632 {
2633 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2634 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2635 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2636 return VERR_NEM_INIT_FAILED;
2637 }
2638 }
2639 }
2640
2641 /*
2642 * Writeable mapping?
2643 */
2644 if (fPageProt & NEM_PAGE_PROT_WRITE)
2645 {
2646 void *pvPage;
2647 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
2648 if (RT_SUCCESS(rc))
2649 {
2650 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
2651 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2652 if (SUCCEEDED(hrc))
2653 {
2654 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2655 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2656 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2657 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2658 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2659 return VINF_SUCCESS;
2660 }
2661 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2662 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2663 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2664 return VERR_NEM_INIT_FAILED;
2665 }
2666 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2667 return rc;
2668 }
2669
2670 if (fPageProt & NEM_PAGE_PROT_READ)
2671 {
2672 const void *pvPage;
2673 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
2674 if (RT_SUCCESS(rc))
2675 {
2676 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
2677 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
2678 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2679 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
2680 if (SUCCEEDED(hrc))
2681 {
2682 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2683 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
2684 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2685 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2686 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2687 return VINF_SUCCESS;
2688 }
2689 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2690 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2691 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2692 return VERR_NEM_INIT_FAILED;
2693 }
2694 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2695 return rc;
2696 }
2697
2698 /* We already unmapped it above. */
2699 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2700 return VINF_SUCCESS;
2701}
2702
2703
/**
 * Worker that unmaps a single page from the Hyper-V partition without
 * establishing a new mapping (the page will be remapped lazily on access).
 *
 * @returns VBox status code (VERR_NEM_IPE_6 on unmap failure).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical address of the page (any offset
 *                      within the page is masked off).
 * @param   pu2State    Our page state (input/output, set to UNMAPPED on success).
 */
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    /* Already unmapped? Then there is nothing to do. */
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
    if (SUCCEEDED(hrc))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
}
2729
2730
2731int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2732 PGMPAGETYPE enmType, uint8_t *pu2State)
2733{
2734 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2735 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2736 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
2737
2738 int rc;
2739 RT_NOREF_PV(fPageProt);
2740 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2741 return rc;
2742}
2743
2744
2745VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2746 PGMPAGETYPE enmType, uint8_t *pu2State)
2747{
2748 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2749 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2750 Assert(VM_IS_NEM_ENABLED(pVM));
2751 RT_NOREF(HCPhys, enmType, pvR3);
2752
2753 RT_NOREF_PV(fPageProt);
2754 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2755}
2756
2757
2758VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2759 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2760{
2761 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
2762 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
2763 Assert(VM_IS_NEM_ENABLED(pVM));
2764 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
2765
2766 RT_NOREF_PV(fPageProt);
2767 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
2768}
2769
2770
2771/**
2772 * Returns features supported by the NEM backend.
2773 *
2774 * @returns Flags of features supported by the native NEM backend.
2775 * @param pVM The cross context VM structure.
2776 */
2777VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
2778{
2779 RT_NOREF(pVM);
2780 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
2781 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
2782}
2783
2784
2785/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
2786 *
2787 * Open questions:
2788 * - Why can't one read and write WHvArm64RegisterId*
2789 * - WHvArm64RegisterDbgbcr0El1 is not readable?
2790 * - Getting notified about system register reads/writes (GIC)?
2791 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
2792 * - Handling of (vTimer) interrupts, how is WHvRequestInterrupt() supposed to be used?
2793 */
2794
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette