VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@72816

Last change on this file since 72816 was 72689, checked in by vboxsync, 6 years ago

NEM/win: Redid the IoCtlMessageSlotHandleAndGetNext stuff during nemHCWinStopCpu(). Ran into issue with DRx syncing, added todo. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.4 KB
Line 
1/* $Id: NEMR0Native-win.cpp 72689 2018-06-26 02:37:40Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44
45
46/* Assert compile context sanity. */
47#ifndef RT_OS_WINDOWS
48# error "Windows only file!"
49#endif
50#ifndef RT_ARCH_AMD64
51# error "AMD64 only file!"
52#endif
53
54
55/*********************************************************************************************************************************
56* Internal Functions *
57*********************************************************************************************************************************/
58typedef uint32_t DWORD; /* for winerror.h constants */
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
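/** The NT kernel's HvlInvokeHypercall export, resolved at runtime in
 * NEMR0InitVM().  Takes the hypercall control value and the physical
 * addresses of the input and output pages; the return value is the
 * hypervisor result (for rep hypercalls the completed element count lands
 * in the upper half, see the HV_MAKE_CALL_REP_RET checks further down). */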
64static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
65
66/**
67 * WinHvr.sys!WinHvDepositMemory
68 *
69 * This API will try to allocate cPages on IdealNode and deposit them to the
70 * hypervisor for use with the given partition. The memory will be freed when
71 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
72 *
73 * Apparently node numbers above 64 have a different meaning.
74 */
75static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
76
77
78/*********************************************************************************************************************************
79* Internal Functions *
80*********************************************************************************************************************************/
81NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
82 uint32_t cPages, uint32_t fFlags);
83NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
84NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
85NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
86NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
87NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
88DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
89 void *pvOutput, uint32_t cbOutput);
90
91
92/*
93 * Instantiate the code we share with ring-3.
94 */
95#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
96
97/**
98 * Worker for NEMR0InitVM that allocates a hypercall page.
99 *
100 * @returns VBox status code.
101 * @param pHypercallData The hypercall data page to initialize.
102 */
103static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
104{
105 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
106 if (RT_SUCCESS(rc))
107 {
108 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
109 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
110 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
111 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
112 if (RT_SUCCESS(rc))
113 return VINF_SUCCESS;
114
115 /* bail out */
116 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
117 }
118 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
119 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
120 pHypercallData->pbPage = NULL;
121 return rc;
122}
123
124/**
125 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
126 *
127 * @param pHypercallData The hypercall data page to uninitialize.
128 */
129static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
130{
131 /* Check pbPage here since it is NULL when not initialized, whereas the
132 hMemObj can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
133 if (pHypercallData->pbPage != NULL)
134 {
135 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
136 pHypercallData->pbPage = NULL;
137 }
138 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
139 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
140}
141
142
143/**
144 * Called by NEMR3Init to make sure we've got what we need.
145 *
146 * @returns VBox status code.
147 * @param pGVM The ring-0 VM handle.
148 * @param pVM The cross context VM handle.
149 * @thread EMT(0)
150 */
151VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
152{
153 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
154 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
155
156 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
157 AssertRCReturn(rc, rc);
158
159 /*
160 * We want to perform hypercalls here. The NT kernel started to expose a very
161 * low-level interface for doing this somewhere between builds 14271 and 16299.
162 * Since we need build 17134 to get anywhere at all, the exact build is not relevant here.
163 *
164 * We also need to deposit memory to the hypervisor for use with the partition
165 * (page mapping structures and the like).
166 */
167 RTDBGKRNLINFO hKrnlInfo;
168 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
169 if (RT_SUCCESS(rc))
170 {
171 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
172 if (RT_SUCCESS(rc))
173 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
174 RTR0DbgKrnlInfoRelease(hKrnlInfo);
175 if (RT_SUCCESS(rc))
176 {
177 /*
178 * Allocate a page for non-EMT threads to use for hypercalls (update
179 * statistics and such) and a critical section protecting it.
180 */
181 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
182 if (RT_SUCCESS(rc))
183 {
184 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
185 if (RT_SUCCESS(rc))
186 {
187 /*
188 * Allocate a page for each VCPU to place hypercall data on.
189 */
190 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
191 {
192 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
193 if (RT_FAILURE(rc))
194 {
195 while (i-- > 0)
196 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
197 break;
198 }
199 }
200 if (RT_SUCCESS(rc))
201 {
202 /*
203 * So far, so good.
204 */
205 return rc;
206 }
207
208 /*
209 * Bail out.
210 */
211 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
212 }
213 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
214 }
215 }
216 else
217 rc = VERR_NEM_MISSING_KERNEL_API;
218 }
219
220 RT_NOREF(pVM);
221 return rc;
222}
223
224
225/**
226 * Perform an I/O control operation on the partition handle (VID.SYS).
227 *
228 * @returns NT status code.
229 * @param pGVM The ring-0 VM structure.
230 * @param uFunction The function to perform.
231 * @param pvInput The input buffer. This must point within the VM
232 * structure so we can easily convert to a ring-3
233 * pointer if necessary.
234 * @param cbInput The size of the input. @a pvInput must be NULL when
235 * zero.
236 * @param pvOutput The output buffer. This must also point within the
237 * VM structure for ring-3 pointer magic.
238 * @param cbOutput The size of the output. @a pvOutput must be NULL
239 * when zero.
240 */
241DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
242 void *pvOutput, uint32_t cbOutput)
243{
244#ifdef RT_STRICT
245 /*
246 * Input and output parameters are part of the VM CPU structure.
247 */
248 PVM pVM = pGVM->pVM;
249 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
250 if (pvInput)
251 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
252 if (pvOutput)
253 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
254#endif
255
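/* SUPR0IoCtlPerform is handed both the ring-0 and the ring-3 view of each
   buffer; the ring-3 addresses are derived from the ring-0 ones using the
   conversion delta established in NEMR0InitVMPart2(). */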
256 int32_t rcNt = STATUS_UNSUCCESSFUL;
257 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
258 pvInput,
259 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
260 cbInput,
261 pvOutput,
262 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
263 cbOutput,
264 &rcNt);
265 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
266 return (NTSTATUS)rcNt;
267 return STATUS_UNSUCCESSFUL;
268}
269
270
271/**
272 * 2nd part of the initialization, after we've got a partition handle.
273 *
274 * @returns VBox status code.
275 * @param pGVM The ring-0 VM handle.
276 * @param pVM The cross context VM handle.
277 * @thread EMT(0)
278 */
279VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
280{
281 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
282 AssertRCReturn(rc, rc);
283 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
284
285 /*
286 * Copy and validate the I/O control information from ring-3.
287 */
288 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
289 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
290 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
291 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
292 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
293
294 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
295 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
296 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
297 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
298 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
299 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
300
301 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
302 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
303 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
304 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
305 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
306 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
307 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
308
309 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
310 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
311 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
312 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
313 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
314 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
315 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
316 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
317
318 /*
319 * Set up an I/O control context for the partition handle for later use.
320 */
321 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
322 AssertLogRelRCReturn(rc, rc);
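/* The delta translates a ring-0 address within the VM structure into the
   corresponding ring-3 address, which nemR0NtPerformIoControl() uses when
   passing buffers to VID.SYS. */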
323 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
324
325 /*
326 * Get the partition ID.
327 */
328 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
329 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
330 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
331 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
332 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
333 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
334 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
335 VERR_NEM_INIT_FAILED);
336
337 return rc;
338}
339
340
341/**
342 * Cleanup the NEM parts of the VM in ring-0.
343 *
344 * This is always called and must deal with the state regardless of whether
345 * NEMR0InitVM() was called or not. So, take care here.
346 *
347 * @param pGVM The ring-0 VM handle.
348 */
349VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
350{
351 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
352
353 /* Clean up I/O control context. */
354 if (pGVM->nem.s.pIoCtlCtx)
355 {
356 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
357 AssertRC(rc);
358 pGVM->nem.s.pIoCtlCtx = NULL;
359 }
360
361 /* Free the hypercall pages. */
362 VMCPUID i = pGVM->cCpus;
363 while (i-- > 0)
364 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
365
366 /* The non-EMT one too. */
367 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
368 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
369 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
370}
371
372
373#if 0 /* for debugging GPA unmapping. */
374static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
375{
376 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
377 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
378 pIn->PartitionId = pGVM->nem.s.idHvPartition;
379 pIn->VpIndex = pGVCpu->idCpu;
380 pIn->ByteCount = 0x10;
381 pIn->BaseGpa = GCPhys;
382 pIn->ControlFlags.AsUINT64 = 0;
383 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
384 memset(pOut, 0xfe, sizeof(*pOut));
385 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
386 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
387 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
388 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
389 __debugbreak();
390
391 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
392}
393#endif
394
395
396/**
397 * Worker for NEMR0MapPages and others.
398 */
399NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
400 uint32_t cPages, uint32_t fFlags)
401{
402 /*
403 * Validate.
404 */
405 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
406
407 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
408 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
409 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
410 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
411 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
412 if (GCPhysSrc != GCPhysDst)
413 {
414 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
415 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
416 }
417
418 /*
419 * Compose and make the hypercall.
420 * Ring-3 is not allowed to fill in the host physical addresses of the call.
421 */
422 for (uint32_t iTries = 0;; iTries++)
423 {
424 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
425 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
426 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
427 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
428 pMapPages->MapFlags = fFlags;
429 pMapPages->u32ExplicitPadding = 0;
430 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
431 {
432 RTHCPHYS HCPhys = NIL_RTGCPHYS;
433 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
434 AssertRCReturn(rc, rc);
435 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
436 }
437
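/* This is a rep hypercall; on full success the result carries the number of
   pages processed in its upper half, i.e. cPages << 32 with a zero status
   in the low bits. */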
438 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
439 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
440 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
441 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
442 if (uResult == ((uint64_t)cPages << 32))
443 return VINF_SUCCESS;
444
445 /*
446 * If the partition is out of memory, try to donate another 512 pages to
447 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
448 */
449 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
450 || iTries > 16
451 || g_pfnWinHvDepositMemory == NULL)
452 {
453 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
454 return VERR_NEM_MAP_PAGES_FAILED;
455 }
456
457 size_t cPagesAdded = 0;
458 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
459 if (!cPagesAdded)
460 {
461 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
462 return VERR_NEM_MAP_PAGES_FAILED;
463 }
464 }
465}
466
467
468/**
469 * Maps pages into the guest physical address space.
470 *
471 * Generally the caller will be under the PGM lock already, so no extra effort
472 * is needed to make sure all changes happen under it.
473 *
474 * @returns VBox status code.
475 * @param pGVM The ring-0 VM handle.
476 * @param pVM The cross context VM handle.
477 * @param idCpu The calling EMT. Necessary for getting the
478 * hypercall page and arguments.
479 * @thread EMT(idCpu)
480 */
481VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
482{
483 /*
484 * Unpack the call.
485 */
486 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
487 if (RT_SUCCESS(rc))
488 {
489 PVMCPU pVCpu = &pVM->aCpus[idCpu];
490 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
491
492 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
493 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
494 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
495 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
496
497 /*
498 * Do the work.
499 */
500 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
501 }
502 return rc;
503}
504
505
506/**
507 * Worker for NEMR0UnmapPages and others.
508 */
509NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
510{
511 /*
512 * Validate input.
513 */
514 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
515
516 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
517 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
518 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
519 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
520
521 /*
522 * Compose and make the hypercall.
523 */
524 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
525 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
526 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
527 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
528 pUnmapPages->fFlags = 0;
529
530 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
531 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
532 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
533 if (uResult == ((uint64_t)cPages << 32))
534 {
535#if 1 /* Do we need to do this? Hopefully not... */
536 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
537 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
538 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
539#endif
540 return VINF_SUCCESS;
541 }
542
543 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
544 return VERR_NEM_UNMAP_PAGES_FAILED;
545}
546
547
548/**
549 * Unmaps pages from the guest physical address space.
550 *
551 * Generally the caller will be under the PGM lock already, so no extra effort
552 * is needed to make sure all changes happen under it.
553 *
554 * @returns VBox status code.
555 * @param pGVM The ring-0 VM handle.
556 * @param pVM The cross context VM handle.
557 * @param idCpu The calling EMT. Necessary for getting the
558 * hypercall page and arguments.
559 * @thread EMT(idCpu)
560 */
561VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
562{
563 /*
564 * Unpack the call.
565 */
566 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
567 if (RT_SUCCESS(rc))
568 {
569 PVMCPU pVCpu = &pVM->aCpus[idCpu];
570 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
571
572 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
573 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
574
575 /*
576 * Do the work.
577 */
578 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
579 }
580 return rc;
581}
582
583
584/**
585 * Worker for NEMR0ExportState.
586 *
587 * The intention is to use it internally later.
588 *
589 * @returns VBox status code.
590 * @param pGVM The ring-0 VM handle.
591 * @param pGVCpu The ring-0 VCPU handle.
592 * @param pCtx The CPU context structure to export from.
593 */
594NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
595{
596 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
597 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
598 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
599 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
600
601 pInput->PartitionId = pGVM->nem.s.idHvPartition;
602 pInput->VpIndex = pGVCpu->idCpu;
603 pInput->RsvdZ = 0;
604
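/* Bits that are clear in fExtrn are the parts of the context CPUM currently
   owns and which therefore need to be pushed back to Hyper-V; if nothing is
   dirty and the interrupt window setup is unchanged, there is nothing to do. */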
605 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
606 if ( !fWhat
607 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
608 return VINF_SUCCESS;
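/* Build the HvCallSetVpRegisters rep list: one element per register to set,
   with iReg counting the elements for the rep hypercall made at the bottom
   of the function. */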
609 uintptr_t iReg = 0;
610
611 /* GPRs */
612 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
613 {
614 if (fWhat & CPUMCTX_EXTRN_RAX)
615 {
616 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
617 pInput->Elements[iReg].Name = HvX64RegisterRax;
618 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
619 iReg++;
620 }
621 if (fWhat & CPUMCTX_EXTRN_RCX)
622 {
623 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
624 pInput->Elements[iReg].Name = HvX64RegisterRcx;
625 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
626 iReg++;
627 }
628 if (fWhat & CPUMCTX_EXTRN_RDX)
629 {
630 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
631 pInput->Elements[iReg].Name = HvX64RegisterRdx;
632 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
633 iReg++;
634 }
635 if (fWhat & CPUMCTX_EXTRN_RBX)
636 {
637 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
638 pInput->Elements[iReg].Name = HvX64RegisterRbx;
639 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
640 iReg++;
641 }
642 if (fWhat & CPUMCTX_EXTRN_RSP)
643 {
644 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
645 pInput->Elements[iReg].Name = HvX64RegisterRsp;
646 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
647 iReg++;
648 }
649 if (fWhat & CPUMCTX_EXTRN_RBP)
650 {
651 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
652 pInput->Elements[iReg].Name = HvX64RegisterRbp;
653 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
654 iReg++;
655 }
656 if (fWhat & CPUMCTX_EXTRN_RSI)
657 {
658 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
659 pInput->Elements[iReg].Name = HvX64RegisterRsi;
660 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
661 iReg++;
662 }
663 if (fWhat & CPUMCTX_EXTRN_RDI)
664 {
665 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
666 pInput->Elements[iReg].Name = HvX64RegisterRdi;
667 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
668 iReg++;
669 }
670 if (fWhat & CPUMCTX_EXTRN_R8_R15)
671 {
672 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
673 pInput->Elements[iReg].Name = HvX64RegisterR8;
674 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
675 iReg++;
676 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
677 pInput->Elements[iReg].Name = HvX64RegisterR9;
678 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
679 iReg++;
680 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
681 pInput->Elements[iReg].Name = HvX64RegisterR10;
682 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
683 iReg++;
684 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
685 pInput->Elements[iReg].Name = HvX64RegisterR11;
686 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
687 iReg++;
688 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
689 pInput->Elements[iReg].Name = HvX64RegisterR12;
690 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
691 iReg++;
692 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
693 pInput->Elements[iReg].Name = HvX64RegisterR13;
694 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
695 iReg++;
696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
697 pInput->Elements[iReg].Name = HvX64RegisterR14;
698 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
699 iReg++;
700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
701 pInput->Elements[iReg].Name = HvX64RegisterR15;
702 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
703 iReg++;
704 }
705 }
706
707 /* RIP & Flags */
708 if (fWhat & CPUMCTX_EXTRN_RIP)
709 {
710 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
711 pInput->Elements[iReg].Name = HvX64RegisterRip;
712 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
713 iReg++;
714 }
715 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
716 {
717 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
718 pInput->Elements[iReg].Name = HvX64RegisterRflags;
719 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
720 iReg++;
721 }
722
723 /* Segments */
724#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
725 do { \
726 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
727 pInput->Elements[a_idx].Name = a_enmName; \
728 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
729 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
730 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
731 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
732 } while (0)
733 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
734 {
735 if (fWhat & CPUMCTX_EXTRN_CS)
736 {
737 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
738 iReg++;
739 }
740 if (fWhat & CPUMCTX_EXTRN_ES)
741 {
742 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
743 iReg++;
744 }
745 if (fWhat & CPUMCTX_EXTRN_SS)
746 {
747 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
748 iReg++;
749 }
750 if (fWhat & CPUMCTX_EXTRN_DS)
751 {
752 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
753 iReg++;
754 }
755 if (fWhat & CPUMCTX_EXTRN_FS)
756 {
757 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
758 iReg++;
759 }
760 if (fWhat & CPUMCTX_EXTRN_GS)
761 {
762 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
763 iReg++;
764 }
765 }
766
767 /* Descriptor tables & task segment. */
768 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
769 {
770 if (fWhat & CPUMCTX_EXTRN_LDTR)
771 {
772 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
773 iReg++;
774 }
775 if (fWhat & CPUMCTX_EXTRN_TR)
776 {
777 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
778 iReg++;
779 }
780
781 if (fWhat & CPUMCTX_EXTRN_IDTR)
782 {
783 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
784 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
785 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
786 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
787 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
788 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
789 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
790 iReg++;
791 }
792 if (fWhat & CPUMCTX_EXTRN_GDTR)
793 {
794 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
795 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
796 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
797 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
798 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
799 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
800 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
801 iReg++;
802 }
803 }
804
805 /* Control registers. */
806 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
807 {
808 if (fWhat & CPUMCTX_EXTRN_CR0)
809 {
810 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
811 pInput->Elements[iReg].Name = HvX64RegisterCr0;
812 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
813 iReg++;
814 }
815 if (fWhat & CPUMCTX_EXTRN_CR2)
816 {
817 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
818 pInput->Elements[iReg].Name = HvX64RegisterCr2;
819 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
820 iReg++;
821 }
822 if (fWhat & CPUMCTX_EXTRN_CR3)
823 {
824 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
825 pInput->Elements[iReg].Name = HvX64RegisterCr3;
826 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
827 iReg++;
828 }
829 if (fWhat & CPUMCTX_EXTRN_CR4)
830 {
831 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
832 pInput->Elements[iReg].Name = HvX64RegisterCr4;
833 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
834 iReg++;
835 }
836 }
837 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
838 {
839 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
840 pInput->Elements[iReg].Name = HvX64RegisterCr8;
841 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
842 iReg++;
843 }
844
845 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
846
847 /* Debug registers. */
848/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
849 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
850 {
851 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
852 pInput->Elements[iReg].Name = HvX64RegisterDr0;
853 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
854 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
855 iReg++;
856 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
857 pInput->Elements[iReg].Name = HvX64RegisterDr1;
858 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
859 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
860 iReg++;
861 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
862 pInput->Elements[iReg].Name = HvX64RegisterDr2;
863 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
864 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
865 iReg++;
866 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
867 pInput->Elements[iReg].Name = HvX64RegisterDr3;
868 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
869 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
870 iReg++;
871 }
872 if (fWhat & CPUMCTX_EXTRN_DR6)
873 {
874 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
875 pInput->Elements[iReg].Name = HvX64RegisterDr6;
876 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
877 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
878 iReg++;
879 }
880 if (fWhat & CPUMCTX_EXTRN_DR7)
881 {
882 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
883 pInput->Elements[iReg].Name = HvX64RegisterDr7;
884 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
885 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
886 iReg++;
887 }
888
889 /* Floating point state. */
890 if (fWhat & CPUMCTX_EXTRN_X87)
891 {
892 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
893 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
894 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
895 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
896 iReg++;
897 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
898 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
899 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
900 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
901 iReg++;
902 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
903 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
904 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
905 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
906 iReg++;
907 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
908 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
909 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
910 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
911 iReg++;
912 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
913 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
914 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
915 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
916 iReg++;
917 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
918 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
919 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
920 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
921 iReg++;
922 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
923 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
924 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
925 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
926 iReg++;
927 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
928 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
929 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
930 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
931 iReg++;
932
933 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
934 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
935 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
936 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
937 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
938 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
939 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
940 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
941 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
942 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
943 iReg++;
944/** @todo we've got trouble if we try to write just SSE w/o X87. */
945 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
946 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
947 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
948 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
949 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
950 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
951 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
952 iReg++;
953 }
954
955 /* Vector state. */
956 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
957 {
958 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
959 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
960 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
961 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
962 iReg++;
963 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
964 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
965 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
966 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
967 iReg++;
968 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
969 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
970 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
971 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
972 iReg++;
973 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
974 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
975 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
976 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
977 iReg++;
978 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
979 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
980 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
981 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
982 iReg++;
983 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
984 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
985 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
986 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
987 iReg++;
988 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
989 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
990 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
991 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
992 iReg++;
993 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
994 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
995 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
996 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
997 iReg++;
998 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
999 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1000 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1001 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1002 iReg++;
1003 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1004 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1005 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1006 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1007 iReg++;
1008 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1009 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1010 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1011 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1012 iReg++;
1013 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1014 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1015 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1016 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1017 iReg++;
1018 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1019 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1020 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1021 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1022 iReg++;
1023 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1024 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1025 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1026 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1027 iReg++;
1028 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1029 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1030 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1031 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1032 iReg++;
1033 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1034 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1035 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1036 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1037 iReg++;
1038 }
1039
1040 /* MSRs */
1041 // HvX64RegisterTsc - don't touch
1042 if (fWhat & CPUMCTX_EXTRN_EFER)
1043 {
1044 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1045 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1046 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1047 iReg++;
1048 }
1049 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1050 {
1051 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1052 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1053 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1054 iReg++;
1055 }
1056 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1057 {
1058 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1059 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1060 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1061 iReg++;
1062 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1063 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1064 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1065 iReg++;
1066 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1067 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1068 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1069 iReg++;
1070 }
1071 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1072 {
1073 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1074 pInput->Elements[iReg].Name = HvX64RegisterStar;
1075 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1076 iReg++;
1077 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1078 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1079 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1080 iReg++;
1081 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1082 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1083 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1084 iReg++;
1085 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1086 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1087 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1088 iReg++;
1089 }
1090 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1091 {
1092 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1093 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1094 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1095 iReg++;
1096 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1097 pInput->Elements[iReg].Name = HvX64RegisterPat;
1098 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1099 iReg++;
1100#if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1101 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1102 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1103 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1104 iReg++;
1105#endif
1106
1107 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1108
1109 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1110 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1111 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1112 iReg++;
1113
1114 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1115
1116 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1117 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1118 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1119 iReg++;
1120 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1121 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1122 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1123 iReg++;
1124 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1125 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1126 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1127 iReg++;
1128 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1129 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1130 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1131 iReg++;
1132 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1133 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1134 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1135 iReg++;
1136 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1137 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1138 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1139 iReg++;
1140 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1141 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1142 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1143 iReg++;
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1146 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1150 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1151 iReg++;
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1154 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1158 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1159 iReg++;
1160 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1161 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1162 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1163 iReg++;
1164
1165#if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1166 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1167 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1168 {
1169 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1170 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1171 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1172 iReg++;
1173 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1174 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1175 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1176 iReg++;
1177 }
1178#endif
1179 }
1180
1181 /* event injection (clear it). */
1182 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1183 {
1184 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1185 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1186 pInput->Elements[iReg].Value.Reg64 = 0;
1187 iReg++;
1188 }
1189
1190 /* Interruptibility state. This can get a little complicated since we get
1191 half of the state via HV_X64_VP_EXECUTION_STATE. */
1192 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1193 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1194 {
1195 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1196 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1197 pInput->Elements[iReg].Value.Reg64 = 0;
1198 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1199 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1200 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1201 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1202 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1203 iReg++;
1204 }
1205 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1206 {
1207 if ( pVCpu->nem.s.fLastInterruptShadow
1208 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1209 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1210 {
1211 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1212 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1213 pInput->Elements[iReg].Value.Reg64 = 0;
1214 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1215 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1216 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1217 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1218 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1219 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1220 iReg++;
1221 }
1222 }
1223 else
1224 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1225
1226 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1227 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1228 if ( fDesiredIntWin
1229 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1230 {
1231 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1232 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1233 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1234 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1235 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1236 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1237 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1238 iReg++;
1239 }
1240
1241 /// @todo HvRegisterPendingEvent0
1242 /// @todo HvRegisterPendingEvent1
1243
1244 /*
1245 * Set the registers.
1246 */
1247 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1248
1249 /*
1250 * Make the hypercall.
1251 */
1252 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1253 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1254 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1255 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1256 VERR_NEM_SET_REGISTERS_FAILED);
1257 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1258 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
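/* Everything we just pushed now lives in the hypervisor, so mark the whole
   context as externalized; it must be imported again before CPUM reads it. */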
1259 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1260 return VINF_SUCCESS;
1261}
1262
1263
1264/**
1265 * Export the state to the native API (out of CPUMCTX).
1266 *
1267 * @returns VBox status code
1268 * @param pGVM The ring-0 VM handle.
1269 * @param pVM The cross context VM handle.
1270 * @param idCpu The calling EMT. Necessary for getting the
1271 * hypercall page and arguments.
1272 */
1273VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1274{
1275 /*
1276 * Validate the call.
1277 */
1278 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1279 if (RT_SUCCESS(rc))
1280 {
1281 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1282 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1283 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1284
1285 /*
1286 * Call worker.
1287 */
1288 rc = nemR0WinExportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1289 }
1290 return rc;
1291}
1292
1293
1294/**
1295 * Worker for NEMR0ImportState.
1296 *
1297 * The intention is to use it internally later.
1298 *
1299 * @returns VBox status code.
1300 * @param pGVM The ring-0 VM handle.
1301 * @param pGVCpu The ring-0 VCPU handle.
1302 * @param pCtx The CPU context structure to import into.
1303 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1304 */
1305NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1306{
1307 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1308 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1309 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1310
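/* Only fetch state that is actually marked external; anything not in fExtrn
   is already up to date in the CPUM context. */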
1311 fWhat &= pCtx->fExtrn;
1312
1313 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1314 pInput->VpIndex = pGVCpu->idCpu;
1315 pInput->fFlags = 0;
1316
1317 /* GPRs */
1318 uintptr_t iReg = 0;
1319 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1320 {
1321 if (fWhat & CPUMCTX_EXTRN_RAX)
1322 pInput->Names[iReg++] = HvX64RegisterRax;
1323 if (fWhat & CPUMCTX_EXTRN_RCX)
1324 pInput->Names[iReg++] = HvX64RegisterRcx;
1325 if (fWhat & CPUMCTX_EXTRN_RDX)
1326 pInput->Names[iReg++] = HvX64RegisterRdx;
1327 if (fWhat & CPUMCTX_EXTRN_RBX)
1328 pInput->Names[iReg++] = HvX64RegisterRbx;
1329 if (fWhat & CPUMCTX_EXTRN_RSP)
1330 pInput->Names[iReg++] = HvX64RegisterRsp;
1331 if (fWhat & CPUMCTX_EXTRN_RBP)
1332 pInput->Names[iReg++] = HvX64RegisterRbp;
1333 if (fWhat & CPUMCTX_EXTRN_RSI)
1334 pInput->Names[iReg++] = HvX64RegisterRsi;
1335 if (fWhat & CPUMCTX_EXTRN_RDI)
1336 pInput->Names[iReg++] = HvX64RegisterRdi;
1337 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1338 {
1339 pInput->Names[iReg++] = HvX64RegisterR8;
1340 pInput->Names[iReg++] = HvX64RegisterR9;
1341 pInput->Names[iReg++] = HvX64RegisterR10;
1342 pInput->Names[iReg++] = HvX64RegisterR11;
1343 pInput->Names[iReg++] = HvX64RegisterR12;
1344 pInput->Names[iReg++] = HvX64RegisterR13;
1345 pInput->Names[iReg++] = HvX64RegisterR14;
1346 pInput->Names[iReg++] = HvX64RegisterR15;
1347 }
1348 }
1349
1350 /* RIP & Flags */
1351 if (fWhat & CPUMCTX_EXTRN_RIP)
1352 pInput->Names[iReg++] = HvX64RegisterRip;
1353 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1354 pInput->Names[iReg++] = HvX64RegisterRflags;
1355
1356 /* Segments */
1357 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1358 {
1359 if (fWhat & CPUMCTX_EXTRN_CS)
1360 pInput->Names[iReg++] = HvX64RegisterCs;
1361 if (fWhat & CPUMCTX_EXTRN_ES)
1362 pInput->Names[iReg++] = HvX64RegisterEs;
1363 if (fWhat & CPUMCTX_EXTRN_SS)
1364 pInput->Names[iReg++] = HvX64RegisterSs;
1365 if (fWhat & CPUMCTX_EXTRN_DS)
1366 pInput->Names[iReg++] = HvX64RegisterDs;
1367 if (fWhat & CPUMCTX_EXTRN_FS)
1368 pInput->Names[iReg++] = HvX64RegisterFs;
1369 if (fWhat & CPUMCTX_EXTRN_GS)
1370 pInput->Names[iReg++] = HvX64RegisterGs;
1371 }
1372
1373 /* Descriptor tables and the task segment. */
1374 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1375 {
1376 if (fWhat & CPUMCTX_EXTRN_LDTR)
1377 pInput->Names[iReg++] = HvX64RegisterLdtr;
1378 if (fWhat & CPUMCTX_EXTRN_TR)
1379 pInput->Names[iReg++] = HvX64RegisterTr;
1380 if (fWhat & CPUMCTX_EXTRN_IDTR)
1381 pInput->Names[iReg++] = HvX64RegisterIdtr;
1382 if (fWhat & CPUMCTX_EXTRN_GDTR)
1383 pInput->Names[iReg++] = HvX64RegisterGdtr;
1384 }
1385
1386 /* Control registers. */
1387 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1388 {
1389 if (fWhat & CPUMCTX_EXTRN_CR0)
1390 pInput->Names[iReg++] = HvX64RegisterCr0;
1391 if (fWhat & CPUMCTX_EXTRN_CR2)
1392 pInput->Names[iReg++] = HvX64RegisterCr2;
1393 if (fWhat & CPUMCTX_EXTRN_CR3)
1394 pInput->Names[iReg++] = HvX64RegisterCr3;
1395 if (fWhat & CPUMCTX_EXTRN_CR4)
1396 pInput->Names[iReg++] = HvX64RegisterCr4;
1397 }
1398 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1399 pInput->Names[iReg++] = HvX64RegisterCr8;
1400
1401 /* Debug registers. */
1402 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1403 {
1404 pInput->Names[iReg++] = HvX64RegisterDr0;
1405 pInput->Names[iReg++] = HvX64RegisterDr1;
1406 pInput->Names[iReg++] = HvX64RegisterDr2;
1407 pInput->Names[iReg++] = HvX64RegisterDr3;
1408 }
1409 if (fWhat & CPUMCTX_EXTRN_DR6)
1410 pInput->Names[iReg++] = HvX64RegisterDr6;
1411 if (fWhat & CPUMCTX_EXTRN_DR7)
1412 pInput->Names[iReg++] = HvX64RegisterDr7;
1413
1414 /* Floating point state. */
1415 if (fWhat & CPUMCTX_EXTRN_X87)
1416 {
1417 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1418 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1419 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1420 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1421 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1422 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1423 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1424 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1425 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1426 }
1427 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1428 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
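    /* XmmControlStatus is requested for either mask because it carries MXCSR as well
       as the x87 last data pointer fields (see the copy-back code below). */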
1429
1430 /* Vector state. */
1431 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1432 {
1433 pInput->Names[iReg++] = HvX64RegisterXmm0;
1434 pInput->Names[iReg++] = HvX64RegisterXmm1;
1435 pInput->Names[iReg++] = HvX64RegisterXmm2;
1436 pInput->Names[iReg++] = HvX64RegisterXmm3;
1437 pInput->Names[iReg++] = HvX64RegisterXmm4;
1438 pInput->Names[iReg++] = HvX64RegisterXmm5;
1439 pInput->Names[iReg++] = HvX64RegisterXmm6;
1440 pInput->Names[iReg++] = HvX64RegisterXmm7;
1441 pInput->Names[iReg++] = HvX64RegisterXmm8;
1442 pInput->Names[iReg++] = HvX64RegisterXmm9;
1443 pInput->Names[iReg++] = HvX64RegisterXmm10;
1444 pInput->Names[iReg++] = HvX64RegisterXmm11;
1445 pInput->Names[iReg++] = HvX64RegisterXmm12;
1446 pInput->Names[iReg++] = HvX64RegisterXmm13;
1447 pInput->Names[iReg++] = HvX64RegisterXmm14;
1448 pInput->Names[iReg++] = HvX64RegisterXmm15;
1449 }
1450
1451 /* MSRs */
1452 // HvX64RegisterTsc - don't touch
1453 if (fWhat & CPUMCTX_EXTRN_EFER)
1454 pInput->Names[iReg++] = HvX64RegisterEfer;
1455 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1456 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1457 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1458 {
1459 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1460 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1461 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1462 }
1463 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1464 {
1465 pInput->Names[iReg++] = HvX64RegisterStar;
1466 pInput->Names[iReg++] = HvX64RegisterLstar;
1467 pInput->Names[iReg++] = HvX64RegisterCstar;
1468 pInput->Names[iReg++] = HvX64RegisterSfmask;
1469 }
1470
1471#ifdef LOG_ENABLED
1472 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1473#endif
1474 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1475 {
1476 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1477 pInput->Names[iReg++] = HvX64RegisterPat;
1478#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1479 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1480#endif
1481 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1482 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1483 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1484 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1485 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1486 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1487 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1488 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1489 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1490 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1491 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1492 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1493 pInput->Names[iReg++] = HvX64RegisterTscAux;
1494#if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1495 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1496 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1497#endif
1498#ifdef LOG_ENABLED
1499 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1500 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1501#endif
1502 }
1503
1504 /* Interruptibility. */
1505 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1506 {
1507 pInput->Names[iReg++] = HvRegisterInterruptState;
1508 pInput->Names[iReg++] = HvX64RegisterRip;
1509 }
1510
1511 /* event injection */
1512 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1513 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1514 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1515 size_t const cRegs = iReg;
1516 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1517
1518 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1519 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1520 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
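    /* Layout on the hypercall page: the input header and name array come first,
       followed (32-byte aligned) by the output HV_REGISTER_VALUE array; the
       assertion above guarantees that everything fits within the single page. */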
1521
1522 /*
1523 * Make the hypercall.
1524 */
1525 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1526 pGVCpu->nem.s.HypercallData.HCPhysPage,
1527 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1528 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1529 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1530 VERR_NEM_GET_REGISTERS_FAILED);
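    /* Note: per the Hyper-V TLFS the hypercall result packs the HV status code in the
       low 16 bits and the number of completed repetitions further up, so comparing
       against HV_MAKE_CALL_REP_RET(cRegs) checks both for success and that all the
       requested registers were actually read. */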
1531 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1532
1533 /*
1534 * Copy information to the CPUM context.
1535 */
1536 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1537 iReg = 0;
1538
1539 /* GPRs */
1540 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1541 {
1542 if (fWhat & CPUMCTX_EXTRN_RAX)
1543 {
1544 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1545 pCtx->rax = paValues[iReg++].Reg64;
1546 }
1547 if (fWhat & CPUMCTX_EXTRN_RCX)
1548 {
1549 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1550 pCtx->rcx = paValues[iReg++].Reg64;
1551 }
1552 if (fWhat & CPUMCTX_EXTRN_RDX)
1553 {
1554 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1555 pCtx->rdx = paValues[iReg++].Reg64;
1556 }
1557 if (fWhat & CPUMCTX_EXTRN_RBX)
1558 {
1559 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1560 pCtx->rbx = paValues[iReg++].Reg64;
1561 }
1562 if (fWhat & CPUMCTX_EXTRN_RSP)
1563 {
1564 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1565 pCtx->rsp = paValues[iReg++].Reg64;
1566 }
1567 if (fWhat & CPUMCTX_EXTRN_RBP)
1568 {
1569 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1570 pCtx->rbp = paValues[iReg++].Reg64;
1571 }
1572 if (fWhat & CPUMCTX_EXTRN_RSI)
1573 {
1574 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1575 pCtx->rsi = paValues[iReg++].Reg64;
1576 }
1577 if (fWhat & CPUMCTX_EXTRN_RDI)
1578 {
1579 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1580 pCtx->rdi = paValues[iReg++].Reg64;
1581 }
1582 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1583 {
1584 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1585 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1586 pCtx->r8 = paValues[iReg++].Reg64;
1587 pCtx->r9 = paValues[iReg++].Reg64;
1588 pCtx->r10 = paValues[iReg++].Reg64;
1589 pCtx->r11 = paValues[iReg++].Reg64;
1590 pCtx->r12 = paValues[iReg++].Reg64;
1591 pCtx->r13 = paValues[iReg++].Reg64;
1592 pCtx->r14 = paValues[iReg++].Reg64;
1593 pCtx->r15 = paValues[iReg++].Reg64;
1594 }
1595 }
1596
1597 /* RIP & Flags */
1598 if (fWhat & CPUMCTX_EXTRN_RIP)
1599 {
1600 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1601 pCtx->rip = paValues[iReg++].Reg64;
1602 }
1603 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1604 {
1605 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1606 pCtx->rflags.u = paValues[iReg++].Reg64;
1607 }
1608
1609 /* Segments */
1610#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1611 do { \
1612 Assert(pInput->Names[a_idx] == a_enmName); \
1613 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1614 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1615 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1616 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1617 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1618 } while (0)
1619 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1620 {
1621 if (fWhat & CPUMCTX_EXTRN_CS)
1622 {
1623 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1624 iReg++;
1625 }
1626 if (fWhat & CPUMCTX_EXTRN_ES)
1627 {
1628 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1629 iReg++;
1630 }
1631 if (fWhat & CPUMCTX_EXTRN_SS)
1632 {
1633 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1634 iReg++;
1635 }
1636 if (fWhat & CPUMCTX_EXTRN_DS)
1637 {
1638 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1639 iReg++;
1640 }
1641 if (fWhat & CPUMCTX_EXTRN_FS)
1642 {
1643 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1644 iReg++;
1645 }
1646 if (fWhat & CPUMCTX_EXTRN_GS)
1647 {
1648 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1649 iReg++;
1650 }
1651 }
1652 /* Descriptor tables and the task segment. */
1653 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1654 {
1655 if (fWhat & CPUMCTX_EXTRN_LDTR)
1656 {
1657 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1658 iReg++;
1659 }
1660 if (fWhat & CPUMCTX_EXTRN_TR)
1661 {
1662                 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So,
1663                    to avoid triggering sanity assertions around the code, always fix this up. */
1664 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1665 switch (pCtx->tr.Attr.n.u4Type)
1666 {
1667 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1668 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1669 break;
1670 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1671 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1672 break;
1673 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1674 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1675 break;
1676 }
1677 iReg++;
1678 }
1679 if (fWhat & CPUMCTX_EXTRN_IDTR)
1680 {
1681 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1682 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1683 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1684 iReg++;
1685 }
1686 if (fWhat & CPUMCTX_EXTRN_GDTR)
1687 {
1688 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1689 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1690 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1691 iReg++;
1692 }
1693 }
1694
1695 /* Control registers. */
1696 bool fMaybeChangedMode = false;
1697 bool fFlushTlb = false;
1698 bool fFlushGlobalTlb = false;
1699 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1700 {
1701 if (fWhat & CPUMCTX_EXTRN_CR0)
1702 {
1703 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1704 if (pCtx->cr0 != paValues[iReg].Reg64)
1705 {
1706 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1707 fMaybeChangedMode = true;
1708 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1709 }
1710 iReg++;
1711 }
1712 if (fWhat & CPUMCTX_EXTRN_CR2)
1713 {
1714 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1715 pCtx->cr2 = paValues[iReg].Reg64;
1716 iReg++;
1717 }
1718 if (fWhat & CPUMCTX_EXTRN_CR3)
1719 {
1720 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1721 if (pCtx->cr3 != paValues[iReg].Reg64)
1722 {
1723 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1724 fFlushTlb = true;
1725 }
1726 iReg++;
1727 }
1728 if (fWhat & CPUMCTX_EXTRN_CR4)
1729 {
1730 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1731 if (pCtx->cr4 != paValues[iReg].Reg64)
1732 {
1733 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1734 fMaybeChangedMode = true;
1735 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1736 }
1737 iReg++;
1738 }
1739 }
1740 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1741 {
1742 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1743 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1744 iReg++;
1745 }
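    /* Note: CR8 only holds bits 7:4 of the APIC TPR, hence the shift by four when
       handing the value to APICSetTpr above. */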
1746
1747 /* Debug registers. */
1748/** @todo fixme */
1749/** @todo There are recalc issues here.  Recalc will get the register content and
1750 *        that may assert since we don't clear CPUMCTX_EXTRN_ until the end. */
1751 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1752 {
1753 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1754 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1755 if (pCtx->dr[0] != paValues[iReg].Reg64)
1756 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1757 iReg++;
1758 if (pCtx->dr[1] != paValues[iReg].Reg64)
1759 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1760 iReg++;
1761 if (pCtx->dr[2] != paValues[iReg].Reg64)
1762 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1763 iReg++;
1764 if (pCtx->dr[3] != paValues[iReg].Reg64)
1765 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1766 iReg++;
1767 }
1768 if (fWhat & CPUMCTX_EXTRN_DR6)
1769 {
1770 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1771 if (pCtx->dr[6] != paValues[iReg].Reg64)
1772 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1773 iReg++;
1774 }
1775 if (fWhat & CPUMCTX_EXTRN_DR7)
1776 {
1777 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1778 if (pCtx->dr[7] != paValues[iReg].Reg64)
1779 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1780 iReg++;
1781 }
1782
1783 /* Floating point state. */
1784 if (fWhat & CPUMCTX_EXTRN_X87)
1785 {
1786 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1787 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1788 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1789 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1790 iReg++;
1791 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1792 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1793 iReg++;
1794 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1795 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1796 iReg++;
1797 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1798 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1799 iReg++;
1800 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1801 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1802 iReg++;
1803 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1804 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1805 iReg++;
1806 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1807 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1808 iReg++;
1809 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1810 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1811 iReg++;
1812
1813 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1814 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1815 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1816 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1817 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1818 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1819 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1820 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1821 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1822 iReg++;
1823 }
1824
1825 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1826 {
1827 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1828 if (fWhat & CPUMCTX_EXTRN_X87)
1829 {
1830 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1831 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1832 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1833 }
1834 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1835 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1836 iReg++;
1837 }
1838
1839 /* Vector state. */
1840 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1841 {
1842 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1843 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1844 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1845 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1846 iReg++;
1847 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1848 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1849 iReg++;
1850 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1851 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1852 iReg++;
1853 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1854 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1855 iReg++;
1856 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1857 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1858 iReg++;
1859 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1860 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1861 iReg++;
1862 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1863 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1864 iReg++;
1865 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1866 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1867 iReg++;
1868 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1869 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1870 iReg++;
1871 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1872 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1873 iReg++;
1874 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1875 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1876 iReg++;
1877 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1878 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1879 iReg++;
1880 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1881 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1882 iReg++;
1883 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1884 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1885 iReg++;
1886 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1887 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1888 iReg++;
1889 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1890 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1891 iReg++;
1892 }
1893
1894
1895 /* MSRs */
1896 // HvX64RegisterTsc - don't touch
1897 if (fWhat & CPUMCTX_EXTRN_EFER)
1898 {
1899 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1900 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1901 {
1902 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1903 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1904 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1905 pCtx->msrEFER = paValues[iReg].Reg64;
1906 fMaybeChangedMode = true;
1907 }
1908 iReg++;
1909 }
1910 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1911 {
1912 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1913 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1914 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1915 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1916 iReg++;
1917 }
1918 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1919 {
1920 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1921 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1922 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1923 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1924 iReg++;
1925
1926 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1927 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1928 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1929 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1930 iReg++;
1931
1932 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1933 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1934 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1935 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1936 iReg++;
1937 }
1938 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1939 {
1940 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1941 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1942 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1943 pCtx->msrSTAR = paValues[iReg].Reg64;
1944 iReg++;
1945
1946 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1947 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1948 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1949 pCtx->msrLSTAR = paValues[iReg].Reg64;
1950 iReg++;
1951
1952 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1953 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1954 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1955 pCtx->msrCSTAR = paValues[iReg].Reg64;
1956 iReg++;
1957
1958 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1959 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1960 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1961 pCtx->msrSFMASK = paValues[iReg].Reg64;
1962 iReg++;
1963 }
1964 bool fUpdateApicBase = false;
1965 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1966 {
1967 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1968 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1969 if (paValues[iReg].Reg64 != uOldBase)
1970 {
1971 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1972 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1973 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1974 if (rc2 == VINF_CPUM_R3_MSR_WRITE)
1975 {
1976 pVCpu->nem.s.uPendingApicBase = paValues[iReg].Reg64;
1977 fUpdateApicBase = true;
1978 }
1979 else
1980 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", VBOXSTRICTRC_VAL(rc2), paValues[iReg].Reg64));
1981 }
1982 iReg++;
1983
1984 Assert(pInput->Names[iReg] == HvX64RegisterPat);
1985 if (pCtx->msrPAT != paValues[iReg].Reg64)
1986 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
1987 pCtx->msrPAT = paValues[iReg].Reg64;
1988 iReg++;
1989
1990#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1991 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
1992 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
1993 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
1994 iReg++;
1995#endif
1996
1997 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1998 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
1999 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2000 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2001 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2002 iReg++;
2003
2004         /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2005
2006 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2007 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2008             Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2009 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2010 iReg++;
2011
2012 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2013 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2014 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2015 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2016 iReg++;
2017
2018 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2019 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2020 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2021 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2022 iReg++;
2023
2024 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2025 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2026             Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2027 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2028 iReg++;
2029
2030 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2031 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2032             Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2033 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2034 iReg++;
2035
2036 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2037 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2038             Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2039 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2040 iReg++;
2041
2042 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2043 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2044             Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2045 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2046 iReg++;
2047
2048 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2049 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2050             Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2051 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2052 iReg++;
2053
2054 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2055 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2056             Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2057 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2058 iReg++;
2059
2060 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2061 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2062             Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2063 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2064 iReg++;
2065
2066 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2067 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2068             Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2069 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2070 iReg++;
2071
2072 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2073 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2074 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2075 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2076 iReg++;
2077
2078#if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2079 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2080 {
2081 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2082 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2083 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2084 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2085 iReg++;
2086 }
2087#endif
2088#ifdef LOG_ENABLED
2089 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2090 {
2091 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2092 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2093 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2094 iReg++;
2095 }
2096#endif
2097 }
2098
2099 /* Interruptibility. */
2100 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2101 {
2102 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2103 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2104
2105 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2106 {
2107 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2108 if (paValues[iReg].InterruptState.InterruptShadow)
2109 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2110 else
2111 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2112 }
2113
2114 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2115 {
2116 if (paValues[iReg].InterruptState.NmiMasked)
2117 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2118 else
2119 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2120 }
2121
2122 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2123 iReg += 2;
2124 }
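    /* RIP was fetched together with the interrupt state (see the request code above)
       so that EMSetInhibitInterruptsPC gets the instruction address from the very
       same register snapshot. */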
2125
2126 /* Event injection. */
2127 /// @todo HvRegisterPendingInterruption
2128 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2129 if (paValues[iReg].PendingInterruption.InterruptionPending)
2130 {
2131 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2132 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2133 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2134 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2135 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2136 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2137 }
2138
2139 /// @todo HvRegisterPendingEvent0
2140 /// @todo HvRegisterPendingEvent1
2141
2142 /* Almost done, just update extrn flags and maybe change PGM mode. */
2143 pCtx->fExtrn &= ~fWhat;
2144 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2145 pCtx->fExtrn = 0;
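    /* If nothing in the import mask remains set except the NEM event-injection bit
       (which is not imported here, see the pending-event todos above), consider the
       context fully imported and clear fExtrn altogether. */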
2146
2147 /* Typical. */
2148 if (!fMaybeChangedMode && !fFlushTlb && !fUpdateApicBase)
2149 return VINF_SUCCESS;
2150
2151 /*
2152 * Slow.
2153 */
2154 int rc = VINF_SUCCESS;
2155 if (fMaybeChangedMode)
2156 {
2157 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2158 if (rc == VINF_PGM_CHANGE_MODE)
2159 {
2160 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
2161 return VERR_NEM_CHANGE_PGM_MODE;
2162 }
2163 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
2164 }
2165
2166 if (fFlushTlb)
2167 {
2168 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2169 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2170 }
2171
2172 if (fUpdateApicBase && rc == VINF_SUCCESS)
2173 {
2174 LogFlow(("nemR0WinImportState: -> VERR_NEM_UPDATE_APIC_BASE!\n"));
2175 rc = VERR_NEM_UPDATE_APIC_BASE;
2176 }
2177
2178 return rc;
2179}
2180
2181
2182/**
2183 * Import the state from the native API (back to CPUMCTX).
2184 *
2185 * @returns VBox status code
2186 * @param pGVM The ring-0 VM handle.
2187 * @param pVM The cross context VM handle.
2188 * @param idCpu The calling EMT. Necessary for getting the
2189 * hypercall page and arguments.
2190 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2191 *                      CPUMCTX_EXTRN_ALL for everything.
2192 */
2193VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2194{
2195 /*
2196 * Validate the call.
2197 */
2198 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2199 if (RT_SUCCESS(rc))
2200 {
2201 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2202 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2203 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2204
2205 /*
2206 * Call worker.
2207 */
2208 rc = nemR0WinImportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu), fWhat);
2209 }
2210 return rc;
2211}
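/*
 * Usage sketch (hypothetical caller, for illustration): ring-0 code wanting the complete
 * guest context up to date could invoke the API above roughly as
 *      int rc = NEMR0ImportState(pGVM, pVM, pVCpu->idCpu, CPUMCTX_EXTRN_ALL);
 * where pVCpu and pCtx are the caller's VCPU and context pointers; on success
 * pCtx->fExtrn is zero and the CPUM context can be read directly.
 */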
2212
2213
2214/**
2215 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2216 *
2217 * @returns VBox status code.
2218 * @param pGVM The ring-0 VM handle.
2219 * @param pGVCpu The ring-0 VCPU handle.
2220 * @param pcTicks Where to return the current CPU tick count.
2221 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2222 */
2223NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2224{
2225 /*
2226 * Hypercall parameters.
2227 */
2228 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2229 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2230 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2231
2232 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2233 pInput->VpIndex = pGVCpu->idCpu;
2234 pInput->fFlags = 0;
2235 pInput->Names[0] = HvX64RegisterTsc;
2236 pInput->Names[1] = HvX64RegisterTscAux;
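    /* The values come back in request order: paValues[0] is the TSC, paValues[1] is TSC_AUX. */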
2237
2238 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2239 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2240 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2241
2242 /*
2243 * Make the hypercall.
2244 */
2245 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2246 pGVCpu->nem.s.HypercallData.HCPhysPage,
2247 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2248 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2249 VERR_NEM_GET_REGISTERS_FAILED);
2250
2251 /*
2252 * Get results.
2253 */
2254 *pcTicks = paValues[0].Reg64;
2255 if (pcAux)
2256         *pcAux = paValues[1].Reg32; /* TSC_AUX was requested as the second register (Names[1]). */
2257 return VINF_SUCCESS;
2258}
2259
2260
2261/**
2262 * Queries the TSC and TSC_AUX values, putting the results in the VCPU's NEM hypercall area (Hypercall.QueryCpuTick).
2263 *
2264 * @returns VBox status code
2265 * @param pGVM The ring-0 VM handle.
2266 * @param pVM The cross context VM handle.
2267 * @param idCpu The calling EMT. Necessary for getting the
2268 * hypercall page and arguments.
2269 */
2270VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2271{
2272 /*
2273 * Validate the call.
2274 */
2275 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2276 if (RT_SUCCESS(rc))
2277 {
2278 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2279 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2280 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2281
2282 /*
2283 * Call worker.
2284 */
2285 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2286 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2287 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2288 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2289 }
2290 return rc;
2291}
2292
2293
2294/**
2295 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2296 *
2297 * @returns VBox status code.
2298 * @param pGVM The ring-0 VM handle.
2299 * @param pGVCpu The ring-0 VCPU handle.
2300 * @param uPausedTscValue The TSC value at the time of pausing.
2301 */
2302NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2303{
2304 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2305
2306 /*
2307 * Set up the hypercall parameters.
2308 */
2309 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2310 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2311
2312 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2313 pInput->VpIndex = 0;
2314 pInput->RsvdZ = 0;
2315 pInput->Elements[0].Name = HvX64RegisterTsc;
2316 pInput->Elements[0].Pad0 = 0;
2317 pInput->Elements[0].Pad1 = 0;
2318 pInput->Elements[0].Value.Reg128.High64 = 0;
2319 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2320
2321 /*
2322 * Disable interrupts and do the first virtual CPU.
2323 */
2324 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2325 uint64_t const uFirstTsc = ASMReadTSC();
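    /* Interrupts stay disabled across all the per-CPU hypercalls below so the TSC
       deltas measured against uFirstTsc stay as small as possible. */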
2326 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2327 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2328 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2329 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2330
2331 /*
2332      * Do the secondary processors, adjusting for the elapsed TSC and keeping our
2333      * fingers crossed that we don't introduce too much drift here.
2334 */
2335 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2336 {
2337 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2338 Assert(pInput->RsvdZ == 0);
2339 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2340 Assert(pInput->Elements[0].Pad0 == 0);
2341 Assert(pInput->Elements[0].Pad1 == 0);
2342 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2343
2344 pInput->VpIndex = iCpu;
2345 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2346 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2347
2348 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2349 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2350 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2351 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2352 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2353 }
2354
2355 /*
2356 * Done.
2357 */
2358 ASMSetFlags(fSavedFlags);
2359 return VINF_SUCCESS;
2360}
2361
2362
2363/**
2364 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2365 *
2366 * @returns VBox status code
2367 * @param pGVM The ring-0 VM handle.
2368 * @param pVM The cross context VM handle.
2369 * @param idCpu The calling EMT. Necessary for getting the
2370 * hypercall page and arguments.
2371 * @param uPausedTscValue The TSC value at the time of pausing.
2372 */
2373VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2374{
2375 /*
2376 * Validate the call.
2377 */
2378 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2379 if (RT_SUCCESS(rc))
2380 {
2381 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2382 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2383 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2384
2385 /*
2386 * Call worker.
2387 */
2388 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2389 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2390 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2391 }
2392 return rc;
2393}
2394
2395
2396VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2397{
2398#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2399 PVM pVM = pGVM->pVM;
2400 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2401#else
2402 RT_NOREF(pGVM, idCpu);
2403 return VERR_NOT_IMPLEMENTED;
2404#endif
2405}
2406
2407
2408/**
2409 * Updates statistics in the VM structure.
2410 *
2411 * @returns VBox status code.
2412 * @param pGVM The ring-0 VM handle.
2413 * @param pVM The cross context VM handle.
2414 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2415 * page and arguments.
2416 */
2417VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2418{
2419 /*
2420 * Validate the call.
2421 */
2422 int rc;
2423 if (idCpu == NIL_VMCPUID)
2424 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2425 else
2426 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2427 if (RT_SUCCESS(rc))
2428 {
2429 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2430
2431 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2432 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2433 : &pGVM->nem.s.HypercallData;
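    /* When called on an EMT the VCPU's private hypercall page is used; with NIL_VMCPUID
       the shared VM-wide page is used instead and must be guarded by the critical
       section entered below. */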
2434 if ( RT_VALID_PTR(pHypercallData->pbPage)
2435 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2436 {
2437 if (idCpu == NIL_VMCPUID)
2438 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2439 if (RT_SUCCESS(rc))
2440 {
2441 /*
2442 * Query the memory statistics for the partition.
2443 */
2444 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2445 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2446 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2447 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2448 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2449 pInput->ProximityDomainInfo.Id = 0;
2450
2451 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2452 RT_ZERO(*pOutput);
2453
2454 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2455 pHypercallData->HCPhysPage,
2456 pHypercallData->HCPhysPage + sizeof(*pInput));
2457 if (uResult == HV_STATUS_SUCCESS)
2458 {
2459 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2460 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2461 rc = VINF_SUCCESS;
2462 }
2463 else
2464 {
2465 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2466 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2467 rc = VERR_NEM_IPE_0;
2468 }
2469
2470 if (idCpu == NIL_VMCPUID)
2471 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2472 }
2473 }
2474 else
2475 rc = VERR_WRONG_ORDER;
2476 }
2477 return rc;
2478}
2479
2480
2481#if 1 && defined(DEBUG_bird)
2482/**
2483 * Debug only interface for poking around and exploring Hyper-V stuff.
2484 *
2485 * @param pGVM The ring-0 VM handle.
2486 * @param pVM The cross context VM handle.
2487 * @param idCpu The calling EMT.
2488 * @param u64Arg What to do: 0 == query VP register, 1 == query partition property, 2 == set VP register.
2489 */
2490VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2491{
2492 /*
2493 * Resolve CPU structures.
2494 */
2495 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2496 if (RT_SUCCESS(rc))
2497 {
2498 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2499
2500 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2501 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2502 if (u64Arg == 0)
2503 {
2504 /*
2505 * Query register.
2506 */
2507 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2508 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2509
2510 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2511 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2512 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2513
2514 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2515 pInput->VpIndex = pGVCpu->idCpu;
2516 pInput->fFlags = 0;
2517 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2518
2519 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2520 pGVCpu->nem.s.HypercallData.HCPhysPage,
2521 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2522 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2523 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2524 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2525 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2526 rc = VINF_SUCCESS;
2527 }
2528 else if (u64Arg == 1)
2529 {
2530 /*
2531 * Query partition property.
2532 */
2533 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2534 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2535
2536 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2537 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2538 pOutput->PropertyValue = 0;
2539
2540 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2541 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2542 pInput->uPadding = 0;
2543
2544 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2545 pGVCpu->nem.s.HypercallData.HCPhysPage,
2546 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2547 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2548 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2549 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2550 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2551 rc = VINF_SUCCESS;
2552 }
2553 else if (u64Arg == 2)
2554 {
2555 /*
2556 * Set register.
2557 */
2558 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2559 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2560 RT_BZERO(pInput, RT_OFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2561
2562 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2563 pInput->VpIndex = pGVCpu->idCpu;
2564 pInput->RsvdZ = 0;
2565 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2566 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2567 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2568
2569 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2570 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2571 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2572 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2573 rc = VINF_SUCCESS;
2574 }
2575 else
2576 rc = VERR_INVALID_FUNCTION;
2577 }
2578 return rc;
2579}
2580#endif /* DEBUG_bird */
2581