
source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@73327

Last change on this file since 73327 was 73327, checked in by vboxsync, 6 years ago

NEM/win: Do CR3 flush/update from ring-0 when we can (requires setjmp for VMMCALLRING3_PGM_MAP_CHUNK). bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.8 KB
1/* $Id: NEMR0Native-win.cpp 73327 2018-07-23 14:25:42Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
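/**
 * ntoskrnl.exe!HvlInvokeHypercall
 *
 * Resolved at runtime by NEMR0InitVM. As far as we can tell this is a thin
 * wrapper around the hypercall instruction: uCallInfo carries the call code in
 * the low 16 bits and the repetition count in the upper 32 bits (cf. the
 * HV_MAKE_CALL_INFO/HV_MAKE_CALL_REP_RET macros used below), while HCPhysInput
 * and HCPhysOutput are host physical addresses of the input and output pages.
 * The return value appears to be the hypercall result, with HV_STATUS in the
 * low 16 bits and the number of completed repetitions in the upper 32 bits.
 */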
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API tries to allocate cPages on IdealNode and deposit them with the
71 * hypervisor for use by the given partition. The memory is freed when
72 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
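
/*
 * Purely illustrative sketch (not used anywhere): this is roughly how the page
 * allocated above is meant to be used - the hypercall input is composed in the
 * ring-0 mapping and the hypervisor is handed the host physical address of the
 * page. The uCallInfo value and the input layout depend entirely on the call.
 */
#if 0
static uint64_t nemR0HypercallUsageSketch(PNEMR0HYPERCALLDATA pHypercallData, uint64_t uCallInfo)
{
    RT_BZERO(pHypercallData->pbPage, PAGE_SIZE);    /* compose the call specific input here */
    return g_pfnHvlInvokeHypercall(uCallInfo, pHypercallData->HCPhysPage, 0 /*HCPhysOutput*/);
}
#endif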
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's reliably NULL when not set, whereas hMemObj
142 when unset can be either NIL_RTR0MEMOBJ or 0 (not necessarily the same). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @param pVM The cross context VM handle.
159 * @thread EMT(0)
160 */
161VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
162{
163 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
164 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
165
166 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
167 AssertRCReturn(rc, rc);
168
169 /*
170 * We want to perform hypercalls here. The NT kernel started exposing a very
171 * low level interface for this somewhere between builds 14271 and 16299. Since
172 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
173 *
174 * We also need to deposit memory with the hypervisor for use by the partition
175 * (page mapping structures and the like).
176 */
177 RTDBGKRNLINFO hKrnlInfo;
178 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
179 if (RT_SUCCESS(rc))
180 {
181 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
182 if (RT_SUCCESS(rc))
183 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
184 RTR0DbgKrnlInfoRelease(hKrnlInfo);
185 if (RT_SUCCESS(rc))
186 {
187 /*
188 * Allocate a page for non-EMT threads to use for hypercalls (update
189 * statistics and such) and a critical section protecting it.
190 */
191 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
192 if (RT_SUCCESS(rc))
193 {
194 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
195 if (RT_SUCCESS(rc))
196 {
197 /*
198 * Allocate a page for each VCPU to place hypercall data on.
199 */
200 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
201 {
202 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
203 if (RT_FAILURE(rc))
204 {
205 while (i-- > 0)
206 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
207 break;
208 }
209 }
210 if (RT_SUCCESS(rc))
211 {
212 /*
213 * So far, so good.
214 */
215 return rc;
216 }
217
218 /*
219 * Bail out.
220 */
221 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
222 }
223 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
224 }
225 }
226 else
227 rc = VERR_NEM_MISSING_KERNEL_API;
228 }
229
230 RT_NOREF(pVM);
231 return rc;
232}
233
234
235/**
236 * Perform an I/O control operation on the partition handle (VID.SYS).
237 *
238 * @returns NT status code.
239 * @param pGVM The ring-0 VM structure.
240 * @param uFunction The function to perform.
241 * @param pvInput The input buffer. This must point within the VM
242 * structure so we can easily convert to a ring-3
243 * pointer if necessary.
244 * @param cbInput The size of the input. @a pvInput must be NULL when
245 * zero.
246 * @param pvOutput The output buffer. This must also point within the
247 * VM structure for ring-3 pointer magic.
248 * @param cbOutput The size of the output. @a pvOutput must be NULL
249 * when zero.
250 */
251DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
252 void *pvOutput, uint32_t cbOutput)
253{
254#ifdef RT_STRICT
255 /*
256 * Input and output parameters are part of the VM CPU structure.
257 */
258 PVM pVM = pGVM->pVM;
259 size_t const cbVM = RT_UOFFSETOF_DYN(VM, aCpus[pGVM->cCpus]);
260 if (pvInput)
261 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
262 if (pvOutput)
263 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
264#endif
265
266 int32_t rcNt = STATUS_UNSUCCESSFUL;
267 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
268 pvInput,
269 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
270 cbInput,
271 pvOutput,
272 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
273 cbOutput,
274 &rcNt);
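    /* rc is the status of getting the request through to VID.SYS, while rcNt is
       what VID.SYS itself returned.  Return rcNt whenever it is meaningful, i.e.
       when the I/O control went through or when it already indicates a failure;
       otherwise fall back on a generic failure status. */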
275 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
276 return (NTSTATUS)rcNt;
277 return STATUS_UNSUCCESSFUL;
278}
279
280
281/**
282 * 2nd part of the initialization, after we've got a partition handle.
283 *
284 * @returns VBox status code.
285 * @param pGVM The ring-0 VM handle.
286 * @param pVM The cross context VM handle.
287 * @thread EMT(0)
288 */
289VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
290{
291 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
292 AssertRCReturn(rc, rc);
293 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
294
295 /*
296 * Copy and validate the I/O control information from ring-3.
297 */
298 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
299 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
300 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
301 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
302 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
303
304 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
305 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
306 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
307 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
308 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
309 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
310
311 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
312 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
313 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
314 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
315 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
316 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
317 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
318
319 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
320 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
321 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
322 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
323 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
324 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
325 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
326 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
327
328 /*
329 * Set up an I/O control context for the partition handle, for later use.
330 */
331 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
332 AssertLogRelRCReturn(rc, rc);
333 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
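    /* Note: nemR0NtPerformIoControl adds this delta to a ring-0 address within
       the VM structure to produce the ring-3 address that VID.SYS expects to
       see in the I/O control buffers. */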
334
335 /*
336 * Get the partition ID.
337 */
338 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
339 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
340 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
341 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
342 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
343 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
344 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
345 VERR_NEM_INIT_FAILED);
346
347 return rc;
348}
349
350
351/**
352 * Clean up the NEM parts of the VM in ring-0.
353 *
354 * This is always called and must deal with the state regardless of whether
355 * NEMR0InitVM() was called or not, so take care here.
356 *
357 * @param pGVM The ring-0 VM handle.
358 */
359VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
360{
361 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
362
363 /* Clean up I/O control context. */
364 if (pGVM->nem.s.pIoCtlCtx)
365 {
366 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
367 AssertRC(rc);
368 pGVM->nem.s.pIoCtlCtx = NULL;
369 }
370
371 /* Free the hypercall pages. */
372 VMCPUID i = pGVM->cCpus;
373 while (i-- > 0)
374 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
375
376 /* The non-EMT one too. */
377 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
378 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
379 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
380}
381
382
383#if 0 /* for debugging GPA unmapping. */
384static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
385{
386 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
387 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
388 pIn->PartitionId = pGVM->nem.s.idHvPartition;
389 pIn->VpIndex = pGVCpu->idCpu;
390 pIn->ByteCount = 0x10;
391 pIn->BaseGpa = GCPhys;
392 pIn->ControlFlags.AsUINT64 = 0;
393 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
394 memset(pOut, 0xfe, sizeof(*pOut));
395 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
396 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
397 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
398 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
399 __debugbreak();
400
401 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
402}
403#endif
404
405
406/**
407 * Worker for NEMR0MapPages and others.
408 */
409NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
410 uint32_t cPages, uint32_t fFlags)
411{
412 /*
413 * Validate.
414 */
415 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
416
417 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
418 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
419 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
420 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
421 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
422 if (GCPhysSrc != GCPhysDst)
423 {
424 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
425 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
426 }
427
428 /*
429 * Compose and make the hypercall.
430 * Ring-3 is not allowed to fill in the host physical addresses of the call.
431 */
432 for (uint32_t iTries = 0;; iTries++)
433 {
434 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
435 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
436 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
437 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
438 pMapPages->MapFlags = fFlags;
439 pMapPages->u32ExplicitPadding = 0;
440 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
441 {
442 RTHCPHYS HCPhys = NIL_RTHCPHYS;
443 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
444 AssertRCReturn(rc, rc);
445 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
446 }
447
448 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
449 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
450 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
451 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
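        /* The upper 32 bits of the result hold the number of repetitions the
           hypervisor completed (HV_STATUS lives in the low 16 bits), so a fully
           successful rep call should come back as the page count shifted left
           by 32 - which is what we check for here. */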
452 if (uResult == ((uint64_t)cPages << 32))
453 return VINF_SUCCESS;
454
455 /*
456 * If the partition is out of memory, try to donate another 512 pages
457 * (2 MB) to it. VID.SYS works in multiples of 512 pages, nothing smaller.
458 */
459 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
460 || iTries > 16
461 || g_pfnWinHvDepositMemory == NULL)
462 {
463 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
464 return VERR_NEM_MAP_PAGES_FAILED;
465 }
466
467 size_t cPagesAdded = 0;
468 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
469 if (!cPagesAdded)
470 {
471 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
472 return VERR_NEM_MAP_PAGES_FAILED;
473 }
474 }
475}
476
477
478/**
479 * Maps pages into the guest physical address space.
480 *
481 * Generally the caller will be under the PGM lock already, so no extra effort
482 * is needed to make sure all changes happen under it.
483 *
484 * @returns VBox status code.
485 * @param pGVM The ring-0 VM handle.
486 * @param pVM The cross context VM handle.
487 * @param idCpu The calling EMT. Necessary for getting the
488 * hypercall page and arguments.
489 * @thread EMT(idCpu)
490 */
491VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
492{
493 /*
494 * Unpack the call.
495 */
496 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
497 if (RT_SUCCESS(rc))
498 {
499 PVMCPU pVCpu = &pVM->aCpus[idCpu];
500 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
501
502 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
503 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
504 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
505 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
506
507 /*
508 * Do the work.
509 */
510 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
511 }
512 return rc;
513}
514
515
516/**
517 * Worker for NEMR0UnmapPages and others.
518 */
519NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
520{
521 /*
522 * Validate input.
523 */
524 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
525
526 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
527 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
528 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
529 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
530
531 /*
532 * Compose and make the hypercall.
533 */
534 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
535 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
536 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
537 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
538 pUnmapPages->fFlags = 0;
539
540 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
541 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
542 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
543 if (uResult == ((uint64_t)cPages << 32))
544 {
545#if 1 /* Do we need to do this? Hopefully not... */
546 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
547 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
548 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
549#endif
550 return VINF_SUCCESS;
551 }
552
553 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
554 return VERR_NEM_UNMAP_PAGES_FAILED;
555}
556
557
558/**
559 * Unmaps pages from the guest physical address space.
560 *
561 * Generally the caller will be under the PGM lock already, so no extra effort
562 * is needed to make sure all changes happen under it.
563 *
564 * @returns VBox status code.
565 * @param pGVM The ring-0 VM handle.
566 * @param pVM The cross context VM handle.
567 * @param idCpu The calling EMT. Necessary for getting the
568 * hypercall page and arguments.
569 * @thread EMT(idCpu)
570 */
571VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
572{
573 /*
574 * Unpack the call.
575 */
576 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
577 if (RT_SUCCESS(rc))
578 {
579 PVMCPU pVCpu = &pVM->aCpus[idCpu];
580 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
581
582 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
583 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
584
585 /*
586 * Do the work.
587 */
588 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
589 }
590 return rc;
591}
592
593
594#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
595/**
596 * Worker for NEMR0ExportState.
597 *
598 * Intention is to use it internally later.
599 *
600 * @returns VBox status code.
601 * @param pGVM The ring-0 VM handle.
602 * @param pGVCpu The ring-0 VCPU handle.
603 * @param pCtx The CPU context structure to export from.
604 */
605NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
606{
607 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
608 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
609 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
610 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
611
612 pInput->PartitionId = pGVM->nem.s.idHvPartition;
613 pInput->VpIndex = pGVCpu->idCpu;
614 pInput->RsvdZ = 0;
615
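    /* A set bit in fExtrn means that piece of state is still held externally
       (by Hyper-V), so the complement is what is currently valid in CPUMCTX
       and thus in need of being pushed back out. */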
616 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
617 if ( !fWhat
618 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
619 return VINF_SUCCESS;
620 uintptr_t iReg = 0;
621
622 /* GPRs */
623 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
624 {
625 if (fWhat & CPUMCTX_EXTRN_RAX)
626 {
627 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
628 pInput->Elements[iReg].Name = HvX64RegisterRax;
629 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
630 iReg++;
631 }
632 if (fWhat & CPUMCTX_EXTRN_RCX)
633 {
634 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
635 pInput->Elements[iReg].Name = HvX64RegisterRcx;
636 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
637 iReg++;
638 }
639 if (fWhat & CPUMCTX_EXTRN_RDX)
640 {
641 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
642 pInput->Elements[iReg].Name = HvX64RegisterRdx;
643 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
644 iReg++;
645 }
646 if (fWhat & CPUMCTX_EXTRN_RBX)
647 {
648 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
649 pInput->Elements[iReg].Name = HvX64RegisterRbx;
650 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
651 iReg++;
652 }
653 if (fWhat & CPUMCTX_EXTRN_RSP)
654 {
655 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
656 pInput->Elements[iReg].Name = HvX64RegisterRsp;
657 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
658 iReg++;
659 }
660 if (fWhat & CPUMCTX_EXTRN_RBP)
661 {
662 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
663 pInput->Elements[iReg].Name = HvX64RegisterRbp;
664 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
665 iReg++;
666 }
667 if (fWhat & CPUMCTX_EXTRN_RSI)
668 {
669 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
670 pInput->Elements[iReg].Name = HvX64RegisterRsi;
671 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
672 iReg++;
673 }
674 if (fWhat & CPUMCTX_EXTRN_RDI)
675 {
676 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
677 pInput->Elements[iReg].Name = HvX64RegisterRdi;
678 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
679 iReg++;
680 }
681 if (fWhat & CPUMCTX_EXTRN_R8_R15)
682 {
683 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
684 pInput->Elements[iReg].Name = HvX64RegisterR8;
685 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
686 iReg++;
687 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
688 pInput->Elements[iReg].Name = HvX64RegisterR9;
689 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
690 iReg++;
691 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
692 pInput->Elements[iReg].Name = HvX64RegisterR10;
693 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
694 iReg++;
695 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
696 pInput->Elements[iReg].Name = HvX64RegisterR11;
697 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
698 iReg++;
699 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
700 pInput->Elements[iReg].Name = HvX64RegisterR12;
701 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
702 iReg++;
703 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
704 pInput->Elements[iReg].Name = HvX64RegisterR13;
705 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
706 iReg++;
707 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
708 pInput->Elements[iReg].Name = HvX64RegisterR14;
709 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
710 iReg++;
711 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
712 pInput->Elements[iReg].Name = HvX64RegisterR15;
713 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
714 iReg++;
715 }
716 }
717
718 /* RIP & Flags */
719 if (fWhat & CPUMCTX_EXTRN_RIP)
720 {
721 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
722 pInput->Elements[iReg].Name = HvX64RegisterRip;
723 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
724 iReg++;
725 }
726 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
727 {
728 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
729 pInput->Elements[iReg].Name = HvX64RegisterRflags;
730 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
731 iReg++;
732 }
733
734 /* Segments */
735# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
736 do { \
737 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
738 pInput->Elements[a_idx].Name = a_enmName; \
739 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
740 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
741 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
742 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
743 } while (0)
744 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
745 {
746 if (fWhat & CPUMCTX_EXTRN_CS)
747 {
748 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
749 iReg++;
750 }
751 if (fWhat & CPUMCTX_EXTRN_ES)
752 {
753 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
754 iReg++;
755 }
756 if (fWhat & CPUMCTX_EXTRN_SS)
757 {
758 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
759 iReg++;
760 }
761 if (fWhat & CPUMCTX_EXTRN_DS)
762 {
763 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
764 iReg++;
765 }
766 if (fWhat & CPUMCTX_EXTRN_FS)
767 {
768 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
769 iReg++;
770 }
771 if (fWhat & CPUMCTX_EXTRN_GS)
772 {
773 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
774 iReg++;
775 }
776 }
777
778 /* Descriptor tables & task segment. */
779 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
780 {
781 if (fWhat & CPUMCTX_EXTRN_LDTR)
782 {
783 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
784 iReg++;
785 }
786 if (fWhat & CPUMCTX_EXTRN_TR)
787 {
788 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
789 iReg++;
790 }
791
792 if (fWhat & CPUMCTX_EXTRN_IDTR)
793 {
794 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
795 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
796 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
797 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
798 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
799 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
800 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
801 iReg++;
802 }
803 if (fWhat & CPUMCTX_EXTRN_GDTR)
804 {
805 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
806 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
807 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
808 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
809 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
810 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
811 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
812 iReg++;
813 }
814 }
815
816 /* Control registers. */
817 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
818 {
819 if (fWhat & CPUMCTX_EXTRN_CR0)
820 {
821 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
822 pInput->Elements[iReg].Name = HvX64RegisterCr0;
823 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
824 iReg++;
825 }
826 if (fWhat & CPUMCTX_EXTRN_CR2)
827 {
828 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
829 pInput->Elements[iReg].Name = HvX64RegisterCr2;
830 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
831 iReg++;
832 }
833 if (fWhat & CPUMCTX_EXTRN_CR3)
834 {
835 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
836 pInput->Elements[iReg].Name = HvX64RegisterCr3;
837 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
838 iReg++;
839 }
840 if (fWhat & CPUMCTX_EXTRN_CR4)
841 {
842 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
843 pInput->Elements[iReg].Name = HvX64RegisterCr4;
844 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
845 iReg++;
846 }
847 }
848 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
849 {
850 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
851 pInput->Elements[iReg].Name = HvX64RegisterCr8;
852 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
853 iReg++;
854 }
855
856 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
857
858 /* Debug registers. */
859/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
860 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
861 {
862 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
863 pInput->Elements[iReg].Name = HvX64RegisterDr0;
864 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
865 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
866 iReg++;
867 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
868 pInput->Elements[iReg].Name = HvX64RegisterDr1;
869 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
870 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
871 iReg++;
872 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
873 pInput->Elements[iReg].Name = HvX64RegisterDr2;
874 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
875 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
876 iReg++;
877 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
878 pInput->Elements[iReg].Name = HvX64RegisterDr3;
879 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
880 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
881 iReg++;
882 }
883 if (fWhat & CPUMCTX_EXTRN_DR6)
884 {
885 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
886 pInput->Elements[iReg].Name = HvX64RegisterDr6;
887 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
888 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
889 iReg++;
890 }
891 if (fWhat & CPUMCTX_EXTRN_DR7)
892 {
893 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
894 pInput->Elements[iReg].Name = HvX64RegisterDr7;
895 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
896 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
897 iReg++;
898 }
899
900 /* Floating point state. */
901 if (fWhat & CPUMCTX_EXTRN_X87)
902 {
903 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
904 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
905 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
906 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
907 iReg++;
908 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
909 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
910 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
911 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
912 iReg++;
913 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
914 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
915 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
916 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
917 iReg++;
918 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
919 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
920 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
921 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
922 iReg++;
923 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
924 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
925 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
926 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
927 iReg++;
928 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
929 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
930 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
931 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
932 iReg++;
933 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
934 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
935 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
936 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
937 iReg++;
938 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
939 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
940 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
941 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
942 iReg++;
943
944 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
945 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
946 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
947 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
948 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
949 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
950 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
951 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
952 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
953 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
954 iReg++;
955/** @todo we've got trouble if we try to write just SSE w/o X87. */
956 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
957 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
958 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
959 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
960 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
961 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
962 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
963 iReg++;
964 }
965
966 /* Vector state. */
967 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
968 {
969 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
970 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
971 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
972 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
973 iReg++;
974 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
975 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
976 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
977 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
978 iReg++;
979 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
980 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
981 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
982 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
983 iReg++;
984 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
985 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
986 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
987 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
988 iReg++;
989 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
990 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
991 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
992 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
993 iReg++;
994 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
995 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
996 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
997 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
998 iReg++;
999 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1000 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1001 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1002 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1003 iReg++;
1004 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1005 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1006 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1007 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1008 iReg++;
1009 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1010 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1011 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1012 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1013 iReg++;
1014 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1015 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1016 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1017 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1018 iReg++;
1019 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1020 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1021 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1022 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1023 iReg++;
1024 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1025 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1026 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1027 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1028 iReg++;
1029 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1030 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1031 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1032 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1033 iReg++;
1034 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1035 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1036 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1037 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1038 iReg++;
1039 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1040 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1041 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1042 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1043 iReg++;
1044 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1045 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1046 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1047 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1048 iReg++;
1049 }
1050
1051 /* MSRs */
1052 // HvX64RegisterTsc - don't touch
1053 if (fWhat & CPUMCTX_EXTRN_EFER)
1054 {
1055 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1056 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1057 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1058 iReg++;
1059 }
1060 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1061 {
1062 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1063 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1064 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1065 iReg++;
1066 }
1067 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1068 {
1069 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1070 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1071 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1072 iReg++;
1073 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1074 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1075 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1076 iReg++;
1077 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1078 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1079 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1080 iReg++;
1081 }
1082 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1083 {
1084 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1085 pInput->Elements[iReg].Name = HvX64RegisterStar;
1086 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1087 iReg++;
1088 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1089 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1090 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1091 iReg++;
1092 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1093 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1094 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1095 iReg++;
1096 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1097 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1098 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1099 iReg++;
1100 }
1101 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1102 {
1103 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1104 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1105 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1106 iReg++;
1107 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1108 pInput->Elements[iReg].Name = HvX64RegisterPat;
1109 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1110 iReg++;
1111# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1112 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1113 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1114 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1115 iReg++;
1116# endif
1117
1118 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1119
1120 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1121 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1122 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1123 iReg++;
1124
1125 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX. */
1126
1127 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1128 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1129 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1130 iReg++;
1131 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1132 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1133 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1134 iReg++;
1135 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1136 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1137 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1138 iReg++;
1139 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1140 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1141 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1142 iReg++;
1143 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1144 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1145 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1146 iReg++;
1147 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1148 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1149 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1150 iReg++;
1151 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1152 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1153 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1154 iReg++;
1155 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1156 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1157 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1158 iReg++;
1159 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1160 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1161 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1162 iReg++;
1163 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1164 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1165 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1166 iReg++;
1167 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1168 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1169 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1170 iReg++;
1171 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1172 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1173 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1174 iReg++;
1175
1176# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1177 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1178 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1179 {
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1182 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1183 iReg++;
1184 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1185 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1186 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1187 iReg++;
1188 }
1189# endif
1190 }
1191
1192 /* Event injection (clear it). */
1193 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1194 {
1195 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1196 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1197 pInput->Elements[iReg].Value.Reg64 = 0;
1198 iReg++;
1199 }
1200
1201 /* Interruptibility state. This can get a little complicated since we get
1202 half of the state via HV_X64_VP_EXECUTION_STATE. */
1203 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1204 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1205 {
1206 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1207 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1208 pInput->Elements[iReg].Value.Reg64 = 0;
1209 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1210 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1211 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1212 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1213 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1214 iReg++;
1215 }
1216 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1217 {
1218 if ( pVCpu->nem.s.fLastInterruptShadow
1219 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1220 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1221 {
1222 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1223 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1224 pInput->Elements[iReg].Value.Reg64 = 0;
1225 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1226 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1227 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1228 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1229 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1230 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1231 iReg++;
1232 }
1233 }
1234 else
1235 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1236
1237 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1238 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1239 if ( fDesiredIntWin
1240 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1241 {
1242 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1243 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1244 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1245 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1246 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1247 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1248 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1249 iReg++;
1250 }
1251
1252 /// @todo HvRegisterPendingEvent0
1253 /// @todo HvRegisterPendingEvent1
1254
1255 /*
1256 * Set the registers.
1257 */
1258 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
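    /* (Rough arithmetic behind the 127: a 4096 byte page minus the fixed input
       header, divided by the size of an HV_REGISTER_ASSOC element - assuming a
       16 byte header and 32 byte elements, that is (4096 - 16) / 32 = 127.) */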
1259
1260 /*
1261 * Make the hypercall.
1262 */
1263 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1264 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1265 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1266 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1267 VERR_NEM_SET_REGISTERS_FAILED);
1268 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1269 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1270 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1271 return VINF_SUCCESS;
1272}
1273#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1274
1275
1276/**
1277 * Export the state to the native API (out of CPUMCTX).
1278 *
1279 * @returns VBox status code
1280 * @param pGVM The ring-0 VM handle.
1281 * @param pVM The cross context VM handle.
1282 * @param idCpu The calling EMT. Necessary for getting the
1283 * hypercall page and arguments.
1284 */
1285VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1286{
1287#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1288 /*
1289 * Validate the call.
1290 */
1291 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1292 if (RT_SUCCESS(rc))
1293 {
1294 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1295 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1296 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1297
1298 /*
1299 * Call worker.
1300 */
1301 rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
1302 }
1303 return rc;
1304#else
1305 RT_NOREF(pGVM, pVM, idCpu);
1306 return VERR_NOT_IMPLEMENTED;
1307#endif
1308}
1309
1310
1311#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1312/**
1313 * Worker for NEMR0ImportState.
1314 *
1315 * Intention is to use it internally later.
1316 *
1317 * @returns VBox status code.
1318 * @param pGVM The ring-0 VM handle.
1319 * @param pGVCpu The ring-0 VCPU handle.
1320 * @param pCtx The CPU context structure to import into.
1321 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1322 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1323 */
1324NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1325{
1326 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1327 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1328 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1329 Assert(pCtx == &pGVCpu->pVCpu->cpum.GstCtx);
1330
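    /* Only fetch state that is still marked as external (i.e. last seen by
       Hyper-V); anything already valid in CPUMCTX is left alone. */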
1331 fWhat &= pCtx->fExtrn;
1332
1333 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1334 pInput->VpIndex = pGVCpu->idCpu;
1335 pInput->fFlags = 0;
1336
1337 /* GPRs */
1338 uintptr_t iReg = 0;
1339 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1340 {
1341 if (fWhat & CPUMCTX_EXTRN_RAX)
1342 pInput->Names[iReg++] = HvX64RegisterRax;
1343 if (fWhat & CPUMCTX_EXTRN_RCX)
1344 pInput->Names[iReg++] = HvX64RegisterRcx;
1345 if (fWhat & CPUMCTX_EXTRN_RDX)
1346 pInput->Names[iReg++] = HvX64RegisterRdx;
1347 if (fWhat & CPUMCTX_EXTRN_RBX)
1348 pInput->Names[iReg++] = HvX64RegisterRbx;
1349 if (fWhat & CPUMCTX_EXTRN_RSP)
1350 pInput->Names[iReg++] = HvX64RegisterRsp;
1351 if (fWhat & CPUMCTX_EXTRN_RBP)
1352 pInput->Names[iReg++] = HvX64RegisterRbp;
1353 if (fWhat & CPUMCTX_EXTRN_RSI)
1354 pInput->Names[iReg++] = HvX64RegisterRsi;
1355 if (fWhat & CPUMCTX_EXTRN_RDI)
1356 pInput->Names[iReg++] = HvX64RegisterRdi;
1357 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1358 {
1359 pInput->Names[iReg++] = HvX64RegisterR8;
1360 pInput->Names[iReg++] = HvX64RegisterR9;
1361 pInput->Names[iReg++] = HvX64RegisterR10;
1362 pInput->Names[iReg++] = HvX64RegisterR11;
1363 pInput->Names[iReg++] = HvX64RegisterR12;
1364 pInput->Names[iReg++] = HvX64RegisterR13;
1365 pInput->Names[iReg++] = HvX64RegisterR14;
1366 pInput->Names[iReg++] = HvX64RegisterR15;
1367 }
1368 }
1369
1370 /* RIP & Flags */
1371 if (fWhat & CPUMCTX_EXTRN_RIP)
1372 pInput->Names[iReg++] = HvX64RegisterRip;
1373 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1374 pInput->Names[iReg++] = HvX64RegisterRflags;
1375
1376 /* Segments */
1377 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1378 {
1379 if (fWhat & CPUMCTX_EXTRN_CS)
1380 pInput->Names[iReg++] = HvX64RegisterCs;
1381 if (fWhat & CPUMCTX_EXTRN_ES)
1382 pInput->Names[iReg++] = HvX64RegisterEs;
1383 if (fWhat & CPUMCTX_EXTRN_SS)
1384 pInput->Names[iReg++] = HvX64RegisterSs;
1385 if (fWhat & CPUMCTX_EXTRN_DS)
1386 pInput->Names[iReg++] = HvX64RegisterDs;
1387 if (fWhat & CPUMCTX_EXTRN_FS)
1388 pInput->Names[iReg++] = HvX64RegisterFs;
1389 if (fWhat & CPUMCTX_EXTRN_GS)
1390 pInput->Names[iReg++] = HvX64RegisterGs;
1391 }
1392
1393 /* Descriptor tables and the task segment. */
1394 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1395 {
1396 if (fWhat & CPUMCTX_EXTRN_LDTR)
1397 pInput->Names[iReg++] = HvX64RegisterLdtr;
1398 if (fWhat & CPUMCTX_EXTRN_TR)
1399 pInput->Names[iReg++] = HvX64RegisterTr;
1400 if (fWhat & CPUMCTX_EXTRN_IDTR)
1401 pInput->Names[iReg++] = HvX64RegisterIdtr;
1402 if (fWhat & CPUMCTX_EXTRN_GDTR)
1403 pInput->Names[iReg++] = HvX64RegisterGdtr;
1404 }
1405
1406 /* Control registers. */
1407 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1408 {
1409 if (fWhat & CPUMCTX_EXTRN_CR0)
1410 pInput->Names[iReg++] = HvX64RegisterCr0;
1411 if (fWhat & CPUMCTX_EXTRN_CR2)
1412 pInput->Names[iReg++] = HvX64RegisterCr2;
1413 if (fWhat & CPUMCTX_EXTRN_CR3)
1414 pInput->Names[iReg++] = HvX64RegisterCr3;
1415 if (fWhat & CPUMCTX_EXTRN_CR4)
1416 pInput->Names[iReg++] = HvX64RegisterCr4;
1417 }
1418 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1419 pInput->Names[iReg++] = HvX64RegisterCr8;
1420
1421 /* Debug registers. */
1422 if (fWhat & CPUMCTX_EXTRN_DR7)
1423 pInput->Names[iReg++] = HvX64RegisterDr7;
1424 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1425 {
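        /* DR7 is pulled in along with DR0..DR3 if it is still external, apparently
           because the copy-back code and the CPUM debug register setters expect DR7
           to be current (see the "Hack alert" note in the copy-back code below). */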
1426 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1427 {
1428 fWhat |= CPUMCTX_EXTRN_DR7;
1429 pInput->Names[iReg++] = HvX64RegisterDr7;
1430 }
1431 pInput->Names[iReg++] = HvX64RegisterDr0;
1432 pInput->Names[iReg++] = HvX64RegisterDr1;
1433 pInput->Names[iReg++] = HvX64RegisterDr2;
1434 pInput->Names[iReg++] = HvX64RegisterDr3;
1435 }
1436 if (fWhat & CPUMCTX_EXTRN_DR6)
1437 pInput->Names[iReg++] = HvX64RegisterDr6;
1438
1439 /* Floating point state. */
1440 if (fWhat & CPUMCTX_EXTRN_X87)
1441 {
1442 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1443 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1444 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1445 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1446 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1447 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1448 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1449 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1450 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1451 }
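    /* HvX64RegisterXmmControlStatus carries both the x87 last data pointer and MXCSR,
       so it is needed when importing either the x87 or the SSE/AVX state. */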
1452 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1453 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1454
1455 /* Vector state. */
1456 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1457 {
1458 pInput->Names[iReg++] = HvX64RegisterXmm0;
1459 pInput->Names[iReg++] = HvX64RegisterXmm1;
1460 pInput->Names[iReg++] = HvX64RegisterXmm2;
1461 pInput->Names[iReg++] = HvX64RegisterXmm3;
1462 pInput->Names[iReg++] = HvX64RegisterXmm4;
1463 pInput->Names[iReg++] = HvX64RegisterXmm5;
1464 pInput->Names[iReg++] = HvX64RegisterXmm6;
1465 pInput->Names[iReg++] = HvX64RegisterXmm7;
1466 pInput->Names[iReg++] = HvX64RegisterXmm8;
1467 pInput->Names[iReg++] = HvX64RegisterXmm9;
1468 pInput->Names[iReg++] = HvX64RegisterXmm10;
1469 pInput->Names[iReg++] = HvX64RegisterXmm11;
1470 pInput->Names[iReg++] = HvX64RegisterXmm12;
1471 pInput->Names[iReg++] = HvX64RegisterXmm13;
1472 pInput->Names[iReg++] = HvX64RegisterXmm14;
1473 pInput->Names[iReg++] = HvX64RegisterXmm15;
1474 }
1475
1476 /* MSRs */
1477 // HvX64RegisterTsc - don't touch
1478 if (fWhat & CPUMCTX_EXTRN_EFER)
1479 pInput->Names[iReg++] = HvX64RegisterEfer;
1480 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1481 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1482 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1483 {
1484 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1485 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1486 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1487 }
1488 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1489 {
1490 pInput->Names[iReg++] = HvX64RegisterStar;
1491 pInput->Names[iReg++] = HvX64RegisterLstar;
1492 pInput->Names[iReg++] = HvX64RegisterCstar;
1493 pInput->Names[iReg++] = HvX64RegisterSfmask;
1494 }
1495
1496# ifdef LOG_ENABLED
1497 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1498# endif
1499 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1500 {
1501 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1502 pInput->Names[iReg++] = HvX64RegisterPat;
1503# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1504 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1505# endif
1506 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1507 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1508 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1509 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1510 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1511 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1512 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1513 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1514 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1515 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1516 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1517 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1518 pInput->Names[iReg++] = HvX64RegisterTscAux;
1519# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1520 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1521 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1522# endif
1523# ifdef LOG_ENABLED
1524 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1525 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1526# endif
1527 }
1528
1529 /* Interruptibility. */
1530 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1531 {
1532 pInput->Names[iReg++] = HvRegisterInterruptState;
1533 pInput->Names[iReg++] = HvX64RegisterRip;
1534 }
1535
1536 /* event injection */
1537 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1538 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1539 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1540 size_t const cRegs = iReg;
1541 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1542
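    /* The single pre-allocated hypercall page holds both the input block (header and
       name array, padded to a 32 byte boundary) and the HV_REGISTER_VALUE output array
       that the hypervisor fills in right after it; the assert below checks that
       everything fits within the page. */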
1543 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1544 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1545 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1546
1547 /*
1548 * Make the hypercall.
1549 */
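    /* HvCallGetVpRegisters is a rep hypercall; the returned value combines the status
       code with the number of elements completed, so on success it should equal
       HV_MAKE_CALL_REP_RET(cRegs), i.e. HV_STATUS_SUCCESS with all cRegs registers done. */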
1550 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1551 pGVCpu->nem.s.HypercallData.HCPhysPage,
1552 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1553 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1554 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1555 VERR_NEM_GET_REGISTERS_FAILED);
1556 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1557
1558 /*
1559 * Copy information to the CPUM context.
1560 */
1561 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1562 iReg = 0;
1563
1564 /* GPRs */
1565 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1566 {
1567 if (fWhat & CPUMCTX_EXTRN_RAX)
1568 {
1569 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1570 pCtx->rax = paValues[iReg++].Reg64;
1571 }
1572 if (fWhat & CPUMCTX_EXTRN_RCX)
1573 {
1574 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1575 pCtx->rcx = paValues[iReg++].Reg64;
1576 }
1577 if (fWhat & CPUMCTX_EXTRN_RDX)
1578 {
1579 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1580 pCtx->rdx = paValues[iReg++].Reg64;
1581 }
1582 if (fWhat & CPUMCTX_EXTRN_RBX)
1583 {
1584 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1585 pCtx->rbx = paValues[iReg++].Reg64;
1586 }
1587 if (fWhat & CPUMCTX_EXTRN_RSP)
1588 {
1589 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1590 pCtx->rsp = paValues[iReg++].Reg64;
1591 }
1592 if (fWhat & CPUMCTX_EXTRN_RBP)
1593 {
1594 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1595 pCtx->rbp = paValues[iReg++].Reg64;
1596 }
1597 if (fWhat & CPUMCTX_EXTRN_RSI)
1598 {
1599 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1600 pCtx->rsi = paValues[iReg++].Reg64;
1601 }
1602 if (fWhat & CPUMCTX_EXTRN_RDI)
1603 {
1604 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1605 pCtx->rdi = paValues[iReg++].Reg64;
1606 }
1607 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1608 {
1609 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1610 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1611 pCtx->r8 = paValues[iReg++].Reg64;
1612 pCtx->r9 = paValues[iReg++].Reg64;
1613 pCtx->r10 = paValues[iReg++].Reg64;
1614 pCtx->r11 = paValues[iReg++].Reg64;
1615 pCtx->r12 = paValues[iReg++].Reg64;
1616 pCtx->r13 = paValues[iReg++].Reg64;
1617 pCtx->r14 = paValues[iReg++].Reg64;
1618 pCtx->r15 = paValues[iReg++].Reg64;
1619 }
1620 }
1621
1622 /* RIP & Flags */
1623 if (fWhat & CPUMCTX_EXTRN_RIP)
1624 {
1625 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1626 pCtx->rip = paValues[iReg++].Reg64;
1627 }
1628 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1629 {
1630 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1631 pCtx->rflags.u = paValues[iReg++].Reg64;
1632 }
1633
1634 /* Segments */
1635# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1636 do { \
1637 Assert(pInput->Names[a_idx] == a_enmName); \
1638 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1639 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1640 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1641 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1642 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1643 } while (0)
1644 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1645 {
1646 if (fWhat & CPUMCTX_EXTRN_CS)
1647 {
1648 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1649 iReg++;
1650 }
1651 if (fWhat & CPUMCTX_EXTRN_ES)
1652 {
1653 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1654 iReg++;
1655 }
1656 if (fWhat & CPUMCTX_EXTRN_SS)
1657 {
1658 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1659 iReg++;
1660 }
1661 if (fWhat & CPUMCTX_EXTRN_DS)
1662 {
1663 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1664 iReg++;
1665 }
1666 if (fWhat & CPUMCTX_EXTRN_FS)
1667 {
1668 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1669 iReg++;
1670 }
1671 if (fWhat & CPUMCTX_EXTRN_GS)
1672 {
1673 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1674 iReg++;
1675 }
1676 }
1677 /* Descriptor tables and the task segment. */
1678 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1679 {
1680 if (fWhat & CPUMCTX_EXTRN_LDTR)
1681 {
1682 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1683 iReg++;
1684 }
1685 if (fWhat & CPUMCTX_EXTRN_TR)
1686 {
1687 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
1688 to avoid triggering sanity assertions around the code, always fix this up. */
1689 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1690 switch (pCtx->tr.Attr.n.u4Type)
1691 {
1692 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1693 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1694 break;
1695 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1696 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1697 break;
1698 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1699 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1700 break;
1701 }
1702 iReg++;
1703 }
1704 if (fWhat & CPUMCTX_EXTRN_IDTR)
1705 {
1706 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1707 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1708 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1709 iReg++;
1710 }
1711 if (fWhat & CPUMCTX_EXTRN_GDTR)
1712 {
1713 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1714 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1715 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1716 iReg++;
1717 }
1718 }
1719
1720 /* Control registers. */
1721 bool fMaybeChangedMode = false;
1722 bool fUpdateCr3 = false;
1723 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1724 {
1725 if (fWhat & CPUMCTX_EXTRN_CR0)
1726 {
1727 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1728 if (pCtx->cr0 != paValues[iReg].Reg64)
1729 {
1730 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1731 fMaybeChangedMode = true;
1732 }
1733 iReg++;
1734 }
1735 if (fWhat & CPUMCTX_EXTRN_CR2)
1736 {
1737 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1738 pCtx->cr2 = paValues[iReg].Reg64;
1739 iReg++;
1740 }
1741 if (fWhat & CPUMCTX_EXTRN_CR3)
1742 {
1743 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1744 if (pCtx->cr3 != paValues[iReg].Reg64)
1745 {
1746 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1747 fUpdateCr3 = true;
1748 }
1749 iReg++;
1750 }
1751 if (fWhat & CPUMCTX_EXTRN_CR4)
1752 {
1753 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1754 if (pCtx->cr4 != paValues[iReg].Reg64)
1755 {
1756 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1757 fMaybeChangedMode = true;
1758 }
1759 iReg++;
1760 }
1761 }
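    /* CR8 maps to the upper four bits of the 8-bit APIC TPR, hence the shift by four
       when handing the value to APICSetTpr below. */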
1762 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1763 {
1764 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1765 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1766 iReg++;
1767 }
1768
1769 /* Debug registers. */
1770 if (fWhat & CPUMCTX_EXTRN_DR7)
1771 {
1772 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1773 if (pCtx->dr[7] != paValues[iReg].Reg64)
1774 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1775 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1776 iReg++;
1777 }
1778 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1779 {
1780 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1781 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1782 if (pCtx->dr[0] != paValues[iReg].Reg64)
1783 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1784 iReg++;
1785 if (pCtx->dr[1] != paValues[iReg].Reg64)
1786 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1787 iReg++;
1788 if (pCtx->dr[2] != paValues[iReg].Reg64)
1789 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1790 iReg++;
1791 if (pCtx->dr[3] != paValues[iReg].Reg64)
1792 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1793 iReg++;
1794 }
1795 if (fWhat & CPUMCTX_EXTRN_DR6)
1796 {
1797 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1798 if (pCtx->dr[6] != paValues[iReg].Reg64)
1799 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1800 iReg++;
1801 }
1802
1803 /* Floating point state. */
1804 if (fWhat & CPUMCTX_EXTRN_X87)
1805 {
1806 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1807 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1808 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1809 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1810 iReg++;
1811 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1812 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1813 iReg++;
1814 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1815 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1816 iReg++;
1817 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1818 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1819 iReg++;
1820 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1821 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1822 iReg++;
1823 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1824 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1825 iReg++;
1826 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1827 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1828 iReg++;
1829 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1830 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1831 iReg++;
1832
1833 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1834 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1835 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1836 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1837 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1838 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1839 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1840 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1841 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1842 iReg++;
1843 }
1844
1845 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1846 {
1847 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1848 if (fWhat & CPUMCTX_EXTRN_X87)
1849 {
1850 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1851 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1852 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1853 }
1854 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1855 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1856 iReg++;
1857 }
1858
1859 /* Vector state. */
1860 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1861 {
1862 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1863 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1864 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1865 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1866 iReg++;
1867 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1868 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1869 iReg++;
1870 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1871 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1872 iReg++;
1873 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1874 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1875 iReg++;
1876 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1877 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1878 iReg++;
1879 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1880 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1881 iReg++;
1882 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1883 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1884 iReg++;
1885 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1886 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1887 iReg++;
1888 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1889 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1890 iReg++;
1891 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1892 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1893 iReg++;
1894 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1895 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1896 iReg++;
1897 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1898 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1899 iReg++;
1900 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1901 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1902 iReg++;
1903 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1904 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1905 iReg++;
1906 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1907 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1908 iReg++;
1909 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1910 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1911 iReg++;
1912 }
1913
1914
1915 /* MSRs */
1916 // HvX64RegisterTsc - don't touch
1917 if (fWhat & CPUMCTX_EXTRN_EFER)
1918 {
1919 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1920 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1921 {
1922 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1923 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1924 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1925 pCtx->msrEFER = paValues[iReg].Reg64;
1926 fMaybeChangedMode = true;
1927 }
1928 iReg++;
1929 }
1930 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1931 {
1932 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1933 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1934 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1935 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1936 iReg++;
1937 }
1938 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1939 {
1940 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1941 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1942 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1943 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1944 iReg++;
1945
1946 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1947 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1948 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1949 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1950 iReg++;
1951
1952 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1953 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1954 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1955 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1956 iReg++;
1957 }
1958 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1959 {
1960 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1961 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1962 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1963 pCtx->msrSTAR = paValues[iReg].Reg64;
1964 iReg++;
1965
1966 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1967 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1968 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1969 pCtx->msrLSTAR = paValues[iReg].Reg64;
1970 iReg++;
1971
1972 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1973 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1974 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1975 pCtx->msrCSTAR = paValues[iReg].Reg64;
1976 iReg++;
1977
1978 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1979 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1980 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1981 pCtx->msrSFMASK = paValues[iReg].Reg64;
1982 iReg++;
1983 }
1984 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1985 {
1986 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1987 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1988 if (paValues[iReg].Reg64 != uOldBase)
1989 {
1990 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1991 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1992 int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1993 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
1994 }
1995 iReg++;
1996
1997 Assert(pInput->Names[iReg] == HvX64RegisterPat);
1998 if (pCtx->msrPAT != paValues[iReg].Reg64)
1999 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2000 pCtx->msrPAT = paValues[iReg].Reg64;
2001 iReg++;
2002
2003# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2004 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2005 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2006 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2007 iReg++;
2008# endif
2009
2010 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2011 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2012 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2013 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2014 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2015 iReg++;
2016
2017 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2018
2019 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2020 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2021 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2022 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2023 iReg++;
2024
2025 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2026 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2027 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2028 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2029 iReg++;
2030
2031 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2032 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2033 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2034 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2035 iReg++;
2036
2037 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2038 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2039 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2040 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2041 iReg++;
2042
2043 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2044 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2045 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2046 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2047 iReg++;
2048
2049 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2050 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2051 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2052 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2053 iReg++;
2054
2055 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2056 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2057 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2058 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2059 iReg++;
2060
2061 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2062 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2063 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2064 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2065 iReg++;
2066
2067 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2068 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2069 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2070 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2071 iReg++;
2072
2073 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2074 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2075 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2076 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2077 iReg++;
2078
2079 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2080 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2081 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2082 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2083 iReg++;
2084
2085 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2086 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2087 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2088 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2089 iReg++;
2090
2091# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2092 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2093 {
2094 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2095 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2096 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2097 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2098 iReg++;
2099 }
2100# endif
2101# ifdef LOG_ENABLED
2102 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2103 {
2104 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2105 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2106 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2107 iReg++;
2108 }
2109# endif
2110 }
2111
2112 /* Interruptibility. */
2113 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2114 {
2115 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2116 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2117
2118 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2119 {
2120 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2121 if (paValues[iReg].InterruptState.InterruptShadow)
2122 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2123 else
2124 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2125 }
2126
2127 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2128 {
2129 if (paValues[iReg].InterruptState.NmiMasked)
2130 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2131 else
2132 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2133 }
2134
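        /* Mark both inhibit flags as fetched so they are cleared from fExtrn below. */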
2135 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2136 iReg += 2;
2137 }
2138
2139 /* Event injection. */
2140 /// @todo HvRegisterPendingInterruption
2141 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2142 if (paValues[iReg].PendingInterruption.InterruptionPending)
2143 {
2144 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2145 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2146 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2147 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2148 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2149 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2150 }
2151
2152 /// @todo HvRegisterPendingEvent0
2153 /// @todo HvRegisterPendingEvent1
2154
2155 /* Almost done, just update extrn flags and maybe change PGM mode. */
2156 pCtx->fExtrn &= ~fWhat;
2157 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2158 pCtx->fExtrn = 0;
2159
2160 /* Typical. */
2161 if (!fMaybeChangedMode && !fUpdateCr3)
2162 return VINF_SUCCESS;
2163
2164 /*
2165 * Slow.
2166 */
2167 int rc = VINF_SUCCESS;
2168 if (fMaybeChangedMode)
2169 {
2170 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2171 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2172 }
2173
2174 if (fUpdateCr3)
2175 {
2176 if (fCanUpdateCr3)
2177 {
2178 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2179 rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
2180 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2181 }
2182 else
2183 {
2184 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2185 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2186 }
2187 }
2188
2189 return rc;
2190}
2191#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2192
2193
2194/**
2195 * Import the state from the native API (back to CPUMCTX).
2196 *
2197 * @returns VBox status code
2198 * @param pGVM The ring-0 VM handle.
2199 * @param pVM The cross context VM handle.
2200 * @param idCpu The calling EMT. Necessary for getting the
2201 * hypercall page and arguments.
2202 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2203 * CPUMCTX_EXTRN_ALL for everything.
2204 */
2205VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2206{
2207#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2208 /*
2209 * Validate the call.
2210 */
2211 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2212 if (RT_SUCCESS(rc))
2213 {
2214 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2215 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2216 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2217
2218 /*
2219 * Call worker.
2220 */
2221 rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2222 }
2223 return rc;
2224#else
2225 RT_NOREF(pGVM, pVM, idCpu, fWhat);
2226 return VERR_NOT_IMPLEMENTED;
2227#endif
2228}
2229
2230
2231#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2232/**
2233 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2234 *
2235 * @returns VBox status code.
2236 * @param pGVM The ring-0 VM handle.
2237 * @param pGVCpu The ring-0 VCPU handle.
2238 * @param pcTicks Where to return the current CPU tick count.
2239 * @param pcAux Where to return the Hyper-V TSC_AUX value. Optional.
2240 */
2241NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2242{
2243 /*
2244 * Hypercall parameters.
2245 */
2246 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2247 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2248 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2249
2250 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2251 pInput->VpIndex = pGVCpu->idCpu;
2252 pInput->fFlags = 0;
2253 pInput->Names[0] = HvX64RegisterTsc;
2254 pInput->Names[1] = HvX64RegisterTscAux;
2255
2256 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2257 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2258 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2259
2260 /*
2261 * Make the hypercall.
2262 */
2263 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2264 pGVCpu->nem.s.HypercallData.HCPhysPage,
2265 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2266 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2267 VERR_NEM_GET_REGISTERS_FAILED);
2268
2269 /*
2270 * Get results.
2271 */
2272 *pcTicks = paValues[0].Reg64;
2273 if (pcAux)
2274 *pcAux = paValues[1].Reg32;
2275 return VINF_SUCCESS;
2276}
2277#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2278
2279
2280/**
2281 * Queries the TSC and TSC_AUX values, putting the results in the per-VCPU
2281 * hypercall result area (nem.s.Hypercall.QueryCpuTick).
2282 *
2283 * @returns VBox status code
2284 * @param pGVM The ring-0 VM handle.
2285 * @param pVM The cross context VM handle.
2286 * @param idCpu The calling EMT. Necessary for getting the
2287 * hypercall page and arguments.
2288 */
2289VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2290{
2291#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2292 /*
2293 * Validate the call.
2294 */
2295 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2296 if (RT_SUCCESS(rc))
2297 {
2298 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2299 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2300 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2301
2302 /*
2303 * Call worker.
2304 */
2305 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2306 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2307 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2308 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2309 }
2310 return rc;
2311#else
2312 RT_NOREF(pGVM, pVM, idCpu);
2313 return VERR_NOT_IMPLEMENTED;
2314#endif
2315}
2316
2317
2318#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2319/**
2320 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2321 *
2322 * @returns VBox status code.
2323 * @param pGVM The ring-0 VM handle.
2324 * @param pGVCpu The ring-0 VCPU handle.
2325 * @param uPausedTscValue The TSC value at the time of pausing.
2326 */
2327NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2328{
2329 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2330
2331 /*
2332 * Set up the hypercall parameters.
2333 */
2334 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2335 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2336
2337 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2338 pInput->VpIndex = 0;
2339 pInput->RsvdZ = 0;
2340 pInput->Elements[0].Name = HvX64RegisterTsc;
2341 pInput->Elements[0].Pad0 = 0;
2342 pInput->Elements[0].Pad1 = 0;
2343 pInput->Elements[0].Value.Reg128.High64 = 0;
2344 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2345
2346 /*
2347 * Disable interrupts and do the first virtual CPU.
2348 */
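    /* Interrupts stay disabled across all the per-vCPU hypercalls so the host TSC
       deltas measured below are not inflated by preemption or interrupt handling. */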
2349 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2350 uint64_t const uFirstTsc = ASMReadTSC();
2351 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2352 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2353 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2354 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2355
2356 /*
2357 * Do secondary processors, adjusting for elapsed TSC and keeping our fingers crossed
2358 * that we don't introduce too much drift here.
2359 */
2360 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2361 {
2362 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2363 Assert(pInput->RsvdZ == 0);
2364 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2365 Assert(pInput->Elements[0].Pad0 == 0);
2366 Assert(pInput->Elements[0].Pad1 == 0);
2367 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2368
2369 pInput->VpIndex = iCpu;
2370 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2371 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2372
2373 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2374 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2375 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2376 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2377 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2378 }
2379
2380 /*
2381 * Done.
2382 */
2383 ASMSetFlags(fSavedFlags);
2384 return VINF_SUCCESS;
2385}
2386#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2387
2388
2389/**
2390 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2391 *
2392 * @returns VBox status code
2393 * @param pGVM The ring-0 VM handle.
2394 * @param pVM The cross context VM handle.
2395 * @param idCpu The calling EMT. Necessary for getting the
2396 * hypercall page and arguments.
2397 * @param uPausedTscValue The TSC value at the time of pausing.
2398 */
2399VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2400{
2401#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2402 /*
2403 * Validate the call.
2404 */
2405 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2406 if (RT_SUCCESS(rc))
2407 {
2408 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2409 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2410 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2411
2412 /*
2413 * Call worker.
2414 */
2415 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2416 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2417 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2418 }
2419 return rc;
2420#else
2421 RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
2422 return VERR_NOT_IMPLEMENTED;
2423#endif
2424}
2425
2426
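/**
 * Runs guest code on the calling EMT using the ring-0 run loop.
 *
 * @returns Strict VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @param   idCpu   The calling EMT.
 */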
2427VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2428{
2429#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2430 PVM pVM = pGVM->pVM;
2431 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2432#else
2433 RT_NOREF(pGVM, idCpu);
2434 return VERR_NOT_IMPLEMENTED;
2435#endif
2436}
2437
2438
2439/**
2440 * Updates statistics in the VM structure.
2441 *
2442 * @returns VBox status code.
2443 * @param pGVM The ring-0 VM handle.
2444 * @param pVM The cross context VM handle.
2445 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2446 * page and arguments.
2447 */
2448VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2449{
2450 /*
2451 * Validate the call.
2452 */
2453 int rc;
2454 if (idCpu == NIL_VMCPUID)
2455 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2456 else
2457 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2458 if (RT_SUCCESS(rc))
2459 {
2460 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2461
2462 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2463 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2464 : &pGVM->nem.s.HypercallData;
2465 if ( RT_VALID_PTR(pHypercallData->pbPage)
2466 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2467 {
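            /* Without an EMT (idCpu == NIL_VMCPUID) the per-VM hypercall page is used,
               which is shared and therefore serialized with the critical section;
               EMT callers use their own per-vCPU page and need no locking. */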
2468 if (idCpu == NIL_VMCPUID)
2469 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2470 if (RT_SUCCESS(rc))
2471 {
2472 /*
2473 * Query the memory statistics for the partition.
2474 */
2475 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2476 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2477 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2478 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2479 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2480 pInput->ProximityDomainInfo.Id = 0;
2481
2482 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2483 RT_ZERO(*pOutput);
2484
2485 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2486 pHypercallData->HCPhysPage,
2487 pHypercallData->HCPhysPage + sizeof(*pInput));
2488 if (uResult == HV_STATUS_SUCCESS)
2489 {
2490 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2491 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2492 rc = VINF_SUCCESS;
2493 }
2494 else
2495 {
2496 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2497 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2498 rc = VERR_NEM_IPE_0;
2499 }
2500
2501 if (idCpu == NIL_VMCPUID)
2502 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2503 }
2504 }
2505 else
2506 rc = VERR_WRONG_ORDER;
2507 }
2508 return rc;
2509}
2510
2511
2512#if 1 && defined(DEBUG_bird)
2513/**
2514 * Debug only interface for poking around and exploring Hyper-V stuff.
2515 *
2516 * @param pGVM The ring-0 VM handle.
2517 * @param pVM The cross context VM handle.
2518 * @param idCpu The calling EMT.
2519 * @param u64Arg What to query. 0 == registers.
2520 */
2521VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2522{
2523 /*
2524 * Resolve CPU structures.
2525 */
2526 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2527 if (RT_SUCCESS(rc))
2528 {
2529 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2530
2531 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2532 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2533 if (u64Arg == 0)
2534 {
2535 /*
2536 * Query register.
2537 */
2538 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2539 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2540
2541 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2542 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2543 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2544
2545 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2546 pInput->VpIndex = pGVCpu->idCpu;
2547 pInput->fFlags = 0;
2548 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2549
2550 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2551 pGVCpu->nem.s.HypercallData.HCPhysPage,
2552 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2553 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2554 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2555 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2556 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2557 rc = VINF_SUCCESS;
2558 }
2559 else if (u64Arg == 1)
2560 {
2561 /*
2562 * Query partition property.
2563 */
2564 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2565 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2566
2567 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2568 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2569 pOutput->PropertyValue = 0;
2570
2571 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2572 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2573 pInput->uPadding = 0;
2574
2575 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2576 pGVCpu->nem.s.HypercallData.HCPhysPage,
2577 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2578 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2579 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2580 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2581 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2582 rc = VINF_SUCCESS;
2583 }
2584 else if (u64Arg == 2)
2585 {
2586 /*
2587 * Set register.
2588 */
2589 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2590 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2591 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2592
2593 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2594 pInput->VpIndex = pGVCpu->idCpu;
2595 pInput->RsvdZ = 0;
2596 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2597 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2598 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2599
2600 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2601 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2602 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2603 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2604 rc = VINF_SUCCESS;
2605 }
2606 else
2607 rc = VERR_INVALID_FUNCTION;
2608 }
2609 return rc;
2610}
2611#endif /* DEBUG_bird */
2612