VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 72687

Last change on this file was r72687, checked in by vboxsync, 6 years ago:

NEM: Apparently the IoCtlMessageSlotHandleAndGetNext operation may be disturbed by NtAlertThread, so a restart wrapper was added for the nemHCWinStopCpu() code. bugref:9044
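In short, the change wraps the stop-CPU I/O control in a retry: if the wait is broken by an alert-style status, the IOCTL is simply reissued. Below is only a minimal sketch of that pattern under assumed, illustrative names (IoStatus, issueWithRestart); the authoritative implementation is nemR0NtPerformIoControlRestart() in the listing that follows.

    #include <cstdint>

    // Stand-ins for STATUS_SUCCESS / STATUS_ALERTED / STATUS_TIMEOUT / STATUS_UNSUCCESSFUL.
    enum class IoStatus : uint32_t { Success = 0, Alerted = 0x101, Timeout = 0x102, Unsuccessful = 0xC0000001 };

    // In the real code the issuer is SUPR0IoCtlPerform() on the VID.SYS partition handle.
    using IoCtlIssuer = IoStatus (*)(void *pvInput, uint32_t cbInput);

    // Issue the IOCTL and restart it once if the wait was disturbed by an alert or timed out.
    static IoStatus issueWithRestart(IoCtlIssuer pfnIssue, void *pvInput, uint32_t cbInput)
    {
        IoStatus rc = pfnIssue(pvInput, cbInput);
        if (rc == IoStatus::Alerted || rc == IoStatus::Timeout)
            rc = pfnIssue(pvInput, cbInput); /* one restart, as nemR0NtPerformIoControlRestart() does */
        return rc;
    }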

1/* $Id: NEMR0Native-win.cpp 72687 2018-06-26 01:52:02Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44
45
46/* Assert compile context sanity. */
47#ifndef RT_OS_WINDOWS
48# error "Windows only file!"
49#endif
50#ifndef RT_ARCH_AMD64
51# error "AMD64 only file!"
52#endif
53
54
55/*********************************************************************************************************************************
56* Internal Functions *
57*********************************************************************************************************************************/
58typedef uint32_t DWORD; /* for winerror.h constants */
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
64static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
65
66/**
67 * WinHvr.sys!WinHvDepositMemory
68 *
69 * This API will try to allocate cPages on IdealNode and deposit them to the
70 * hypervisor for use with the given partition. The memory will be freed when
71 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleaned up.
72 *
73 * Apparently node numbers above 64 have a different meaning.
74 */
75static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
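/* Note: nemR0WinMapPages() below falls back on this API, depositing 512 pages (2 MB) at a
   time, when HvCallMapGpaPages fails with HV_STATUS_INSUFFICIENT_MEMORY. */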
76
77
78/*********************************************************************************************************************************
79* Internal Functions *
80*********************************************************************************************************************************/
81NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
82 uint32_t cPages, uint32_t fFlags);
83NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
84NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
85NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
86NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
87NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
88DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
89 void *pvOutput, uint32_t cbOutput);
90DECLINLINE(NTSTATUS) nemR0NtPerformIoControlRestart(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput);
91
92
93/*
94 * Instantiate the code we share with ring-3.
95 */
96#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
97
98/**
99 * Worker for NEMR0InitVM that allocates a hypercall page.
100 *
101 * @returns VBox status code.
102 * @param pHypercallData The hypercall data page to initialize.
103 */
104static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
105{
106 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
107 if (RT_SUCCESS(rc))
108 {
109 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
110 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
111 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
112 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
113 if (RT_SUCCESS(rc))
114 return VINF_SUCCESS;
115
116 /* bail out */
117 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
118 }
119 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
120 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
121 pHypercallData->pbPage = NULL;
122 return rc;
123}
124
125/**
126 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
127 *
128 * @param pHypercallData The hypercall data page to uninitialize.
129 */
130static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
131{
132 /* Check pbPage here since it's NULL when nothing is allocated, whereas the
133 hMemObj can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
134 if (pHypercallData->pbPage != NULL)
135 {
136 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
137 pHypercallData->pbPage = NULL;
138 }
139 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
140 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
141}
142
143
144/**
145 * Called by NEMR3Init to make sure we've got what we need.
146 *
147 * @returns VBox status code.
148 * @param pGVM The ring-0 VM handle.
149 * @param pVM The cross context VM handle.
150 * @thread EMT(0)
151 */
152VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
153{
154 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
155 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
156
157 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
158 AssertRCReturn(rc, rc);
159
160 /*
161 * We want to perform hypercalls here. The NT kernel started to expose a very low
162 * level interface for doing this somewhere between build 14271 and 16299. Since
163 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
164 *
165 * We also need to deposit memory to the hypervisor for use with the partition
166 * (page mapping structures and such).
167 */
168 RTDBGKRNLINFO hKrnlInfo;
169 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
170 if (RT_SUCCESS(rc))
171 {
172 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
173 if (RT_SUCCESS(rc))
174 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
175 RTR0DbgKrnlInfoRelease(hKrnlInfo);
176 if (RT_SUCCESS(rc))
177 {
178 /*
179 * Allocate a page for non-EMT threads to use for hypercalls (update
180 * statistics and such) and a critical section protecting it.
181 */
182 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
183 if (RT_SUCCESS(rc))
184 {
185 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
186 if (RT_SUCCESS(rc))
187 {
188 /*
189 * Allocate a page for each VCPU to place hypercall data on.
190 */
191 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
192 {
193 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
194 if (RT_FAILURE(rc))
195 {
196 while (i-- > 0)
197 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
198 break;
199 }
200 }
201 if (RT_SUCCESS(rc))
202 {
203 /*
204 * So far, so good.
205 */
206 return rc;
207 }
208
209 /*
210 * Bail out.
211 */
212 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
213 }
214 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
215 }
216 }
217 else
218 rc = VERR_NEM_MISSING_KERNEL_API;
219 }
220
221 RT_NOREF(pVM);
222 return rc;
223}
224
225
226/**
227 * Perform an I/O control operation on the partition handle (VID.SYS).
228 *
229 * @returns NT status code.
230 * @param pGVM The ring-0 VM structure.
231 * @param uFunction The function to perform.
232 * @param pvInput The input buffer. This must point within the VM
233 * structure so we can easily convert to a ring-3
234 * pointer if necessary.
235 * @param cbInput The size of the input. @a pvInput must be NULL when
236 * zero.
237 * @param pvOutput The output buffer. This must also point within the
238 * VM structure for ring-3 pointer magic.
239 * @param cbOutput The size of the output. @a pvOutput must be NULL
240 * when zero.
241 */
242DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
243 void *pvOutput, uint32_t cbOutput)
244{
245#ifdef RT_STRICT
246 /*
247 * Input and output parameters are part of the VM CPU structure.
248 */
249 PVM pVM = pGVM->pVM;
250 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
251 if (pvInput)
252 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
253 if (pvOutput)
254 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
255#endif
256
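 /* SUPR0IoCtlPerform needs both ring-0 and ring-3 views of each buffer; the ring-3
    addresses are derived from the ring-0 ones using the delta set up in NEMR0InitVMPart2(). */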
257 int32_t rcNt = STATUS_UNSUCCESSFUL;
258 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
259 pvInput,
260 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
261 cbInput,
262 pvOutput,
263 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
264 cbOutput,
265 &rcNt);
266 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
267 return (NTSTATUS)rcNt;
268 return STATUS_UNSUCCESSFUL;
269}
270
271
272/**
273 * Perform an I/O control operation on the partition handle (VID.SYS),
274 * restarting on alert-like behaviour.
275 *
276 * @returns NT status code.
277 * @param pGVM The ring-0 VM structure.
278 * @param uFunction The function to perform.
279 * @param pvInput The input buffer. This must point within the VM
280 * structure so we can easily convert to a ring-3
281 * pointer if necessary.
282 * @param cbInput The size of the input. @a pvInput must be NULL when
283 * zero.
284 */
285DECLINLINE(NTSTATUS) nemR0NtPerformIoControlRestart(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput)
286{
287#ifdef RT_STRICT
288 /*
289 * Input and output parameters are part of the VM CPU structure.
290 */
291 PVM pVM = pGVM->pVM;
292 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
293 if (pvInput)
294 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
295#endif
296
297 int32_t rcNt = STATUS_UNSUCCESSFUL;
298 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
299 pvInput,
300 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
301 cbInput,
302 NULL,
303 NIL_RTR3PTR,
304 0,
305 &rcNt);
306 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
307 {
308 if (RT_LIKELY(rcNt == STATUS_SUCCESS))
309 return rcNt;
310
311 if ( rcNt == STATUS_TIMEOUT
312 || rcNt == STATUS_ALERTED)
313 {
314 DBGFTRACE_CUSTOM(pVM, "nemR0NtPerformIoControlRestart/1 %#x", rcNt);
315 rcNt = STATUS_UNSUCCESSFUL;
316 rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
317 pvInput,
318 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
319 cbInput,
320 NULL,
321 NIL_RTR3PTR,
322 0,
323 &rcNt);
324 if (!RT_SUCCESS(rc) && NT_SUCCESS((NTSTATUS)rcNt))
325 rcNt = STATUS_UNSUCCESSFUL;
326 DBGFTRACE_CUSTOM(pVM, "nemR0NtPerformIoControlRestart/2 %#x", rcNt);
327 }
328 return (NTSTATUS)rcNt;
329 }
330 return STATUS_UNSUCCESSFUL;
331}
332
333
334/**
335 * 2nd part of the initialization, after we've got a partition handle.
336 *
337 * @returns VBox status code.
338 * @param pGVM The ring-0 VM handle.
339 * @param pVM The cross context VM handle.
340 * @thread EMT(0)
341 */
342VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
343{
344 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
345 AssertRCReturn(rc, rc);
346 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
347
348 /*
349 * Copy and validate the I/O control information from ring-3.
350 */
351 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
352 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
353 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
354 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
355 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
356
357 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
358 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
359 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
360 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
361 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
362 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
363
364 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
365 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
366 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
367 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
368 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
369 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
370 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
371
372 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
373 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
374 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
375 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
376 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
377 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
378 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
379 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
380
381 /*
382 * Setup of an I/O control context for the partition handle for later use.
383 */
384 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
385 AssertLogRelRCReturn(rc, rc);
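 /* The VM structure is mapped at different addresses in ring-0 and ring-3; remember the delta
    so nemR0NtPerformIoControl() can synthesize ring-3 pointers for its buffers. */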
386 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
387
388 /*
389 * Get the partition ID.
390 */
391 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
392 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
393 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
394 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
395 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
396 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
397 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
398 VERR_NEM_INIT_FAILED);
399
400 return rc;
401}
402
403
404/**
405 * Clean up the NEM parts of the VM in ring-0.
406 *
407 * This is always called and must deal with the state regardless of whether
408 * NEMR0InitVM() was called or not. So, take care here.
409 *
410 * @param pGVM The ring-0 VM handle.
411 */
412VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
413{
414 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
415
416 /* Clean up I/O control context. */
417 if (pGVM->nem.s.pIoCtlCtx)
418 {
419 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
420 AssertRC(rc);
421 pGVM->nem.s.pIoCtlCtx = NULL;
422 }
423
424 /* Free the hypercall pages. */
425 VMCPUID i = pGVM->cCpus;
426 while (i-- > 0)
427 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
428
429 /* The non-EMT one too. */
430 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
431 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
432 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
433}
434
435
436#if 0 /* for debugging GPA unmapping. */
437static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
438{
439 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
440 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
441 pIn->PartitionId = pGVM->nem.s.idHvPartition;
442 pIn->VpIndex = pGVCpu->idCpu;
443 pIn->ByteCount = 0x10;
444 pIn->BaseGpa = GCPhys;
445 pIn->ControlFlags.AsUINT64 = 0;
446 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
447 memset(pOut, 0xfe, sizeof(*pOut));
448 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
449 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
450 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
451 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
452 __debugbreak();
453
454 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
455}
456#endif
457
458
459/**
460 * Worker for NEMR0MapPages and others.
461 */
462NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
463 uint32_t cPages, uint32_t fFlags)
464{
465 /*
466 * Validate.
467 */
468 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
469
470 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
471 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
472 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
473 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
474 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
475 if (GCPhysSrc != GCPhysDst)
476 {
477 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
478 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
479 }
480
481 /*
482 * Compose and make the hypercall.
483 * Ring-3 is not allowed to fill in the host physical addresses of the call.
484 */
485 for (uint32_t iTries = 0;; iTries++)
486 {
487 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
488 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
489 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
490 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
491 pMapPages->MapFlags = fFlags;
492 pMapPages->u32ExplicitPadding = 0;
493 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
494 {
495 RTHCPHYS HCPhys = NIL_RTGCPHYS;
496 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
497 AssertRCReturn(rc, rc);
498 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
499 }
500
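 /* The repeat count is encoded in the upper 32 bits of the hypercall input value; on full
    success the result carries HV_STATUS_SUCCESS in the low bits and the completed repeat
    count in that same upper position, which is what the check below relies on. */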
501 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
502 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
503 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
504 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
505 if (uResult == ((uint64_t)cPages << 32))
506 return VINF_SUCCESS;
507
508 /*
509 * If the partition is out of memory, try to donate another 512 pages to
510 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
511 */
512 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
513 || iTries > 16
514 || g_pfnWinHvDepositMemory == NULL)
515 {
516 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
517 return VERR_NEM_MAP_PAGES_FAILED;
518 }
519
520 size_t cPagesAdded = 0;
521 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
522 if (!cPagesAdded)
523 {
524 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
525 return VERR_NEM_MAP_PAGES_FAILED;
526 }
527 }
528}
529
530
531/**
532 * Maps pages into the guest physical address space.
533 *
534 * Generally the caller will be under the PGM lock already, so no extra effort
535 * is needed to make sure all changes happen under it.
536 *
537 * @returns VBox status code.
538 * @param pGVM The ring-0 VM handle.
539 * @param pVM The cross context VM handle.
540 * @param idCpu The calling EMT. Necessary for getting the
541 * hypercall page and arguments.
542 * @thread EMT(idCpu)
543 */
544VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
545{
546 /*
547 * Unpack the call.
548 */
549 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
550 if (RT_SUCCESS(rc))
551 {
552 PVMCPU pVCpu = &pVM->aCpus[idCpu];
553 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
554
555 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
556 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
557 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
558 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
559
560 /*
561 * Do the work.
562 */
563 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
564 }
565 return rc;
566}
567
568
569/**
570 * Worker for NEMR0UnmapPages and others.
571 */
572NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
573{
574 /*
575 * Validate input.
576 */
577 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
578
579 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
580 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
581 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
582 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
583
584 /*
585 * Compose and make the hypercall.
586 */
587 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
588 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
589 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
590 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
591 pUnmapPages->fFlags = 0;
592
593 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
594 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
595 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
596 if (uResult == ((uint64_t)cPages << 32))
597 {
598#if 1 /* Do we need to do this? Hopefully not... */
599 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
600 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
601 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
602#endif
603 return VINF_SUCCESS;
604 }
605
606 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
607 return VERR_NEM_UNMAP_PAGES_FAILED;
608}
609
610
611/**
612 * Unmaps pages from the guest physical address space.
613 *
614 * Generally the caller will be under the PGM lock already, so no extra effort
615 * is needed to make sure all changes happen under it.
616 *
617 * @returns VBox status code.
618 * @param pGVM The ring-0 VM handle.
619 * @param pVM The cross context VM handle.
620 * @param idCpu The calling EMT. Necessary for getting the
621 * hypercall page and arguments.
622 * @thread EMT(idCpu)
623 */
624VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
625{
626 /*
627 * Unpack the call.
628 */
629 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
630 if (RT_SUCCESS(rc))
631 {
632 PVMCPU pVCpu = &pVM->aCpus[idCpu];
633 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
634
635 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
636 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
637
638 /*
639 * Do the work.
640 */
641 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
642 }
643 return rc;
644}
645
646
647/**
648 * Worker for NEMR0ExportState.
649 *
650 * Intention is to use it internally later.
651 *
652 * @returns VBox status code.
653 * @param pGVM The ring-0 VM handle.
654 * @param pGVCpu The ring-0 VCPU handle.
655 * @param pCtx The CPU context structure to export from.
656 */
657NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
658{
659 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
660 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
661 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
662 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
663
664 pInput->PartitionId = pGVM->nem.s.idHvPartition;
665 pInput->VpIndex = pGVCpu->idCpu;
666 pInput->RsvdZ = 0;
667
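 /* Bits set in pCtx->fExtrn mark state still held externally by Hyper-V and thus not valid
    in CPUMCTX; everything else may be dirty locally and needs to be written back. */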
668 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
669 if ( !fWhat
670 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
671 return VINF_SUCCESS;
672 uintptr_t iReg = 0;
673
674 /* GPRs */
675 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
676 {
677 if (fWhat & CPUMCTX_EXTRN_RAX)
678 {
679 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
680 pInput->Elements[iReg].Name = HvX64RegisterRax;
681 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
682 iReg++;
683 }
684 if (fWhat & CPUMCTX_EXTRN_RCX)
685 {
686 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
687 pInput->Elements[iReg].Name = HvX64RegisterRcx;
688 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
689 iReg++;
690 }
691 if (fWhat & CPUMCTX_EXTRN_RDX)
692 {
693 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
694 pInput->Elements[iReg].Name = HvX64RegisterRdx;
695 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
696 iReg++;
697 }
698 if (fWhat & CPUMCTX_EXTRN_RBX)
699 {
700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
701 pInput->Elements[iReg].Name = HvX64RegisterRbx;
702 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
703 iReg++;
704 }
705 if (fWhat & CPUMCTX_EXTRN_RSP)
706 {
707 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
708 pInput->Elements[iReg].Name = HvX64RegisterRsp;
709 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
710 iReg++;
711 }
712 if (fWhat & CPUMCTX_EXTRN_RBP)
713 {
714 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
715 pInput->Elements[iReg].Name = HvX64RegisterRbp;
716 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
717 iReg++;
718 }
719 if (fWhat & CPUMCTX_EXTRN_RSI)
720 {
721 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
722 pInput->Elements[iReg].Name = HvX64RegisterRsi;
723 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
724 iReg++;
725 }
726 if (fWhat & CPUMCTX_EXTRN_RDI)
727 {
728 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
729 pInput->Elements[iReg].Name = HvX64RegisterRdi;
730 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
731 iReg++;
732 }
733 if (fWhat & CPUMCTX_EXTRN_R8_R15)
734 {
735 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
736 pInput->Elements[iReg].Name = HvX64RegisterR8;
737 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
738 iReg++;
739 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
740 pInput->Elements[iReg].Name = HvX64RegisterR9;
741 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
742 iReg++;
743 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
744 pInput->Elements[iReg].Name = HvX64RegisterR10;
745 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
746 iReg++;
747 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
748 pInput->Elements[iReg].Name = HvX64RegisterR11;
749 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
750 iReg++;
751 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
752 pInput->Elements[iReg].Name = HvX64RegisterR12;
753 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
754 iReg++;
755 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
756 pInput->Elements[iReg].Name = HvX64RegisterR13;
757 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
758 iReg++;
759 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
760 pInput->Elements[iReg].Name = HvX64RegisterR14;
761 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
762 iReg++;
763 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
764 pInput->Elements[iReg].Name = HvX64RegisterR15;
765 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
766 iReg++;
767 }
768 }
769
770 /* RIP & Flags */
771 if (fWhat & CPUMCTX_EXTRN_RIP)
772 {
773 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
774 pInput->Elements[iReg].Name = HvX64RegisterRip;
775 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
776 iReg++;
777 }
778 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
779 {
780 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
781 pInput->Elements[iReg].Name = HvX64RegisterRflags;
782 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
783 iReg++;
784 }
785
786 /* Segments */
787#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
788 do { \
789 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
790 pInput->Elements[a_idx].Name = a_enmName; \
791 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
792 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
793 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
794 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
795 } while (0)
796 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
797 {
798 if (fWhat & CPUMCTX_EXTRN_CS)
799 {
800 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
801 iReg++;
802 }
803 if (fWhat & CPUMCTX_EXTRN_ES)
804 {
805 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
806 iReg++;
807 }
808 if (fWhat & CPUMCTX_EXTRN_SS)
809 {
810 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
811 iReg++;
812 }
813 if (fWhat & CPUMCTX_EXTRN_DS)
814 {
815 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
816 iReg++;
817 }
818 if (fWhat & CPUMCTX_EXTRN_FS)
819 {
820 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
821 iReg++;
822 }
823 if (fWhat & CPUMCTX_EXTRN_GS)
824 {
825 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
826 iReg++;
827 }
828 }
829
830 /* Descriptor tables & task segment. */
831 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
832 {
833 if (fWhat & CPUMCTX_EXTRN_LDTR)
834 {
835 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
836 iReg++;
837 }
838 if (fWhat & CPUMCTX_EXTRN_TR)
839 {
840 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
841 iReg++;
842 }
843
844 if (fWhat & CPUMCTX_EXTRN_IDTR)
845 {
846 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
847 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
848 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
849 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
850 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
851 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
852 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
853 iReg++;
854 }
855 if (fWhat & CPUMCTX_EXTRN_GDTR)
856 {
857 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
858 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
859 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
860 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
861 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
862 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
863 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
864 iReg++;
865 }
866 }
867
868 /* Control registers. */
869 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
870 {
871 if (fWhat & CPUMCTX_EXTRN_CR0)
872 {
873 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
874 pInput->Elements[iReg].Name = HvX64RegisterCr0;
875 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
876 iReg++;
877 }
878 if (fWhat & CPUMCTX_EXTRN_CR2)
879 {
880 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
881 pInput->Elements[iReg].Name = HvX64RegisterCr2;
882 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
883 iReg++;
884 }
885 if (fWhat & CPUMCTX_EXTRN_CR3)
886 {
887 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
888 pInput->Elements[iReg].Name = HvX64RegisterCr3;
889 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
890 iReg++;
891 }
892 if (fWhat & CPUMCTX_EXTRN_CR4)
893 {
894 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
895 pInput->Elements[iReg].Name = HvX64RegisterCr4;
896 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
897 iReg++;
898 }
899 }
900 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
901 {
902 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
903 pInput->Elements[iReg].Name = HvX64RegisterCr8;
904 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
905 iReg++;
906 }
907
908 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
909
910 /* Debug registers. */
911/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
912 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
913 {
914 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
915 pInput->Elements[iReg].Name = HvX64RegisterDr0;
916 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
917 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
918 iReg++;
919 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
920 pInput->Elements[iReg].Name = HvX64RegisterDr1;
921 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
922 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
923 iReg++;
924 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
925 pInput->Elements[iReg].Name = HvX64RegisterDr2;
926 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
927 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
928 iReg++;
929 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
930 pInput->Elements[iReg].Name = HvX64RegisterDr3;
931 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
932 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
933 iReg++;
934 }
935 if (fWhat & CPUMCTX_EXTRN_DR6)
936 {
937 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
938 pInput->Elements[iReg].Name = HvX64RegisterDr6;
939 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
940 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
941 iReg++;
942 }
943 if (fWhat & CPUMCTX_EXTRN_DR7)
944 {
945 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
946 pInput->Elements[iReg].Name = HvX64RegisterDr7;
947 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
948 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
949 iReg++;
950 }
951
952 /* Floating point state. */
953 if (fWhat & CPUMCTX_EXTRN_X87)
954 {
955 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
956 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
957 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
958 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
959 iReg++;
960 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
961 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
962 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
963 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
964 iReg++;
965 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
966 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
967 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
968 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
969 iReg++;
970 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
971 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
972 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
973 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
974 iReg++;
975 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
976 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
977 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
978 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
979 iReg++;
980 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
981 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
982 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
983 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
984 iReg++;
985 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
986 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
987 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
988 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
989 iReg++;
990 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
991 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
992 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
993 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
994 iReg++;
995
996 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
997 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
998 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
999 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
1000 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
1001 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
1002 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
1003 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
1004 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
1005 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
1006 iReg++;
1007/** @todo we've got trouble if we try to write just SSE w/o X87. */
1008 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1009 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
1010 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
1011 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
1012 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
1013 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
1014 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1015 iReg++;
1016 }
1017
1018 /* Vector state. */
1019 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1020 {
1021 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1022 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
1023 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
1024 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
1025 iReg++;
1026 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1027 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
1028 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
1029 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
1030 iReg++;
1031 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1032 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
1033 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
1034 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
1035 iReg++;
1036 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1037 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1038 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
1039 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
1040 iReg++;
1041 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1042 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1043 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1044 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1045 iReg++;
1046 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1047 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1048 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1049 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1050 iReg++;
1051 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1052 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1053 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1054 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1055 iReg++;
1056 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1057 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1058 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1059 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1060 iReg++;
1061 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1062 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1063 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1064 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1065 iReg++;
1066 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1067 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1068 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1069 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1070 iReg++;
1071 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1072 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1073 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1074 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1075 iReg++;
1076 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1077 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1078 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1079 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1080 iReg++;
1081 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1082 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1083 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1084 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1085 iReg++;
1086 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1087 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1088 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1089 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1090 iReg++;
1091 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1092 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1093 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1094 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1095 iReg++;
1096 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1097 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1098 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1099 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1100 iReg++;
1101 }
1102
1103 /* MSRs */
1104 // HvX64RegisterTsc - don't touch
1105 if (fWhat & CPUMCTX_EXTRN_EFER)
1106 {
1107 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1108 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1109 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1110 iReg++;
1111 }
1112 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1113 {
1114 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1115 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1116 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1117 iReg++;
1118 }
1119 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1120 {
1121 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1122 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1123 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1124 iReg++;
1125 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1126 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1127 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1128 iReg++;
1129 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1130 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1131 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1132 iReg++;
1133 }
1134 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1135 {
1136 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1137 pInput->Elements[iReg].Name = HvX64RegisterStar;
1138 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1139 iReg++;
1140 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1141 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1142 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1143 iReg++;
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1146 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1150 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1151 iReg++;
1152 }
1153 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1154 {
1155 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1156 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1157 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1158 iReg++;
1159 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1160 pInput->Elements[iReg].Name = HvX64RegisterPat;
1161 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1162 iReg++;
1163#if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1164 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1165 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1166 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1167 iReg++;
1168#endif
1169
1170 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1171
1172 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1173 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1174 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1175 iReg++;
1176
1177 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1178
1179 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1180 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1181 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1182 iReg++;
1183 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1184 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1185 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1186 iReg++;
1187 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1188 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1189 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1190 iReg++;
1191 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1192 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1193 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1194 iReg++;
1195 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1196 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1197 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1198 iReg++;
1199 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1200 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1201 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1202 iReg++;
1203 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1204 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1205 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1206 iReg++;
1207 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1208 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1209 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1210 iReg++;
1211 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1212 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1213 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1214 iReg++;
1215 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1216 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1217 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1218 iReg++;
1219 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1220 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1221 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1222 iReg++;
1223 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1224 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1225 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1226 iReg++;
1227
1228#if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1229 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1230 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1231 {
1232 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1233 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1234 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1235 iReg++;
1236 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1237 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1238 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1239 iReg++;
1240 }
1241#endif
1242 }
1243
1244 /* event injection (clear it). */
1245 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1246 {
1247 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1248 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1249 pInput->Elements[iReg].Value.Reg64 = 0;
1250 iReg++;
1251 }
1252
1253 /* Interruptibility state. This can get a little complicated since we get
1254 half of the state via HV_X64_VP_EXECUTION_STATE. */
1255 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1256 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1257 {
1258 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1259 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1260 pInput->Elements[iReg].Value.Reg64 = 0;
1261 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1262 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1263 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1264 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1265 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1266 iReg++;
1267 }
1268 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1269 {
1270 if ( pVCpu->nem.s.fLastInterruptShadow
1271 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1272 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1273 {
1274 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1275 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1276 pInput->Elements[iReg].Value.Reg64 = 0;
1277 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1278 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1279 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1280 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1281 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1282 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1283 iReg++;
1284 }
1285 }
1286 else
1287 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1288
1289 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1290 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1291 if ( fDesiredIntWin
1292 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1293 {
1294 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1295 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1296 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1297 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1298 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1299 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1300 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1301 iReg++;
1302 }
1303
1304 /// @todo HvRegisterPendingEvent0
1305 /// @todo HvRegisterPendingEvent1
1306
1307 /*
1308 * Set the registers.
1309 */
1310 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1311
1312 /*
1313 * Make the hypercall.
1314 */
1315 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1316 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1317 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1318 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1319 VERR_NEM_SET_REGISTERS_FAILED);
1320 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1321 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
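 /* Everything was handed back to Hyper-V, so mark the entire context as externally kept again. */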
1322 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1323 return VINF_SUCCESS;
1324}
1325
1326
1327/**
1328 * Export the state to the native API (out of CPUMCTX).
1329 *
1330 * @returns VBox status code
1331 * @param pGVM The ring-0 VM handle.
1332 * @param pVM The cross context VM handle.
1333 * @param idCpu The calling EMT. Necessary for getting the
1334 * hypercall page and arguments.
1335 */
1336VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1337{
1338 /*
1339 * Validate the call.
1340 */
1341 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1342 if (RT_SUCCESS(rc))
1343 {
1344 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1345 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1346 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1347
1348 /*
1349 * Call worker.
1350 */
1351 rc = nemR0WinExportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1352 }
1353 return rc;
1354}
1355
1356
1357/**
1358 * Worker for NEMR0ImportState.
1359 *
1360 * Intention is to use it internally later.
1361 *
1362 * @returns VBox status code.
1363 * @param pGVM The ring-0 VM handle.
1364 * @param pGVCpu The ring-0 VCPU handle.
1365 * @param pCtx The CPU context structure to import into.
1366 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1367 */
1368NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1369{
1370 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1371 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1372 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1373
1374 fWhat &= pCtx->fExtrn;
1375
1376 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1377 pInput->VpIndex = pGVCpu->idCpu;
1378 pInput->fFlags = 0;
1379
1380 /* GPRs */
1381 uintptr_t iReg = 0;
1382 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1383 {
1384 if (fWhat & CPUMCTX_EXTRN_RAX)
1385 pInput->Names[iReg++] = HvX64RegisterRax;
1386 if (fWhat & CPUMCTX_EXTRN_RCX)
1387 pInput->Names[iReg++] = HvX64RegisterRcx;
1388 if (fWhat & CPUMCTX_EXTRN_RDX)
1389 pInput->Names[iReg++] = HvX64RegisterRdx;
1390 if (fWhat & CPUMCTX_EXTRN_RBX)
1391 pInput->Names[iReg++] = HvX64RegisterRbx;
1392 if (fWhat & CPUMCTX_EXTRN_RSP)
1393 pInput->Names[iReg++] = HvX64RegisterRsp;
1394 if (fWhat & CPUMCTX_EXTRN_RBP)
1395 pInput->Names[iReg++] = HvX64RegisterRbp;
1396 if (fWhat & CPUMCTX_EXTRN_RSI)
1397 pInput->Names[iReg++] = HvX64RegisterRsi;
1398 if (fWhat & CPUMCTX_EXTRN_RDI)
1399 pInput->Names[iReg++] = HvX64RegisterRdi;
1400 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1401 {
1402 pInput->Names[iReg++] = HvX64RegisterR8;
1403 pInput->Names[iReg++] = HvX64RegisterR9;
1404 pInput->Names[iReg++] = HvX64RegisterR10;
1405 pInput->Names[iReg++] = HvX64RegisterR11;
1406 pInput->Names[iReg++] = HvX64RegisterR12;
1407 pInput->Names[iReg++] = HvX64RegisterR13;
1408 pInput->Names[iReg++] = HvX64RegisterR14;
1409 pInput->Names[iReg++] = HvX64RegisterR15;
1410 }
1411 }
1412
1413 /* RIP & Flags */
1414 if (fWhat & CPUMCTX_EXTRN_RIP)
1415 pInput->Names[iReg++] = HvX64RegisterRip;
1416 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1417 pInput->Names[iReg++] = HvX64RegisterRflags;
1418
1419 /* Segments */
1420 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1421 {
1422 if (fWhat & CPUMCTX_EXTRN_CS)
1423 pInput->Names[iReg++] = HvX64RegisterCs;
1424 if (fWhat & CPUMCTX_EXTRN_ES)
1425 pInput->Names[iReg++] = HvX64RegisterEs;
1426 if (fWhat & CPUMCTX_EXTRN_SS)
1427 pInput->Names[iReg++] = HvX64RegisterSs;
1428 if (fWhat & CPUMCTX_EXTRN_DS)
1429 pInput->Names[iReg++] = HvX64RegisterDs;
1430 if (fWhat & CPUMCTX_EXTRN_FS)
1431 pInput->Names[iReg++] = HvX64RegisterFs;
1432 if (fWhat & CPUMCTX_EXTRN_GS)
1433 pInput->Names[iReg++] = HvX64RegisterGs;
1434 }
1435
1436 /* Descriptor tables and the task segment. */
1437 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1438 {
1439 if (fWhat & CPUMCTX_EXTRN_LDTR)
1440 pInput->Names[iReg++] = HvX64RegisterLdtr;
1441 if (fWhat & CPUMCTX_EXTRN_TR)
1442 pInput->Names[iReg++] = HvX64RegisterTr;
1443 if (fWhat & CPUMCTX_EXTRN_IDTR)
1444 pInput->Names[iReg++] = HvX64RegisterIdtr;
1445 if (fWhat & CPUMCTX_EXTRN_GDTR)
1446 pInput->Names[iReg++] = HvX64RegisterGdtr;
1447 }
1448
1449 /* Control registers. */
1450 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1451 {
1452 if (fWhat & CPUMCTX_EXTRN_CR0)
1453 pInput->Names[iReg++] = HvX64RegisterCr0;
1454 if (fWhat & CPUMCTX_EXTRN_CR2)
1455 pInput->Names[iReg++] = HvX64RegisterCr2;
1456 if (fWhat & CPUMCTX_EXTRN_CR3)
1457 pInput->Names[iReg++] = HvX64RegisterCr3;
1458 if (fWhat & CPUMCTX_EXTRN_CR4)
1459 pInput->Names[iReg++] = HvX64RegisterCr4;
1460 }
1461 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1462 pInput->Names[iReg++] = HvX64RegisterCr8;
1463
1464 /* Debug registers. */
1465 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1466 {
1467 pInput->Names[iReg++] = HvX64RegisterDr0;
1468 pInput->Names[iReg++] = HvX64RegisterDr1;
1469 pInput->Names[iReg++] = HvX64RegisterDr2;
1470 pInput->Names[iReg++] = HvX64RegisterDr3;
1471 }
1472 if (fWhat & CPUMCTX_EXTRN_DR6)
1473 pInput->Names[iReg++] = HvX64RegisterDr6;
1474 if (fWhat & CPUMCTX_EXTRN_DR7)
1475 pInput->Names[iReg++] = HvX64RegisterDr7;
1476
1477 /* Floating point state. */
1478 if (fWhat & CPUMCTX_EXTRN_X87)
1479 {
1480 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1481 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1482 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1483 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1484 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1485 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1486 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1487 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1488 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1489 }
1490 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1491 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1492
1493 /* Vector state. */
1494 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1495 {
1496 pInput->Names[iReg++] = HvX64RegisterXmm0;
1497 pInput->Names[iReg++] = HvX64RegisterXmm1;
1498 pInput->Names[iReg++] = HvX64RegisterXmm2;
1499 pInput->Names[iReg++] = HvX64RegisterXmm3;
1500 pInput->Names[iReg++] = HvX64RegisterXmm4;
1501 pInput->Names[iReg++] = HvX64RegisterXmm5;
1502 pInput->Names[iReg++] = HvX64RegisterXmm6;
1503 pInput->Names[iReg++] = HvX64RegisterXmm7;
1504 pInput->Names[iReg++] = HvX64RegisterXmm8;
1505 pInput->Names[iReg++] = HvX64RegisterXmm9;
1506 pInput->Names[iReg++] = HvX64RegisterXmm10;
1507 pInput->Names[iReg++] = HvX64RegisterXmm11;
1508 pInput->Names[iReg++] = HvX64RegisterXmm12;
1509 pInput->Names[iReg++] = HvX64RegisterXmm13;
1510 pInput->Names[iReg++] = HvX64RegisterXmm14;
1511 pInput->Names[iReg++] = HvX64RegisterXmm15;
1512 }
1513
1514 /* MSRs */
1515 // HvX64RegisterTsc - don't touch
1516 if (fWhat & CPUMCTX_EXTRN_EFER)
1517 pInput->Names[iReg++] = HvX64RegisterEfer;
1518 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1519 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1520 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1521 {
1522 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1523 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1524 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1525 }
1526 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1527 {
1528 pInput->Names[iReg++] = HvX64RegisterStar;
1529 pInput->Names[iReg++] = HvX64RegisterLstar;
1530 pInput->Names[iReg++] = HvX64RegisterCstar;
1531 pInput->Names[iReg++] = HvX64RegisterSfmask;
1532 }
1533
1534#ifdef LOG_ENABLED
1535 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1536#endif
1537 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1538 {
1539 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1540 pInput->Names[iReg++] = HvX64RegisterPat;
1541#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1542 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1543#endif
1544 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1545 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1546 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1547 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1548 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1549 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1550 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1551 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1552 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1553 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1554 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1555 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1556 pInput->Names[iReg++] = HvX64RegisterTscAux;
1557#if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1558 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1559 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1560#endif
1561#ifdef LOG_ENABLED
1562 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1563 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1564#endif
1565 }
1566
1567 /* Interruptibility. */
1568 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1569 {
1570 pInput->Names[iReg++] = HvRegisterInterruptState;
1571 pInput->Names[iReg++] = HvX64RegisterRip;
1572 }
1573
1574 /* event injection */
1575 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1576 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1577 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1578 size_t const cRegs = iReg;
1579 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1580
1581 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1582 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1583 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1584
1585 /*
1586 * Make the hypercall.
1587 */
1588 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1589 pGVCpu->nem.s.HypercallData.HCPhysPage,
1590 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1591 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1592 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1593 VERR_NEM_GET_REGISTERS_FAILED);
1594 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1595
1596 /*
1597 * Copy information to the CPUM context.
1598 */
1599 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1600 iReg = 0;
1601
1602 /* GPRs */
1603 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1604 {
1605 if (fWhat & CPUMCTX_EXTRN_RAX)
1606 {
1607 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1608 pCtx->rax = paValues[iReg++].Reg64;
1609 }
1610 if (fWhat & CPUMCTX_EXTRN_RCX)
1611 {
1612 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1613 pCtx->rcx = paValues[iReg++].Reg64;
1614 }
1615 if (fWhat & CPUMCTX_EXTRN_RDX)
1616 {
1617 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1618 pCtx->rdx = paValues[iReg++].Reg64;
1619 }
1620 if (fWhat & CPUMCTX_EXTRN_RBX)
1621 {
1622 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1623 pCtx->rbx = paValues[iReg++].Reg64;
1624 }
1625 if (fWhat & CPUMCTX_EXTRN_RSP)
1626 {
1627 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1628 pCtx->rsp = paValues[iReg++].Reg64;
1629 }
1630 if (fWhat & CPUMCTX_EXTRN_RBP)
1631 {
1632 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1633 pCtx->rbp = paValues[iReg++].Reg64;
1634 }
1635 if (fWhat & CPUMCTX_EXTRN_RSI)
1636 {
1637 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1638 pCtx->rsi = paValues[iReg++].Reg64;
1639 }
1640 if (fWhat & CPUMCTX_EXTRN_RDI)
1641 {
1642 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1643 pCtx->rdi = paValues[iReg++].Reg64;
1644 }
1645 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1646 {
1647 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1648 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1649 pCtx->r8 = paValues[iReg++].Reg64;
1650 pCtx->r9 = paValues[iReg++].Reg64;
1651 pCtx->r10 = paValues[iReg++].Reg64;
1652 pCtx->r11 = paValues[iReg++].Reg64;
1653 pCtx->r12 = paValues[iReg++].Reg64;
1654 pCtx->r13 = paValues[iReg++].Reg64;
1655 pCtx->r14 = paValues[iReg++].Reg64;
1656 pCtx->r15 = paValues[iReg++].Reg64;
1657 }
1658 }
1659
1660 /* RIP & Flags */
1661 if (fWhat & CPUMCTX_EXTRN_RIP)
1662 {
1663 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1664 pCtx->rip = paValues[iReg++].Reg64;
1665 }
1666 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1667 {
1668 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1669 pCtx->rflags.u = paValues[iReg++].Reg64;
1670 }
1671
1672 /* Segments */
1673#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1674 do { \
1675 Assert(pInput->Names[a_idx] == a_enmName); \
1676 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1677 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1678 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1679 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1680 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1681 } while (0)
1682 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1683 {
1684 if (fWhat & CPUMCTX_EXTRN_CS)
1685 {
1686 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1687 iReg++;
1688 }
1689 if (fWhat & CPUMCTX_EXTRN_ES)
1690 {
1691 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1692 iReg++;
1693 }
1694 if (fWhat & CPUMCTX_EXTRN_SS)
1695 {
1696 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1697 iReg++;
1698 }
1699 if (fWhat & CPUMCTX_EXTRN_DS)
1700 {
1701 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1702 iReg++;
1703 }
1704 if (fWhat & CPUMCTX_EXTRN_FS)
1705 {
1706 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1707 iReg++;
1708 }
1709 if (fWhat & CPUMCTX_EXTRN_GS)
1710 {
1711 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1712 iReg++;
1713 }
1714 }
1715 /* Descriptor tables and the task segment. */
1716 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1717 {
1718 if (fWhat & CPUMCTX_EXTRN_LDTR)
1719 {
1720 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1721 iReg++;
1722 }
1723 if (fWhat & CPUMCTX_EXTRN_TR)
1724 {
1725            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So, to
1726               avoid triggering sanity assertions around the code, always fix this up. */
1727 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1728 switch (pCtx->tr.Attr.n.u4Type)
1729 {
1730 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1731 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1732 break;
1733 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1734 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1735 break;
1736 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1737 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1738 break;
1739 }
1740 iReg++;
1741 }
1742 if (fWhat & CPUMCTX_EXTRN_IDTR)
1743 {
1744 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1745 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1746 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1747 iReg++;
1748 }
1749 if (fWhat & CPUMCTX_EXTRN_GDTR)
1750 {
1751 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1752 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1753 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1754 iReg++;
1755 }
1756 }
1757
1758 /* Control registers. */
1759 bool fMaybeChangedMode = false;
1760 bool fFlushTlb = false;
1761 bool fFlushGlobalTlb = false;
1762 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1763 {
1764 if (fWhat & CPUMCTX_EXTRN_CR0)
1765 {
1766 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1767 if (pCtx->cr0 != paValues[iReg].Reg64)
1768 {
1769 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1770 fMaybeChangedMode = true;
1771 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1772 }
1773 iReg++;
1774 }
1775 if (fWhat & CPUMCTX_EXTRN_CR2)
1776 {
1777 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1778 pCtx->cr2 = paValues[iReg].Reg64;
1779 iReg++;
1780 }
1781 if (fWhat & CPUMCTX_EXTRN_CR3)
1782 {
1783 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1784 if (pCtx->cr3 != paValues[iReg].Reg64)
1785 {
1786 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1787 fFlushTlb = true;
1788 }
1789 iReg++;
1790 }
1791 if (fWhat & CPUMCTX_EXTRN_CR4)
1792 {
1793 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1794 if (pCtx->cr4 != paValues[iReg].Reg64)
1795 {
1796 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1797 fMaybeChangedMode = true;
1798 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1799 }
1800 iReg++;
1801 }
1802 }
1803 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1804 {
1805 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1806 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1807 iReg++;
1808 }
1809
1810 /* Debug registers. */
1811/** @todo fixme */
1812 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1813 {
1814 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1815 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1816 if (pCtx->dr[0] != paValues[iReg].Reg64)
1817 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1818 iReg++;
1819 if (pCtx->dr[1] != paValues[iReg].Reg64)
1820 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1821 iReg++;
1822 if (pCtx->dr[2] != paValues[iReg].Reg64)
1823 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1824 iReg++;
1825 if (pCtx->dr[3] != paValues[iReg].Reg64)
1826 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1827 iReg++;
1828 }
1829 if (fWhat & CPUMCTX_EXTRN_DR6)
1830 {
1831 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1832 if (pCtx->dr[6] != paValues[iReg].Reg64)
1833 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1834 iReg++;
1835 }
1836 if (fWhat & CPUMCTX_EXTRN_DR7)
1837 {
1838 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1839 if (pCtx->dr[7] != paValues[iReg].Reg64)
1840 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1841 iReg++;
1842 }
1843
1844 /* Floating point state. */
1845 if (fWhat & CPUMCTX_EXTRN_X87)
1846 {
1847 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1848 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1849 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1850 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1851 iReg++;
1852 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1853 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1854 iReg++;
1855 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1856 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1857 iReg++;
1858 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1859 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1860 iReg++;
1861 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1862 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1863 iReg++;
1864 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1865 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1866 iReg++;
1867 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1868 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1869 iReg++;
1870 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1871 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1872 iReg++;
1873
1874 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1875 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1876 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1877 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1878 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1879 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1880 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1881 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1882 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1883 iReg++;
1884 }
1885
1886 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1887 {
1888 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1889 if (fWhat & CPUMCTX_EXTRN_X87)
1890 {
1891 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1892 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1893 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1894 }
1895 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1896 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1897 iReg++;
1898 }
1899
1900 /* Vector state. */
1901 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1902 {
1903 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1904 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1905 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1906 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1907 iReg++;
1908 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1909 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1910 iReg++;
1911 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1912 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1913 iReg++;
1914 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1915 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1916 iReg++;
1917 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1918 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1919 iReg++;
1920 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1921 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1922 iReg++;
1923 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1924 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1925 iReg++;
1926 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1927 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1928 iReg++;
1929 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1930 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1931 iReg++;
1932 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1933 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1934 iReg++;
1935 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1936 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1937 iReg++;
1938 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1939 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1940 iReg++;
1941 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1942 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1943 iReg++;
1944 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1945 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1946 iReg++;
1947 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1948 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1949 iReg++;
1950 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1951 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1952 iReg++;
1953 }
1954
1955
1956 /* MSRs */
1957 // HvX64RegisterTsc - don't touch
1958 if (fWhat & CPUMCTX_EXTRN_EFER)
1959 {
1960 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1961 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1962 {
1963 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1964 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1965 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1966 pCtx->msrEFER = paValues[iReg].Reg64;
1967 fMaybeChangedMode = true;
1968 }
1969 iReg++;
1970 }
1971 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1972 {
1973 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1974 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1975 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1976 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1977 iReg++;
1978 }
1979 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1980 {
1981 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1982 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1983 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1984 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1985 iReg++;
1986
1987 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1988 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1989 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1990 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1991 iReg++;
1992
1993 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1994 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1995 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1996 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1997 iReg++;
1998 }
1999 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2000 {
2001 Assert(pInput->Names[iReg] == HvX64RegisterStar);
2002 if (pCtx->msrSTAR != paValues[iReg].Reg64)
2003 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
2004 pCtx->msrSTAR = paValues[iReg].Reg64;
2005 iReg++;
2006
2007 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
2008 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
2009 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
2010 pCtx->msrLSTAR = paValues[iReg].Reg64;
2011 iReg++;
2012
2013 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
2014 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
2015 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
2016 pCtx->msrCSTAR = paValues[iReg].Reg64;
2017 iReg++;
2018
2019 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
2020 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
2021 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
2022 pCtx->msrSFMASK = paValues[iReg].Reg64;
2023 iReg++;
2024 }
2025 bool fUpdateApicBase = false;
2026 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2027 {
2028 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2029 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
2030 if (paValues[iReg].Reg64 != uOldBase)
2031 {
2032 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2033 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2034 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
2035 if (rc2 == VINF_CPUM_R3_MSR_WRITE)
2036 {
2037 pVCpu->nem.s.uPendingApicBase = paValues[iReg].Reg64;
2038 fUpdateApicBase = true;
2039 }
2040 else
2041 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", VBOXSTRICTRC_VAL(rc2), paValues[iReg].Reg64));
2042 }
2043 iReg++;
2044
2045 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2046 if (pCtx->msrPAT != paValues[iReg].Reg64)
2047 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2048 pCtx->msrPAT = paValues[iReg].Reg64;
2049 iReg++;
2050
2051#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2052 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2053 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2054 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2055 iReg++;
2056#endif
2057
2058 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2059 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2060 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2061 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2062 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2063 iReg++;
2064
2065        /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2066
2067 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2068 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2069            Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2070 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2071 iReg++;
2072
2073 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2074 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2075 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2076 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2077 iReg++;
2078
2079 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2080 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2081 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2082 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2083 iReg++;
2084
2085 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2086 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2087            Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2088 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2089 iReg++;
2090
2091 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2092 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2093            Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2094 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2095 iReg++;
2096
2097 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2098 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2099            Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2100 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2101 iReg++;
2102
2103 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2104 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2105            Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2106 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2107 iReg++;
2108
2109 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2110 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2111            Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2112 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2113 iReg++;
2114
2115 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2116 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2117            Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2118 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2119 iReg++;
2120
2121 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2122 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2123            Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2124 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2125 iReg++;
2126
2127 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2128 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2129            Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2130 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2131 iReg++;
2132
2133 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2134 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2135 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2136 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2137 iReg++;
2138
2139#if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2140 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2141 {
2142 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2143 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2144 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2145 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2146 iReg++;
2147 }
2148#endif
2149#ifdef LOG_ENABLED
2150 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2151 {
2152 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2153 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2154 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2155 iReg++;
2156 }
2157#endif
2158 }
2159
2160 /* Interruptibility. */
2161 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2162 {
2163 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2164 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2165
2166 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2167 {
2168 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2169 if (paValues[iReg].InterruptState.InterruptShadow)
2170 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2171 else
2172 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2173 }
2174
2175 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2176 {
2177 if (paValues[iReg].InterruptState.NmiMasked)
2178 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2179 else
2180 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2181 }
2182
2183 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2184 iReg += 2;
2185 }
2186
2187 /* Event injection. */
2188 /// @todo HvRegisterPendingInterruption
2189 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2190 if (paValues[iReg].PendingInterruption.InterruptionPending)
2191 {
2192 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2193 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2194 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2195 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2196 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2197 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2198 }
2199
2200 /// @todo HvRegisterPendingEvent0
2201 /// @todo HvRegisterPendingEvent1
2202
2203 /* Almost done, just update extrn flags and maybe change PGM mode. */
2204 pCtx->fExtrn &= ~fWhat;
2205 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2206 pCtx->fExtrn = 0;
2207
2208 /* Typical. */
2209 if (!fMaybeChangedMode && !fFlushTlb && !fUpdateApicBase)
2210 return VINF_SUCCESS;
2211
2212 /*
2213 * Slow.
2214 */
2215 int rc = VINF_SUCCESS;
2216 if (fMaybeChangedMode)
2217 {
2218 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2219 if (rc == VINF_PGM_CHANGE_MODE)
2220 {
2221 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
2222 return VERR_NEM_CHANGE_PGM_MODE;
2223 }
2224 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
2225 }
2226
2227 if (fFlushTlb)
2228 {
2229 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2230 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2231 }
2232
2233 if (fUpdateApicBase && rc == VINF_SUCCESS)
2234 {
2235 LogFlow(("nemR0WinImportState: -> VERR_NEM_UPDATE_APIC_BASE!\n"));
2236 rc = VERR_NEM_UPDATE_APIC_BASE;
2237 }
2238
2239 return rc;
2240}
2241
2242
2243/**
2244 * Import the state from the native API (back to CPUMCTX).
2245 *
2246 * @returns VBox status code
2247 * @param pGVM The ring-0 VM handle.
2248 * @param pVM The cross context VM handle.
2249 * @param idCpu The calling EMT. Necessary for getting the
2250 * hypercall page and arguments.
2251 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2252 *                      CPUMCTX_EXTRN_ALL for everything.
2253 */
2254VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2255{
2256 /*
2257 * Validate the call.
2258 */
2259 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2260 if (RT_SUCCESS(rc))
2261 {
2262 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2263 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2264 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2265
2266 /*
2267 * Call worker.
2268 */
2269 rc = nemR0WinImportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu), fWhat);
2270 }
2271 return rc;
2272}
2273
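/*
 * Usage sketch: ring-3 does not call NEMR0ImportState directly; the EMT goes
 * through the VMMR0 operation dispatcher, roughly as below.  The operation
 * name and the VMMR3CallR0Emt signature are assumptions based on the VMM
 * headers of this era and may differ in detail.
 */
#if 0 /* illustration only, not built */
static int nemSketchImportAllFromRing3(PVM pVM, PVMCPU pVCpu)
{
    /* Ask ring-0 to pull the complete guest register state back out of Hyper-V. */
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, CPUMCTX_EXTRN_ALL, NULL);
}
#endif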
2274
2275/**
2276 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2277 *
2278 * @returns VBox status code.
2279 * @param pGVM The ring-0 VM handle.
2280 * @param pGVCpu The ring-0 VCPU handle.
2281 * @param pcTicks Where to return the current CPU tick count.
2282 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2283 */
2284NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2285{
2286 /*
2287 * Hypercall parameters.
2288 */
2289 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2290 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2291 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2292
2293 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2294 pInput->VpIndex = pGVCpu->idCpu;
2295 pInput->fFlags = 0;
2296 pInput->Names[0] = HvX64RegisterTsc;
2297 pInput->Names[1] = HvX64RegisterTscAux;
2298
2299 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2300 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2301 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2302
2303 /*
2304 * Make the hypercall.
2305 */
2306 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2307 pGVCpu->nem.s.HypercallData.HCPhysPage,
2308 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2309 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2310 VERR_NEM_GET_REGISTERS_FAILED);
2311
2312 /*
2313 * Get results.
2314 */
2315 *pcTicks = paValues[0].Reg64;
2316 if (pcAux)
2317        *pcAux = paValues[1].Reg32;
2318 return VINF_SUCCESS;
2319}
2320
2321
2322/**
2323 * Queries the TSC and TSC_AUX values, putting the results in pVCpu->nem.s.Hypercall.QueryCpuTick.
2324 *
2325 * @returns VBox status code
2326 * @param pGVM The ring-0 VM handle.
2327 * @param pVM The cross context VM handle.
2328 * @param idCpu The calling EMT. Necessary for getting the
2329 * hypercall page and arguments.
2330 */
2331VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2332{
2333 /*
2334 * Validate the call.
2335 */
2336 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2337 if (RT_SUCCESS(rc))
2338 {
2339 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2340 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2341 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2342
2343 /*
2344 * Call worker.
2345 */
2346 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2347 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2348 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2349 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2350 }
2351 return rc;
2352}
2353
2354
2355/**
2356 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2357 *
2358 * @returns VBox status code.
2359 * @param pGVM The ring-0 VM handle.
2360 * @param pGVCpu The ring-0 VCPU handle.
2361 * @param uPausedTscValue The TSC value at the time of pausing.
2362 */
2363NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2364{
2365 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2366
2367 /*
2368 * Set up the hypercall parameters.
2369 */
2370 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2371 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2372
2373 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2374 pInput->VpIndex = 0;
2375 pInput->RsvdZ = 0;
2376 pInput->Elements[0].Name = HvX64RegisterTsc;
2377 pInput->Elements[0].Pad0 = 0;
2378 pInput->Elements[0].Pad1 = 0;
2379 pInput->Elements[0].Value.Reg128.High64 = 0;
2380 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2381
2382 /*
2383 * Disable interrupts and do the first virtual CPU.
2384 */
2385 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2386 uint64_t const uFirstTsc = ASMReadTSC();
2387 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2388 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2389 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2390 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2391
2392 /*
2393     * Do the secondary processors, adjusting for elapsed TSC and keeping our fingers crossed
2394 * that we don't introduce too much drift here.
2395 */
2396 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2397 {
2398 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2399 Assert(pInput->RsvdZ == 0);
2400 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2401 Assert(pInput->Elements[0].Pad0 == 0);
2402 Assert(pInput->Elements[0].Pad1 == 0);
2403 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2404
2405 pInput->VpIndex = iCpu;
2406 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2407 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2408
2409 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2410 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2411 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2412 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2413 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2414 }
2415
2416 /*
2417 * Done.
2418 */
2419 ASMSetFlags(fSavedFlags);
2420 return VINF_SUCCESS;
2421}
2422
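/*
 * Minimal sketch of the TSC adjustment applied to the secondary vCPUs above:
 * each one gets the paused value plus however many host TSC ticks elapsed
 * since the first vCPU was programmed, so all vCPUs resume as if they had
 * kept counting from the same pause point.  The helper name is made up for
 * illustration; ASMReadTSC() comes from IPRT.
 */
#if 0 /* illustration only, not built */
# include <iprt/asm-amd64-x86.h>
static inline uint64_t nemSketchAdjustedPausedTsc(uint64_t uPausedTscValue, uint64_t uFirstTsc)
{
    return uPausedTscValue + (ASMReadTSC() - uFirstTsc);
}
#endif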
2423
2424/**
2425 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2426 *
2427 * @returns VBox status code
2428 * @param pGVM The ring-0 VM handle.
2429 * @param pVM The cross context VM handle.
2430 * @param idCpu The calling EMT. Necessary for getting the
2431 * hypercall page and arguments.
2432 * @param uPausedTscValue The TSC value at the time of pausing.
2433 */
2434VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2435{
2436 /*
2437 * Validate the call.
2438 */
2439 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2440 if (RT_SUCCESS(rc))
2441 {
2442 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2443 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2444 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2445
2446 /*
2447 * Call worker.
2448 */
2449 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2450 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2451 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2452 }
2453 return rc;
2454}
2455
2456
2457VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2458{
2459#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2460 PVM pVM = pGVM->pVM;
2461 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2462#else
2463 RT_NOREF(pGVM, idCpu);
2464 return VERR_NOT_IMPLEMENTED;
2465#endif
2466}
2467
2468
2469/**
2470 * Updates statistics in the VM structure.
2471 *
2472 * @returns VBox status code.
2473 * @param pGVM The ring-0 VM handle.
2474 * @param pVM The cross context VM handle.
2475 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2476 * page and arguments.
2477 */
2478VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2479{
2480 /*
2481 * Validate the call.
2482 */
2483 int rc;
2484 if (idCpu == NIL_VMCPUID)
2485 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2486 else
2487 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2488 if (RT_SUCCESS(rc))
2489 {
2490 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2491
2492 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2493 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2494 : &pGVM->nem.s.HypercallData;
2495 if ( RT_VALID_PTR(pHypercallData->pbPage)
2496 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2497 {
2498 if (idCpu == NIL_VMCPUID)
2499 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2500 if (RT_SUCCESS(rc))
2501 {
2502 /*
2503 * Query the memory statistics for the partition.
2504 */
2505 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2506 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2507 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2508 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2509 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2510 pInput->ProximityDomainInfo.Id = 0;
2511
2512 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2513 RT_ZERO(*pOutput);
2514
2515 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2516 pHypercallData->HCPhysPage,
2517 pHypercallData->HCPhysPage + sizeof(*pInput));
2518 if (uResult == HV_STATUS_SUCCESS)
2519 {
2520 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2521 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2522 rc = VINF_SUCCESS;
2523 }
2524 else
2525 {
2526 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2527 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2528 rc = VERR_NEM_IPE_0;
2529 }
2530
2531 if (idCpu == NIL_VMCPUID)
2532 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2533 }
2534 }
2535 else
2536 rc = VERR_WRONG_ORDER;
2537 }
2538 return rc;
2539}
2540
2541
2542#if 1 && defined(DEBUG_bird)
2543/**
2544 * Debug only interface for poking around and exploring Hyper-V stuff.
2545 *
2546 * @param pGVM The ring-0 VM handle.
2547 * @param pVM The cross context VM handle.
2548 * @param idCpu The calling EMT.
2549 * @param u64Arg What to query. 0 == registers.
2550 */
2551VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2552{
2553 /*
2554 * Resolve CPU structures.
2555 */
2556 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2557 if (RT_SUCCESS(rc))
2558 {
2559 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2560
2561 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2562 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2563 if (u64Arg == 0)
2564 {
2565 /*
2566 * Query register.
2567 */
2568 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2569 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2570
2571 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2572 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2573 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2574
2575 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2576 pInput->VpIndex = pGVCpu->idCpu;
2577 pInput->fFlags = 0;
2578 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2579
2580 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2581 pGVCpu->nem.s.HypercallData.HCPhysPage,
2582 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2583 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2584 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2585 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2586 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2587 rc = VINF_SUCCESS;
2588 }
2589 else if (u64Arg == 1)
2590 {
2591 /*
2592 * Query partition property.
2593 */
2594 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2595 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2596
2597 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2598 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2599 pOutput->PropertyValue = 0;
2600
2601 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2602 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2603 pInput->uPadding = 0;
2604
2605 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2606 pGVCpu->nem.s.HypercallData.HCPhysPage,
2607 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2608 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2609 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2610 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2611 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2612 rc = VINF_SUCCESS;
2613 }
2614 else if (u64Arg == 2)
2615 {
2616 /*
2617 * Set register.
2618 */
2619 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2620 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2621 RT_BZERO(pInput, RT_OFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2622
2623 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2624 pInput->VpIndex = pGVCpu->idCpu;
2625 pInput->RsvdZ = 0;
2626 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2627 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2628 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2629
2630 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2631 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2632 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2633 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2634 rc = VINF_SUCCESS;
2635 }
2636 else
2637 rc = VERR_INVALID_FUNCTION;
2638 }
2639 return rc;
2640}
2641#endif /* DEBUG_bird */
2642